Changeset 31402 in vbox

- Timestamp: Aug 5, 2010 12:28:18 PM (14 years ago)
- Location: trunk
- Files: 21 edited, 1 moved
- include/VBox/pgm.h (modified) (3 diffs)
- src/VBox/VMM/Makefile.kmk (modified) (2 diffs)
- src/VBox/VMM/PATM/VMMGC/PATMGC.cpp (modified) (1 diff)
- src/VBox/VMM/PGM.cpp (modified) (6 diffs)
- src/VBox/VMM/PGMInline.h (modified) (23 diffs)
- src/VBox/VMM/PGMInternal.h (modified) (16 diffs)
- src/VBox/VMM/VMMAll/MMAllPagePool.cpp (modified) (1 diff)
- src/VBox/VMM/VMMAll/PGMAll.cpp (modified) (7 diffs)
- src/VBox/VMM/VMMAll/PGMAllBth.h (modified) (19 diffs)
- src/VBox/VMM/VMMAll/PGMAllMap.cpp (modified) (7 diffs)
- src/VBox/VMM/VMMAll/PGMAllPhys.cpp (modified) (7 diffs)
- src/VBox/VMM/VMMAll/PGMAllPool.cpp (modified) (27 diffs)
- src/VBox/VMM/VMMAll/TRPMAll.cpp (modified) (1 diff)
- src/VBox/VMM/VMMGC/PGMGC.cpp (modified) (1 diff)
- src/VBox/VMM/VMMGC/TRPMGCHandlers.cpp (modified) (21 diffs)
- src/VBox/VMM/VMMGC/TRPMGCHandlersA.asm (modified) (5 diffs)
- src/VBox/VMM/VMMGC/VMMGC.cpp (modified) (2 diffs)
- src/VBox/VMM/VMMR0/HWACCMR0.cpp (modified) (4 diffs)
- src/VBox/VMM/VMMR0/HWVMXR0.cpp (modified) (1 diff)
- src/VBox/VMM/VMMRZ/PGMRZDynMap.cpp (moved from trunk/src/VBox/VMM/VMMR0/PGMR0DynMap.cpp) (90 diffs)
- src/VBox/VMM/testcase/tstVMStructRC.cpp (modified) (4 diffs)
- src/VBox/VMM/testcase/tstVMStructSize.cpp (modified) (1 diff)
trunk/include/VBox/pgm.h
r31201 → r31402

@@ -386 +386 @@
 
 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE)
-VMMDECL(int)        PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv);
-VMMDECL(int)        PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv);
-# ifdef IN_RC
-VMMDECL(int)        PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv);
-VMMDECL(void)       PGMDynLockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage);
-VMMDECL(void)       PGMDynUnlockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage);
-#  ifdef VBOX_STRICT
-VMMDECL(void)       PGMDynCheckLocks(PVM pVM);
-#  endif
-# endif
-VMMDECL(void)       PGMDynMapStartAutoSet(PVMCPU pVCpu);
-VMMDECL(bool)       PGMDynMapStartOrMigrateAutoSet(PVMCPU pVCpu);
-VMMDECL(void)       PGMDynMapReleaseAutoSet(PVMCPU pVCpu);
-VMMDECL(void)       PGMDynMapFlushAutoSet(PVMCPU pVCpu);
-VMMDECL(void)       PGMDynMapMigrateAutoSet(PVMCPU pVCpu);
-VMMDECL(uint32_t)   PGMDynMapPushAutoSubset(PVMCPU pVCpu);
-VMMDECL(void)       PGMDynMapPopAutoSubset(PVMCPU pVCpu, uint32_t iPrevSubset);
+VMMDECL(void)       PGMRZDynMapStartAutoSet(PVMCPU pVCpu);
+VMMDECL(void)       PGMRZDynMapReleaseAutoSet(PVMCPU pVCpu);
+VMMDECL(void)       PGMRZDynMapFlushAutoSet(PVMCPU pVCpu);
+VMMDECL(uint32_t)   PGMRZDynMapPushAutoSubset(PVMCPU pVCpu);
+VMMDECL(void)       PGMRZDynMapPopAutoSubset(PVMCPU pVCpu, uint32_t iPrevSubset);
 #endif
-
 
 VMMDECL(void)       PGMSetLargePageUsage(PVM pVM, bool fUseLargePages);

@@ -422 +409 @@
  * @{
  */
+VMMRCDECL(int)      PGMRCDynMapInit(PVM pVM);
 /** @} */
 #endif /* IN_RC */

@@ -441 +429 @@
 VMMR0DECL(void)     PGMR0DynMapTermVM(PVM pVM);
 VMMR0DECL(int)      PGMR0DynMapAssertIntegrity(void);
+VMMR0DECL(bool)     PGMR0DynMapStartOrMigrateAutoSet(PVMCPU pVCpu);
+VMMR0DECL(void)     PGMR0DynMapMigrateAutoSet(PVMCPU pVCpu);
 # endif
 /** @} */
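For callers in ring-0 and raw-mode context, the hunk above amounts to an API rename. As a purely illustrative sketch (not part of this changeset, and assuming the new PGMRZDynMap* entry points keep the bracketing semantics of the old PGMDynMap* auto-set pair), a caller would change roughly like this:

/* Illustrative sketch only -- not part of r31402.  Assumes PGMRZDynMapStartAutoSet /
 * PGMRZDynMapReleaseAutoSet are drop-in replacements for the old PGMDynMap* pair. */
static void exampleDynMapBracket(PVMCPU pVCpu)
{
    /* Before r31402:  PGMDynMapStartAutoSet(pVCpu); ... PGMDynMapReleaseAutoSet(pVCpu); */

    PGMRZDynMapStartAutoSet(pVCpu);      /* open the per-VCPU automatic mapping set */
    /* ... map and use guest/host pages here ... */
    PGMRZDynMapReleaseAutoSet(pVCpu);    /* release all mappings taken above */
}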
trunk/src/VBox/VMM/Makefile.kmk
r30888 → r31402

@@ -362 +362 @@
 	VMMGC/HWACCMGCA.asm \
 	VMMRZ/DBGFRZ.cpp \
+	VMMRZ/PGMRZDynMap.cpp \
 	VMMRZ/VMMRZ.cpp \
 	VMMAll/CPUMAllRegs.cpp \

@@ -494 +495 @@
 	VMMR0/VMMR0JmpA-x86.asm
 VMMR0_SOURCES.darwin.x86 = \
-	VMMR0/PGMR0DynMap.cpp
+	VMMRZ/PGMRZDynMap.cpp
 
 # disable annoying warnings about array subscript above array bounds in aPages[]
trunk/src/VBox/VMM/PATM/VMMGC/PATMGC.cpp
r30326 → r31402

@@ -292 +292 @@
     /* We are no longer executing PATM code; set PIF again. */
     pVM->patm.s.CTXSUFF(pGCState)->fPIF = 1;
+    PGMRZDynMapReleaseAutoSet(VMMGetCpu0(pVM));
     CPUMGCCallV86Code(pRegFrame);
     /* does not return */
trunk/src/VBox/VMM/PGM.cpp
r31206 r31402 481 481 * In order to be able to map in and out memory and to be able to support 482 482 * guest with more RAM than we've got virtual address space, we'll employing 483 * a mapping cache. There is already a tiny one for GC (see PGMGCDynMapGCPageEx) 484 * and we'll create a similar one for ring-0 unless we decide to setup a dedicate 485 * memory context for the HWACCM execution. 483 * a mapping cache. Normally ring-0 and ring-3 can share the same cache, 484 * however on 32-bit darwin the ring-0 code is running in a different memory 485 * context and therefore needs a separate cache. In raw-mode context we also 486 * need a separate cache. The 32-bit darwin mapping cache and the one for 487 * raw-mode context share a lot of code, see PGMRZDYNMAP. 486 488 * 487 489 * … … 1720 1722 1721 1723 /* GC only: */ 1722 PGM_REG_COUNTER(&pStats->StatRCDynMapCacheHits, "/PGM/RC/DynMapCache/Hits" , "Number of dynamic page mapping cache hits.");1723 PGM_REG_COUNTER(&pStats->StatRCDynMapCacheMisses, "/PGM/RC/DynMapCache/Misses" , "Number of dynamic page mapping cache misses.");1724 1724 PGM_REG_COUNTER(&pStats->StatRCInvlPgConflict, "/PGM/RC/InvlPgConflict", "Number of times PGMInvalidatePage() detected a mapping conflict."); 1725 1725 PGM_REG_COUNTER(&pStats->StatRCInvlPgSyncMonCR3, "/PGM/RC/InvlPgSyncMonitorCR3", "Number of times PGMInvalidatePage() ran into PGM_SYNC_MONITOR_CR3."); … … 1778 1778 # endif 1779 1779 /* R0 only: */ 1780 PGM_REG_COUNTER(&pCpuStats->StatR0DynMapMigrateInvlPg, "/PGM/CPU%u/R0/DynMapMigrateInvlPg", "invlpg count in PGMDynMapMigrateAutoSet.");1781 PGM_REG_PROFILE(&pCpuStats->StatR0DynMapGCPageInl, "/PGM/CPU%u/R0/DynMapPageGCPageInl", "Calls to pgmR0DynMapGCPageInlined.");1782 PGM_REG_COUNTER(&pCpuStats->StatR0DynMapGCPageInlHits, "/PGM/CPU%u/R0/DynMapPageGCPageInl/Hits", "Hash table lookup hits.");1783 PGM_REG_COUNTER(&pCpuStats->StatR0DynMapGCPageInlMisses, "/PGM/CPU%u/R0/DynMapPageGCPageInl/Misses", "Misses that falls back to code common with PGMDynMapHCPage.");1784 PGM_REG_COUNTER(&pCpuStats->StatR0DynMapGCPageInlRamHits, "/PGM/CPU%u/R0/DynMapPageGCPageInl/RamHits", "1st ram range hits.");1785 PGM_REG_COUNTER(&pCpuStats->StatR0DynMapGCPageInlRamMisses, "/PGM/CPU%u/R0/DynMapPageGCPageInl/RamMisses", "1st ram range misses, takes slow path.");1786 PGM_REG_PROFILE(&pCpuStats->StatR0DynMapHCPageInl, "/PGM/CPU%u/R0/DynMapPageHCPageInl", "Calls to pgmR0DynMapHCPageInlined.");1787 PGM_REG_COUNTER(&pCpuStats->StatR0DynMapHCPageInlHits, "/PGM/CPU%u/R0/DynMapPageHCPageInl/Hits", "Hash table lookup hits.");1788 PGM_REG_COUNTER(&pCpuStats->StatR0DynMapHCPageInlMisses, "/PGM/CPU%u/R0/DynMapPageHCPageInl/Misses", "Misses that falls back to code common with PGMDynMapHCPage.");1789 PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPage, "/PGM/CPU%u/R0/DynMapPage", "Calls to pgmR0DynMapPage");1790 PGM_REG_COUNTER(&pCpuStats->StatR0DynMapSetOptimize, "/PGM/CPU%u/R0/DynMapPage/SetOptimize", "Calls to pgmDynMapOptimizeAutoSet.");1791 PGM_REG_COUNTER(&pCpuStats->StatR0DynMapSetSearchFlushes, "/PGM/CPU%u/R0/DynMapPage/SetSearchFlushes","Set search restorting to subset flushes.");1792 PGM_REG_COUNTER(&pCpuStats->StatR0DynMapSetSearchHits, "/PGM/CPU%u/R0/DynMapPage/SetSearchHits", "Set search hits.");1793 PGM_REG_COUNTER(&pCpuStats->StatR0DynMapSetSearchMisses, "/PGM/CPU%u/R0/DynMapPage/SetSearchMisses", "Set search misses.");1794 PGM_REG_PROFILE(&pCpuStats->StatR0DynMapHCPage, "/PGM/CPU%u/R0/DynMapPage/HCPage", "Calls to PGMDynMapHCPage (ring-0).");1795 
PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPageHits0, "/PGM/CPU%u/R0/DynMapPage/Hits0", "Hits at iPage+0");1796 PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPageHits1, "/PGM/CPU%u/R0/DynMapPage/Hits1", "Hits at iPage+1");1797 PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPageHits2, "/PGM/CPU%u/R0/DynMapPage/Hits2", "Hits at iPage+2");1798 PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPageInvlPg, "/PGM/CPU%u/R0/DynMapPage/InvlPg", "invlpg count in pgmR0DynMapPageSlow.");1799 PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPageSlow, "/PGM/CPU%u/R0/DynMapPage/Slow", "Calls to pgmR0DynMapPageSlow - subtract this from pgmR0DynMapPage to get 1st level hits.");1800 PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPageSlowLoopHits, "/PGM/CPU%u/R0/DynMapPage/SlowLoopHits" , "Hits in the loop path.");1801 PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPageSlowLoopMisses, "/PGM/CPU%u/R0/DynMapPage/SlowLoopMisses", "Misses in the loop path. NonLoopMisses = Slow - SlowLoopHit - SlowLoopMisses");1802 //PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPageSlowLostHits, "/PGM/CPU%u/R0/DynMapPage/SlowLostHits", "Lost hits.");1803 PGM_REG_COUNTER(&pCpuStats->StatR0DynMapSubsets, "/PGM/CPU%u/R0/Subsets", "Times PGMDynMapPushAutoSubset was called.");1804 PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPopFlushes, "/PGM/CPU%u/R0/SubsetPopFlushes", "Times PGMDynMapPopAutoSubset flushes the subset.");1805 PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[0], "/PGM/CPU%u/R0/SetSize000..09", "00-09% filled");1806 PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[1], "/PGM/CPU%u/R0/SetSize010..19", "10-19% filled");1807 PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[2], "/PGM/CPU%u/R0/SetSize020..29", "20-29% filled");1808 PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[3], "/PGM/CPU%u/R0/SetSize030..39", "30-39% filled");1809 PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[4], "/PGM/CPU%u/R0/SetSize040..49", "40-49% filled");1810 PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[5], "/PGM/CPU%u/R0/SetSize050..59", "50-59% filled");1811 PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[6], "/PGM/CPU%u/R0/SetSize060..69", "60-69% filled");1812 PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[7], "/PGM/CPU%u/R0/SetSize070..79", "70-79% filled");1813 PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[8], "/PGM/CPU%u/R0/SetSize080..89", "80-89% filled");1814 PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[9], "/PGM/CPU%u/R0/SetSize090..99", "90-99% filled");1815 PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[10], "/PGM/CPU%u/R0/SetSize100", "100% filled");1816 1780 1817 1781 /* RZ only: */ … … 1869 1833 PGM_REG_COUNTER(&pCpuStats->StatRZGuestROMWriteUnhandled, "/PGM/CPU%u/RZ/ROMWriteUnhandled", "The number of times the Guest ROM change was passed back to the recompiler."); 1870 1834 1835 PGM_REG_COUNTER(&pCpuStats->StatRZDynMapMigrateInvlPg, "/PGM/CPU%u/RZ/DynMap/MigrateInvlPg", "invlpg count in PGMR0DynMapMigrateAutoSet."); 1836 PGM_REG_PROFILE(&pCpuStats->StatRZDynMapGCPageInl, "/PGM/CPU%u/RZ/DynMap/PageGCPageInl", "Calls to pgmR0DynMapGCPageInlined."); 1837 PGM_REG_COUNTER(&pCpuStats->StatRZDynMapGCPageInlHits, "/PGM/CPU%u/RZ/DynMap/PageGCPageInl/Hits", "Hash table lookup hits."); 1838 PGM_REG_COUNTER(&pCpuStats->StatRZDynMapGCPageInlMisses, "/PGM/CPU%u/RZ/DynMap/PageGCPageInl/Misses", "Misses that falls back to the code common."); 1839 PGM_REG_COUNTER(&pCpuStats->StatRZDynMapGCPageInlRamHits, "/PGM/CPU%u/RZ/DynMap/PageGCPageInl/RamHits", "1st ram range hits."); 1840 PGM_REG_COUNTER(&pCpuStats->StatRZDynMapGCPageInlRamMisses, "/PGM/CPU%u/RZ/DynMap/PageGCPageInl/RamMisses", 
"1st ram range misses, takes slow path."); 1841 PGM_REG_PROFILE(&pCpuStats->StatRZDynMapHCPageInl, "/PGM/CPU%u/RZ/DynMap/PageHCPageInl", "Calls to pgmRZDynMapHCPageInlined."); 1842 PGM_REG_COUNTER(&pCpuStats->StatRZDynMapHCPageInlHits, "/PGM/CPU%u/RZ/DynMap/PageHCPageInl/Hits", "Hash table lookup hits."); 1843 PGM_REG_COUNTER(&pCpuStats->StatRZDynMapHCPageInlMisses, "/PGM/CPU%u/RZ/DynMap/PageHCPageInl/Misses", "Misses that falls back to the code common."); 1844 PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPage, "/PGM/CPU%u/RZ/DynMap/Page", "Calls to pgmR0DynMapPage"); 1845 PGM_REG_COUNTER(&pCpuStats->StatRZDynMapSetOptimize, "/PGM/CPU%u/RZ/DynMap/Page/SetOptimize", "Calls to pgmRZDynMapOptimizeAutoSet."); 1846 PGM_REG_COUNTER(&pCpuStats->StatRZDynMapSetSearchFlushes, "/PGM/CPU%u/RZ/DynMap/Page/SetSearchFlushes", "Set search restorting to subset flushes."); 1847 PGM_REG_COUNTER(&pCpuStats->StatRZDynMapSetSearchHits, "/PGM/CPU%u/RZ/DynMap/Page/SetSearchHits", "Set search hits."); 1848 PGM_REG_COUNTER(&pCpuStats->StatRZDynMapSetSearchMisses, "/PGM/CPU%u/RZ/DynMap/Page/SetSearchMisses", "Set search misses."); 1849 PGM_REG_PROFILE(&pCpuStats->StatRZDynMapHCPage, "/PGM/CPU%u/RZ/DynMap/Page/HCPage", "Calls to pgmRZDynMapHCPageCommon (ring-0)."); 1850 PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageHits0, "/PGM/CPU%u/RZ/DynMap/Page/Hits0", "Hits at iPage+0"); 1851 PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageHits1, "/PGM/CPU%u/RZ/DynMap/Page/Hits1", "Hits at iPage+1"); 1852 PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageHits2, "/PGM/CPU%u/RZ/DynMap/Page/Hits2", "Hits at iPage+2"); 1853 PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageInvlPg, "/PGM/CPU%u/RZ/DynMap/Page/InvlPg", "invlpg count in pgmR0DynMapPageSlow."); 1854 PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageSlow, "/PGM/CPU%u/RZ/DynMap/Page/Slow", "Calls to pgmR0DynMapPageSlow - subtract this from pgmR0DynMapPage to get 1st level hits."); 1855 PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageSlowLoopHits, "/PGM/CPU%u/RZ/DynMap/Page/SlowLoopHits" , "Hits in the loop path."); 1856 PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageSlowLoopMisses, "/PGM/CPU%u/RZ/DynMap/Page/SlowLoopMisses", "Misses in the loop path. 
NonLoopMisses = Slow - SlowLoopHit - SlowLoopMisses"); 1857 //PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageSlowLostHits, "/PGM/CPU%u/R0/DynMap/Page/SlowLostHits", "Lost hits."); 1858 PGM_REG_COUNTER(&pCpuStats->StatRZDynMapSubsets, "/PGM/CPU%u/RZ/DynMap/Subsets", "Times PGMRZDynMapPushAutoSubset was called."); 1859 PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPopFlushes, "/PGM/CPU%u/RZ/DynMap/SubsetPopFlushes", "Times PGMRZDynMapPopAutoSubset flushes the subset."); 1860 PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[0], "/PGM/CPU%u/RZ/DynMap/SetFilledPct000..09", "00-09% filled (RC: min(set-size, dynmap-size))"); 1861 PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[1], "/PGM/CPU%u/RZ/DynMap/SetFilledPct010..19", "10-19% filled (RC: min(set-size, dynmap-size))"); 1862 PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[2], "/PGM/CPU%u/RZ/DynMap/SetFilledPct020..29", "20-29% filled (RC: min(set-size, dynmap-size))"); 1863 PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[3], "/PGM/CPU%u/RZ/DynMap/SetFilledPct030..39", "30-39% filled (RC: min(set-size, dynmap-size))"); 1864 PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[4], "/PGM/CPU%u/RZ/DynMap/SetFilledPct040..49", "40-49% filled (RC: min(set-size, dynmap-size))"); 1865 PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[5], "/PGM/CPU%u/RZ/DynMap/SetFilledPct050..59", "50-59% filled (RC: min(set-size, dynmap-size))"); 1866 PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[6], "/PGM/CPU%u/RZ/DynMap/SetFilledPct060..69", "60-69% filled (RC: min(set-size, dynmap-size))"); 1867 PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[7], "/PGM/CPU%u/RZ/DynMap/SetFilledPct070..79", "70-79% filled (RC: min(set-size, dynmap-size))"); 1868 PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[8], "/PGM/CPU%u/RZ/DynMap/SetFilledPct080..89", "80-89% filled (RC: min(set-size, dynmap-size))"); 1869 PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[9], "/PGM/CPU%u/RZ/DynMap/SetFilledPct090..99", "90-99% filled (RC: min(set-size, dynmap-size))"); 1870 PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[10], "/PGM/CPU%u/RZ/DynMap/SetFilledPct100", "100% filled (RC: min(set-size, dynmap-size))"); 1871 1871 1872 /* HC only: */ 1872 1873 … … 2037 2038 pVM->pgm.s.paDynPageMapPaePTEsGC = pMapping->aPTs[iPT].paPaePTsRC + iPG * sizeof(pMapping->aPTs[0].paPaePTsR3->a[0]); 2038 2039 2039 /* init cache */2040 /* init cache area */ 2040 2041 RTHCPHYS HCPhysDummy = MMR3PageDummyHCPhys(pVM); 2041 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache); i++) 2042 pVM->pgm.s.aHCPhysDynPageMapCache[i] = HCPhysDummy; 2043 2044 for (unsigned i = 0; i < MM_HYPER_DYNAMIC_SIZE; i += PAGE_SIZE) 2045 { 2046 rc = PGMMap(pVM, pVM->pgm.s.pbDynPageMapBaseGC + i, HCPhysDummy, PAGE_SIZE, 0); 2042 for (uint32_t offDynMap = 0; offDynMap < MM_HYPER_DYNAMIC_SIZE; offDynMap += PAGE_SIZE) 2043 { 2044 rc = PGMMap(pVM, pVM->pgm.s.pbDynPageMapBaseGC + offDynMap, HCPhysDummy, PAGE_SIZE, 0); 2047 2045 AssertRCReturn(rc, rc); 2048 2046 } … … 2205 2203 */ 2206 2204 pVM->pgm.s.paDynPageMap32BitPTEsGC += offDelta; 2207 pVM->pgm.s.paDynPageMapPaePTEsGC += offDelta; 2208 pVM->pgm.s.pbDynPageMapBaseGC += offDelta; 2205 pVM->pgm.s.paDynPageMapPaePTEsGC += offDelta; 2206 pVM->pgm.s.pbDynPageMapBaseGC += offDelta; 2207 2208 if (pVM->pgm.s.pRCDynMap) 2209 { 2210 pVM->pgm.s.pRCDynMap += offDelta; 2211 PPGMRCDYNMAP pDynMap = (PPGMRCDYNMAP)MMHyperRCToCC(pVM, pVM->pgm.s.pRCDynMap); 2212 2213 pDynMap->paPages += offDelta; 2214 PPGMRCDYNMAPENTRY paPages = 
(PPGMRCDYNMAPENTRY)MMHyperRCToCC(pVM, pDynMap->paPages); 2215 2216 for (uint32_t iPage = 0; iPage < pDynMap->cPages; iPage++) 2217 { 2218 paPages[iPage].pvPage += offDelta; 2219 paPages[iPage].uPte.pv += offDelta; 2220 } 2221 } 2209 2222 2210 2223 /* -
trunk/src/VBox/VMM/PGMInline.h
r31178 r31402 286 286 } 287 287 288 #if def VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0289 290 /** 291 * Inlined version of the ring-0 version of PGMDynMapHCPage that292 * optimizes access to pages already in the set.288 #if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC) 289 290 /** 291 * Inlined version of the ring-0 version of the host page mapping code 292 * that optimizes access to pages already in the set. 293 293 * 294 294 * @returns VINF_SUCCESS. Will bail out to ring-3 on failure. … … 297 297 * @param ppv Where to store the mapping address. 298 298 */ 299 DECLINLINE(int) pgmR 0DynMapHCPageInlined(PVMCPU pVCpu, RTHCPHYS HCPhys, void **ppv)299 DECLINLINE(int) pgmRZDynMapHCPageInlined(PVMCPU pVCpu, RTHCPHYS HCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL) 300 300 { 301 301 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet; 302 302 303 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapHCPageInl, a);303 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInl, a); 304 304 Assert(!(HCPhys & PAGE_OFFSET_MASK)); 305 305 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries)); … … 308 308 unsigned iEntry = pSet->aiHashTable[iHash]; 309 309 if ( iEntry < pSet->cEntries 310 && pSet->aEntries[iEntry].HCPhys == HCPhys) 311 { 310 && pSet->aEntries[iEntry].HCPhys == HCPhys 311 && pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1) 312 { 313 pSet->aEntries[iEntry].cInlinedRefs++; 312 314 *ppv = pSet->aEntries[iEntry].pvPage; 313 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapHCPageInlHits);315 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInlHits); 314 316 } 315 317 else 316 318 { 317 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapHCPageInlMisses);318 pgmR 0DynMapHCPageCommon(pSet, HCPhys, ppv);319 } 320 321 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapHCPageInl, a);319 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInlMisses); 320 pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS); 321 } 322 323 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInl, a); 322 324 return VINF_SUCCESS; 323 325 } … … 325 327 326 328 /** 327 * Inlined version of the ring-0 version of PGMDynMapGCPage that optimizes328 * a ccess to pages already in the set.329 * 330 * @returns See PGMDynMapGCPage.329 * Inlined version of the guest page mapping code that optimizes access to pages 330 * already in the set. 331 * 332 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details. 331 333 * @param pVM The VM handle. 332 334 * @param pVCpu The current CPU. … … 334 336 * @param ppv Where to store the mapping address. 335 337 */ 336 DECLINLINE(int) pgmR 0DynMapGCPageV2Inlined(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv)337 { 338 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapGCPageInl, a);338 DECLINLINE(int) pgmRZDynMapGCPageV2Inlined(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL) 339 { 340 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a); 339 341 AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys)); 340 342 … … 347 349 /** @todo || page state stuff */)) 348 350 { 349 /* This case is not counted into StatR 0DynMapGCPageInl. */350 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapGCPageInlRamMisses);351 return PGMDynMapGCPage(pVM, GCPhys, ppv);351 /* This case is not counted into StatRZDynMapGCPageInl. 
*/ 352 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamMisses); 353 return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS); 352 354 } 353 355 354 356 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]); 355 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapGCPageInlRamHits);357 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamHits); 356 358 357 359 /* 358 * pgmR 0DynMapHCPageInlined with out stats.360 * pgmRZDynMapHCPageInlined with out stats. 359 361 */ 360 362 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet; … … 365 367 unsigned iEntry = pSet->aiHashTable[iHash]; 366 368 if ( iEntry < pSet->cEntries 367 && pSet->aEntries[iEntry].HCPhys == HCPhys) 368 { 369 && pSet->aEntries[iEntry].HCPhys == HCPhys 370 && pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1) 371 { 372 pSet->aEntries[iEntry].cInlinedRefs++; 369 373 *ppv = pSet->aEntries[iEntry].pvPage; 370 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapGCPageInlHits);374 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlHits); 371 375 } 372 376 else 373 377 { 374 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapGCPageInlMisses);375 pgmR 0DynMapHCPageCommon(pSet, HCPhys, ppv);376 } 377 378 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapGCPageInl, a);378 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlMisses); 379 pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS); 380 } 381 382 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a); 379 383 return VINF_SUCCESS; 380 384 } … … 382 386 383 387 /** 384 * Inlined version of the ring-0 version of PGMDynMapGCPagethat optimizes388 * Inlined version of the ring-0 version of guest page mapping that optimizes 385 389 * access to pages already in the set. 386 390 * 387 * @returns See PGMDynMapGCPage.391 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details. 388 392 * @param pVCpu The current CPU. 389 393 * @param GCPhys The guest physical address of the page. 390 394 * @param ppv Where to store the mapping address. 391 395 */ 392 DECLINLINE(int) pgmR 0DynMapGCPageInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv)393 { 394 return pgmR 0DynMapGCPageV2Inlined(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhys, ppv);395 } 396 397 398 /** 399 * Inlined version of the ring-0 version of PGMDynMapGCPageOff that optimizes400 * access to pages already in the set.401 * 402 * @returns See PGMDynMapGCPage.396 DECLINLINE(int) pgmRZDynMapGCPageInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL) 397 { 398 return pgmRZDynMapGCPageV2Inlined(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS); 399 } 400 401 402 /** 403 * Inlined version of the ring-0 version of the guest byte mapping code 404 * that optimizes access to pages already in the set. 405 * 406 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details. 403 407 * @param pVCpu The current CPU. 404 408 * @param HCPhys The physical address of the page. 405 * @param ppv Where to store the mapping address. 406 */ 407 DECLINLINE(int) pgmR0DynMapGCPageOffInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv) 408 { 409 STAM_PROFILE_START(&pVCpu->pgm.s.StatR0DynMapGCPageInl, a); 409 * @param ppv Where to store the mapping address. The offset is 410 * preserved. 
411 */ 412 DECLINLINE(int) pgmRZDynMapGCPageOffInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL) 413 { 414 STAM_PROFILE_START(&pVCpu->pgm.s.StatRZDynMapGCPageInl, a); 410 415 411 416 /* … … 418 423 /** @todo || page state stuff */)) 419 424 { 420 /* This case is not counted into StatR 0DynMapGCPageInl. */421 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapGCPageInlRamMisses);422 return PGMDynMapGCPageOff(pVM, GCPhys, ppv);425 /* This case is not counted into StatRZDynMapGCPageInl. */ 426 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamMisses); 427 return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS); 423 428 } 424 429 425 430 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]); 426 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapGCPageInlRamHits);431 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamHits); 427 432 428 433 /* 429 * pgmR 0DynMapHCPageInlined with out stats.434 * pgmRZDynMapHCPageInlined with out stats. 430 435 */ 431 436 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet; … … 436 441 unsigned iEntry = pSet->aiHashTable[iHash]; 437 442 if ( iEntry < pSet->cEntries 438 && pSet->aEntries[iEntry].HCPhys == HCPhys) 439 { 443 && pSet->aEntries[iEntry].HCPhys == HCPhys 444 && pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1) 445 { 446 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlHits); 447 pSet->aEntries[iEntry].cInlinedRefs++; 440 448 *ppv = (void *)((uintptr_t)pSet->aEntries[iEntry].pvPage | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys)); 441 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlHits);442 449 } 443 450 else 444 451 { 445 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapGCPageInlMisses);446 pgmR 0DynMapHCPageCommon(pSet, HCPhys, ppv);452 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlMisses); 453 pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS); 447 454 *ppv = (void *)((uintptr_t)*ppv | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys)); 448 455 } 449 456 450 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapGCPageInl, a);457 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a); 451 458 return VINF_SUCCESS; 452 459 } … … 462 469 * @param pPage The page. 463 470 */ 464 DECLINLINE(void *) pgmPoolMapPageInlined(PVM pVM, PPGMPOOLPAGE pPage )471 DECLINLINE(void *) pgmPoolMapPageInlined(PVM pVM, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL) 465 472 { 466 473 if (pPage->idx >= PGMPOOL_IDX_FIRST) … … 468 475 Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages); 469 476 void *pv; 470 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 471 pgmR0DynMapHCPageInlined(VMMGetCpu(pVM), pPage->Core.Key, &pv); 472 # else 473 PGMDynMapHCPage(pVM, pPage->Core.Key, &pv); 474 # endif 477 pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS); 475 478 return pv; 476 479 } … … 486 489 * @param pPage The page. 
487 490 */ 488 DECLINLINE(void *) pgmPoolMapPageV2Inlined(PVM pVM, PVMCPU pVCpu, PPGMPOOLPAGE pPage )491 DECLINLINE(void *) pgmPoolMapPageV2Inlined(PVM pVM, PVMCPU pVCpu, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL) 489 492 { 490 493 if (pPage->idx >= PGMPOOL_IDX_FIRST) … … 492 495 Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages); 493 496 void *pv; 494 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0495 497 Assert(pVCpu == VMMGetCpu(pVM)); 496 pgmR0DynMapHCPageInlined(pVCpu, pPage->Core.Key, &pv); 497 # else 498 PGMDynMapHCPage(pVM, pPage->Core.Key, &pv); 499 # endif 498 pgmRZDynMapHCPageInlined(pVCpu, pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS); 500 499 return pv; 501 500 } … … 514 513 * @param HCPhys HC Physical address of the page. 515 514 */ 516 DECLINLINE(void *) pgm DynMapHCPageOff(PVM pVM, RTHCPHYS HCPhys)515 DECLINLINE(void *) pgmRZDynMapHCPageOff(PVM pVM, RTHCPHYS HCPhys RTLOG_COMMA_SRC_POS_DECL) 517 516 { 518 517 void *pv; 519 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 520 pgmR0DynMapHCPageInlined(VMMGetCpu(pVM), HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, &pv); 521 # else 522 PGMDynMapHCPage(pVM, HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, &pv); 523 # endif 518 pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, &pv RTLOG_COMMA_SRC_POS_ARGS); 524 519 pv = (void *)((uintptr_t)pv | ((uintptr_t)HCPhys & PAGE_OFFSET_MASK)); 525 520 return pv; … … 651 646 { 652 647 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 653 int rc = pgmR 0DynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPd);648 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPd RTLOG_COMMA_SRC_POS); 654 649 if (RT_FAILURE(rc)) 655 650 { … … 676 671 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 677 672 PX86PD pGuestPD = NULL; 678 int rc = pgmR 0DynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPD);673 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPD RTLOG_COMMA_SRC_POS); 679 674 if (RT_FAILURE(rc)) 680 675 { … … 705 700 { 706 701 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 707 int rc = pgmR 0DynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPdpt);702 int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPdpt RTLOG_COMMA_SRC_POS); 708 703 if (RT_FAILURE(rc)) 709 704 { … … 749 744 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 750 745 PX86PDPT pGuestPDPT = NULL; 751 int rc = pgmR 0DynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPDPT);746 int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPDPT RTLOG_COMMA_SRC_POS); 752 747 AssertRCReturn(rc, NULL); 753 748 #else … … 785 780 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 786 781 PX86PDPAE pGuestPD = NULL; 787 int rc = pgmR 0DynMapGCPageInlined(pVCpu,782 int rc = pgmRZDynMapGCPageInlined(pVCpu, 788 783 pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, 789 (void **)&pGuestPD); 784 (void **)&pGuestPD 785 RTLOG_COMMA_SRC_POS); 790 786 if (RT_SUCCESS(rc)) 791 787 return pGuestPD->a[iPD]; … … 837 833 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 838 834 PX86PDPAE pGuestPD = NULL; 839 int rc = pgmR 0DynMapGCPageInlined(pVCpu,835 int rc = pgmRZDynMapGCPageInlined(pVCpu, 840 836 pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, 841 (void **)&pGuestPD); 837 (void **)&pGuestPD 838 RTLOG_COMMA_SRC_POS); 842 839 if (RT_FAILURE(rc)) 843 840 { … … 868 865 { 869 866 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 870 int rc = pgmR 0DynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPml4);867 int rc = pgmRZDynMapGCPageInlined(pVCpu, 
pVCpu->pgm.s.GCPhysCR3, (void **)ppPml4 RTLOG_COMMA_SRC_POS); 871 868 if (RT_FAILURE(rc)) 872 869 { … … 910 907 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 911 908 PX86PML4 pGuestPml4; 912 int rc = pgmR 0DynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPml4);909 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPml4 RTLOG_COMMA_SRC_POS); 913 910 AssertRCReturn(rc, NULL); 914 911 #else -
trunk/src/VBox/VMM/PGMInternal.h
r31206 r31402 234 234 * this. 235 235 * 236 * @remark In RC this uses PGMDynMapHCPage(), so it will consume of the small237 * page window employeed by that function. Be careful.236 * @remark Use with care as we don't have so much dynamic mapping space in 237 * ring-0 on 32-bit darwin and in RC. 238 238 * @remark There is no need to assert on the result. 239 239 */ 240 #if def IN_RC240 #if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC) 241 241 # define PGM_HCPHYS_2_PTR(pVM, pVCpu, HCPhys, ppv) \ 242 PGMDynMapHCPage(pVM, HCPhys, (void **)(ppv)) 243 #elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) 244 # define PGM_HCPHYS_2_PTR(pVM, pVCpu, HCPhys, ppv) \ 245 pgmR0DynMapHCPageInlined(pVCpu, HCPhys, (void **)(ppv)) 242 pgmRZDynMapHCPageInlined(pVCpu, HCPhys, (void **)(ppv) RTLOG_COMMA_SRC_POS) 246 243 #else 247 244 # define PGM_HCPHYS_2_PTR(pVM, pVCpu, HCPhys, ppv) \ … … 258 255 * @param ppv Where to store the virtual address. No need to cast this. 259 256 * 260 * @remark In GC this uses PGMGCDynMapGCPage(), so it will consume of the261 * small page window employeed by that function. Be careful.257 * @remark Use with care as we don't have so much dynamic mapping space in 258 * ring-0 on 32-bit darwin and in RC. 262 259 * @remark There is no need to assert on the result. 263 260 */ 264 #if def IN_RC261 #if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC) 265 262 # define PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, ppv) \ 266 PGMDynMapGCPage(pVM, GCPhys, (void **)(ppv)) 267 #elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) 268 # define PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, ppv) \ 269 pgmR0DynMapGCPageV2Inlined(pVM, pVCpu, GCPhys, (void **)(ppv)) 263 pgmRZDynMapGCPageV2Inlined(pVM, pVCpu, GCPhys, (void **)(ppv) RTLOG_COMMA_SRC_POS) 270 264 #else 271 265 # define PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, ppv) \ … … 281 275 * @param ppv Where to store the virtual address. No need to cast this. 282 276 * 283 * @remark In GC this uses PGMGCDynMapGCPage(), so it will consume of the284 * small page window employeed by that function. Be careful.277 * @remark Use with care as we don't have so much dynamic mapping space in 278 * ring-0 on 32-bit darwin and in RC. 285 279 * @remark There is no need to assert on the result. 286 280 */ … … 295 289 * @param ppv Where to store the virtual address. No need to cast this. 296 290 * 297 * @remark In RC this uses PGMGCDynMapGCPage(), so it will consume of the298 * small page window employeed by that function. Be careful.291 * @remark Use with care as we don't have so much dynamic mapping space in 292 * ring-0 on 32-bit darwin and in RC. 299 293 * @remark There is no need to assert on the result. 300 294 */ … … 309 303 * @param ppv Where to store the virtual address. No need to cast this. 310 304 * 311 * @remark In GC this uses PGMGCDynMapGCPage(), so it will consume of the312 * small page window employeed by that function. Be careful.305 * @remark Use with care as we don't have so much dynamic mapping space in 306 * ring-0 on 32-bit darwin and in RC. 313 307 * @remark There is no need to assert on the result. 
314 308 */ 315 309 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) 316 310 # define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \ 317 PGMDynMapGCPageOff(pVM, GCPhys, (void **)(ppv))311 pgmRZDynMapGCPageOffInlined(VMMGetCpu(pVM), GCPhys, (void **)(ppv) RTLOG_COMMA_SRC_POS) 318 312 #else 319 313 # define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \ 320 314 PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */ 321 315 #endif 316 317 /** @def PGM_DYNMAP_UNUSED_HINT 318 * Hints to the dynamic mapping code in RC and R0/darwin that the specified page 319 * is no longer used. 320 * 321 * @param pVCpu The current CPU. 322 * @param pPage The pool page. 323 */ 324 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) 325 # ifdef LOG_ENABLED 326 # define PGM_DYNMAP_UNUSED_HINT(pVCpu, pvPage) pgmRZDynMapUnusedHint(pVCpu, pvPage, RT_SRC_POS) 327 # else 328 # define PGM_DYNMAP_UNUSED_HINT(pVCpu, pvPage) pgmRZDynMapUnusedHint(pVCpu, pvPage) 329 # endif 330 #else 331 # define PGM_DYNMAP_UNUSED_HINT(pVCpu, pvPage) do {} while (0) 332 #endif 333 334 /** @def PGM_DYNMAP_UNUSED_HINT_VM 335 * Hints to the dynamic mapping code in RC and R0/darwin that the specified page 336 * is no longer used. 337 * 338 * @param pVM The VM handle. 339 * @param pPage The pool page. 340 */ 341 #define PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvPage) PGM_DYNMAP_UNUSED_HINT(VMMGetCpu(pVM), pvPage) 342 322 343 323 344 /** @def PGM_INVL_PG … … 1549 1570 1550 1571 /** 1572 * Raw-mode context dynamic mapping cache entry. 1573 * 1574 * Because of raw-mode context being reloctable and all relocations are applied 1575 * in ring-3, this has to be defined here and be RC specfic. 1576 * 1577 * @sa PGMRZDYNMAPENTRY, PGMR0DYNMAPENTRY. 1578 */ 1579 typedef struct PGMRCDYNMAPENTRY 1580 { 1581 /** The physical address of the currently mapped page. 1582 * This is duplicate for three reasons: cache locality, cache policy of the PT 1583 * mappings and sanity checks. */ 1584 RTHCPHYS HCPhys; 1585 /** Pointer to the page. */ 1586 RTRCPTR pvPage; 1587 /** The number of references. */ 1588 int32_t volatile cRefs; 1589 /** PTE pointer union. */ 1590 union PGMRCDYNMAPENTRY_PPTE 1591 { 1592 /** PTE pointer, 32-bit legacy version. */ 1593 RCPTRTYPE(PX86PTE) pLegacy; 1594 /** PTE pointer, PAE version. */ 1595 RCPTRTYPE(PX86PTEPAE) pPae; 1596 /** PTE pointer, the void version. */ 1597 RTRCPTR pv; 1598 } uPte; 1599 /** Alignment padding. */ 1600 RTRCPTR RCPtrAlignment; 1601 } PGMRCDYNMAPENTRY; 1602 /** Pointer to a dynamic mapping cache entry for the raw-mode context. */ 1603 typedef PGMRCDYNMAPENTRY *PPGMRCDYNMAPENTRY; 1604 1605 1606 /** 1607 * Dynamic mapping cache for the raw-mode context. 1608 * 1609 * This is initialized during VMMRC init based upon the pbDynPageMapBaseGC and 1610 * paDynPageMap* PGM members. However, it has to be defined in PGMInternal.h 1611 * so that we can perform relocations from PGMR3Relocate. This has the 1612 * consequence that we must have separate ring-0 and raw-mode context versions 1613 * of this struct even if they share the basic elements. 1614 * 1615 * @sa PPGMRZDYNMAP, PGMR0DYNMAP. 1616 */ 1617 typedef struct PGMRCDYNMAP 1618 { 1619 /** The usual magic number / eye catcher (PGMRZDYNMAP_MAGIC). */ 1620 uint32_t u32Magic; 1621 /** Array for tracking and managing the pages. */ 1622 RCPTRTYPE(PPGMRCDYNMAPENTRY) paPages; 1623 /** The cache size given as a number of pages. 
*/ 1624 uint32_t cPages; 1625 /** Whether it's 32-bit legacy or PAE/AMD64 paging mode. */ 1626 bool fLegacyMode; 1627 /** The current load. 1628 * This does not include guard pages. */ 1629 uint32_t cLoad; 1630 /** The max load ever. 1631 * This is maintained to get trigger adding of more mapping space. */ 1632 uint32_t cMaxLoad; 1633 /** The number of guard pages. */ 1634 uint32_t cGuardPages; 1635 /** The number of users (protected by hInitLock). */ 1636 uint32_t cUsers; 1637 } PGMRCDYNMAP; 1638 /** Pointer to the dynamic cache for the raw-mode context. */ 1639 typedef PGMRCDYNMAP *PPGMRCDYNMAP; 1640 1641 1642 /** 1551 1643 * Mapping cache usage set entry. 1552 1644 * 1553 1645 * @remarks 16-bit ints was choosen as the set is not expected to be used beyond 1554 1646 * the dynamic ring-0 and (to some extent) raw-mode context mapping 1555 * cache. If it's extended to include ring-3, well, then something will1556 * have be changed here...1647 * cache. If it's extended to include ring-3, well, then something 1648 * will have be changed here... 1557 1649 */ 1558 1650 typedef struct PGMMAPSETENTRY 1559 1651 { 1652 /** Pointer to the page. */ 1653 #ifndef IN_RC 1654 RTR0PTR pvPage; 1655 #else 1656 RTRCPTR pvPage; 1657 # if HC_ARCH_BITS == 64 1658 uint32_t u32Alignment2; 1659 # endif 1660 #endif 1560 1661 /** The mapping cache index. */ 1561 1662 uint16_t iPage; … … 1563 1664 * The max is UINT16_MAX - 1. */ 1564 1665 uint16_t cRefs; 1565 #if HC_ARCH_BITS == 64 1566 uint32_t alignment; 1567 #endif 1568 /** Pointer to the page. */ 1569 RTR0PTR pvPage; 1666 /** The number inlined references. 1667 * The max is UINT16_MAX - 1. */ 1668 uint16_t cInlinedRefs; 1669 /** Unreferences. */ 1670 uint16_t cUnrefs; 1671 1672 #if HC_ARCH_BITS == 32 1673 uint32_t u32Alignment1; 1674 #endif 1570 1675 /** The physical address for this entry. */ 1571 1676 RTHCPHYS HCPhys; 1572 1677 } PGMMAPSETENTRY; 1678 AssertCompileMemberOffset(PGMMAPSETENTRY, iPage, RT_MAX(sizeof(RTR0PTR), sizeof(RTRCPTR))); 1679 AssertCompileMemberAlignment(PGMMAPSETENTRY, HCPhys, sizeof(RTHCPHYS)); 1573 1680 /** Pointer to a mapping cache usage set entry. */ 1574 1681 typedef PGMMAPSETENTRY *PPGMMAPSETENTRY; … … 2150 2257 * @remark There is no need to assert on the result. 2151 2258 */ 2152 #if defined(IN_RC) 2153 # define PGMPOOL_PAGE_2_PTR(pVM, pPage) pgmPoolMapPageInlined((pVM), (pPage)) 2154 #elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) 2155 # define PGMPOOL_PAGE_2_PTR(pVM, pPage) pgmPoolMapPageInlined((pVM), (pPage)) 2259 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) 2260 # define PGMPOOL_PAGE_2_PTR(pVM, pPage) pgmPoolMapPageInlined((pVM), (pPage) RTLOG_COMMA_SRC_POS) 2156 2261 #elif defined(VBOX_STRICT) 2157 2262 # define PGMPOOL_PAGE_2_PTR(pVM, pPage) pgmPoolMapPageStrict(pPage) … … 2178 2283 * @remark There is no need to assert on the result. 
2179 2284 */ 2180 #if defined(IN_RC) 2181 # define PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pPage) pgmPoolMapPageV2Inlined((pVM), (pVCpu), (pPage)) 2182 #elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) 2183 # define PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pPage) pgmPoolMapPageV2Inlined((pVM), (pVCpu), (pPage)) 2285 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) 2286 # define PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pPage) pgmPoolMapPageV2Inlined((pVM), (pVCpu), (pPage) RTLOG_COMMA_SRC_POS) 2184 2287 #else 2185 2288 # define PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pPage) PGMPOOL_PAGE_2_PTR((pVM), (pPage)) … … 2623 2726 2624 2727 /* RC only: */ 2625 STAMCOUNTER StatRCDynMapCacheMisses; /**< RC: The number of dynamic page mapping cache misses */2626 STAMCOUNTER StatRCDynMapCacheHits; /**< RC: The number of dynamic page mapping cache hits */2627 2728 STAMCOUNTER StatRCInvlPgConflict; /**< RC: Number of times PGMInvalidatePage() detected a mapping conflict. */ 2628 2729 STAMCOUNTER StatRCInvlPgSyncMonCR3; /**< RC: Number of times PGMInvalidatePage() ran into PGM_SYNC_MONITOR_CR3. */ … … 2846 2947 /** Base address of the dynamic page mapping area. 2847 2948 * The array is MM_HYPER_DYNAMIC_SIZE bytes big. 2949 * 2950 * @todo The plan of keeping PGMRCDYNMAP private to PGMRZDynMap.cpp didn't 2951 * work out. Some cleaning up of the initialization that would 2952 * remove this memory is yet to be done... 2848 2953 */ 2849 2954 RCPTRTYPE(uint8_t *) pbDynPageMapBaseGC; 2850 /** The index of the last entry used in the dynamic page mapping area. */ 2851 RTUINT iDynPageMapLast; 2852 /** Cache containing the last entries in the dynamic page mapping area. 2853 * The cache size is covering half of the mapping area. */ 2854 RTHCPHYS aHCPhysDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT + 1)]; 2855 /** Keep a lock counter for the full (!) mapping area. */ 2856 uint32_t aLockedDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT)]; 2857 2955 /** The address of the raw-mode context mapping cache. */ 2956 RCPTRTYPE(PPGMRCDYNMAP) pRCDynMap; 2858 2957 /** The address of the ring-0 mapping cache if we're making use of it. */ 2859 2958 RTR0PTR pvR0DynMapUsed; … … 3052 3151 AssertCompileMemberAlignment(PGM, GCPtrMappingFixed, sizeof(RTGCPTR)); 3053 3152 AssertCompileMemberAlignment(PGM, HCPhysInterPD, 8); 3054 AssertCompileMemberAlignment(PGM, aHCPhysDynPageMapCache, 8);3055 3153 AssertCompileMemberAlignment(PGM, CritSect, 8); 3056 3154 AssertCompileMemberAlignment(PGM, ChunkR3Map, 8); … … 3072 3170 3073 3171 /* R0 only: */ 3074 STAMCOUNTER StatR0DynMapMigrateInvlPg; /**< R0: invlpg in PGMDynMapMigrateAutoSet. */3075 STAMPROFILE StatR0DynMapGCPageInl; /**< R0: Calls to pgmR0DynMapGCPageInlined. */3076 STAMCOUNTER StatR0DynMapGCPageInlHits; /**< R0: Hash table lookup hits. */3077 STAMCOUNTER StatR0DynMapGCPageInlMisses; /**< R0: Misses that falls back to code common with PGMDynMapHCPage. */3078 STAMCOUNTER StatR0DynMapGCPageInlRamHits; /**< R0: 1st ram range hits. */3079 STAMCOUNTER StatR0DynMapGCPageInlRamMisses; /**< R0: 1st ram range misses, takes slow path. */3080 STAMPROFILE StatR0DynMapHCPageInl; /**< R0: Calls to pgmR0DynMapHCPageInlined. */3081 STAMCOUNTER StatR0DynMapHCPageInlHits; /**< R0: Hash table lookup hits. */3082 STAMCOUNTER StatR0DynMapHCPageInlMisses; /**< R0: Misses that falls back to code common with PGMDynMapHCPage. */3083 STAMPROFILE StatR0DynMapHCPage; /**< R0: Calls to PGMDynMapHCPage. */3084 STAMCOUNTER StatR0DynMapSetOptimize; /**< R0: Calls to pgmDynMapOptimizeAutoSet. 
*/3085 STAMCOUNTER StatR0DynMapSetSearchFlushes; /**< R0: Set search restorting to subset flushes. */3086 STAMCOUNTER StatR0DynMapSetSearchHits; /**< R0: Set search hits. */3087 STAMCOUNTER StatR0DynMapSetSearchMisses; /**< R0: Set search misses. */3088 STAMCOUNTER StatR0DynMapPage; /**< R0: Calls to pgmR0DynMapPage. */3089 STAMCOUNTER StatR0DynMapPageHits0; /**< R0: Hits at iPage+0. */3090 STAMCOUNTER StatR0DynMapPageHits1; /**< R0: Hits at iPage+1. */3091 STAMCOUNTER StatR0DynMapPageHits2; /**< R0: Hits at iPage+2. */3092 STAMCOUNTER StatR0DynMapPageInvlPg; /**< R0: invlpg. */3093 STAMCOUNTER StatR0DynMapPageSlow; /**< R0: Calls to pgmR0DynMapPageSlow. */3094 STAMCOUNTER StatR0DynMapPageSlowLoopHits; /**< R0: Hits in the pgmR0DynMapPageSlow search loop. */3095 STAMCOUNTER StatR0DynMapPageSlowLoopMisses; /**< R0: Misses in the pgmR0DynMapPageSlow search loop. */3096 //STAMCOUNTER StatR0DynMapPageSlowLostHits; /**< R0: Lost hits. */3097 STAMCOUNTER StatR0DynMapSubsets; /**< R0: Times PGMDynMapPushAutoSubset was called. */3098 STAMCOUNTER StatR0DynMapPopFlushes; /**< R0: Times PGMDynMapPopAutoSubset flushes the subset. */3099 STAMCOUNTER aStatR0DynMapSetSize[11]; /**< R0: Set size distribution. */3100 3172 3101 3173 /* RZ only: */ … … 3148 3220 STAMCOUNTER StatRZGuestROMWriteHandled; /**< RC/R0: The number of times pgmPhysRomWriteHandler() was successfully called. */ 3149 3221 STAMCOUNTER StatRZGuestROMWriteUnhandled; /**< RC/R0: The number of times pgmPhysRomWriteHandler() was called and we had to fall back to the recompiler */ 3222 STAMCOUNTER StatRZDynMapMigrateInvlPg; /**< RZ: invlpg in PGMR0DynMapMigrateAutoSet. */ 3223 STAMPROFILE StatRZDynMapGCPageInl; /**< RZ: Calls to pgmRZDynMapGCPageInlined. */ 3224 STAMCOUNTER StatRZDynMapGCPageInlHits; /**< RZ: Hash table lookup hits. */ 3225 STAMCOUNTER StatRZDynMapGCPageInlMisses; /**< RZ: Misses that falls back to the code common. */ 3226 STAMCOUNTER StatRZDynMapGCPageInlRamHits; /**< RZ: 1st ram range hits. */ 3227 STAMCOUNTER StatRZDynMapGCPageInlRamMisses; /**< RZ: 1st ram range misses, takes slow path. */ 3228 STAMPROFILE StatRZDynMapHCPageInl; /**< RZ: Calls to pgmRZDynMapHCPageInlined. */ 3229 STAMCOUNTER StatRZDynMapHCPageInlHits; /**< RZ: Hash table lookup hits. */ 3230 STAMCOUNTER StatRZDynMapHCPageInlMisses; /**< RZ: Misses that falls back to the code common. */ 3231 STAMPROFILE StatRZDynMapHCPage; /**< RZ: Calls to pgmRZDynMapHCPageCommon. */ 3232 STAMCOUNTER StatRZDynMapSetOptimize; /**< RZ: Calls to pgmRZDynMapOptimizeAutoSet. */ 3233 STAMCOUNTER StatRZDynMapSetSearchFlushes; /**< RZ: Set search restorting to subset flushes. */ 3234 STAMCOUNTER StatRZDynMapSetSearchHits; /**< RZ: Set search hits. */ 3235 STAMCOUNTER StatRZDynMapSetSearchMisses; /**< RZ: Set search misses. */ 3236 STAMCOUNTER StatRZDynMapPage; /**< RZ: Calls to pgmR0DynMapPage. */ 3237 STAMCOUNTER StatRZDynMapPageHits0; /**< RZ: Hits at iPage+0. */ 3238 STAMCOUNTER StatRZDynMapPageHits1; /**< RZ: Hits at iPage+1. */ 3239 STAMCOUNTER StatRZDynMapPageHits2; /**< RZ: Hits at iPage+2. */ 3240 STAMCOUNTER StatRZDynMapPageInvlPg; /**< RZ: invlpg. */ 3241 STAMCOUNTER StatRZDynMapPageSlow; /**< RZ: Calls to pgmR0DynMapPageSlow. */ 3242 STAMCOUNTER StatRZDynMapPageSlowLoopHits; /**< RZ: Hits in the pgmR0DynMapPageSlow search loop. */ 3243 STAMCOUNTER StatRZDynMapPageSlowLoopMisses; /**< RZ: Misses in the pgmR0DynMapPageSlow search loop. */ 3244 //STAMCOUNTER StatRZDynMapPageSlowLostHits; /**< RZ: Lost hits. 
*/ 3245 STAMCOUNTER StatRZDynMapSubsets; /**< RZ: Times PGMDynMapPushAutoSubset was called. */ 3246 STAMCOUNTER StatRZDynMapPopFlushes; /**< RZ: Times PGMDynMapPopAutoSubset flushes the subset. */ 3247 STAMCOUNTER aStatRZDynMapSetFilledPct[11]; /**< RZ: Set fill distribution, percent. */ 3150 3248 3151 3249 /* HC - R3 and (maybe) R0: */ … … 3270 3368 { 3271 3369 /** Offset to the VM structure. */ 3272 RTINToffVM;3370 int32_t offVM; 3273 3371 /** Offset to the VMCPU structure. */ 3274 RTINToffVCpu;3372 int32_t offVCpu; 3275 3373 /** Offset of the PGM structure relative to VMCPU. */ 3276 RTINToffPGM;3277 RTINTuPadding0; /**< structure size alignment. */3278 3279 #if def VBOX_WITH_2X_4GB_ADDR_SPACE3374 int32_t offPGM; 3375 uint32_t uPadding0; /**< structure size alignment. */ 3376 3377 #if defined(VBOX_WITH_2X_4GB_ADDR_SPACE) || defined(VBOX_WITH_RAW_MODE) 3280 3378 /** Automatically tracked physical memory mapping set. 3281 3379 * Ring-0 and strict raw-mode builds. */ … … 3593 3691 3594 3692 #endif /* IN_RING3 */ 3595 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 3596 int pgmR0DynMapHCPageCommon(PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv); 3693 #if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || IN_RC 3694 int pgmRZDynMapHCPageCommon(PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL); 3695 int pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL); 3696 # ifdef LOG_ENABLED 3697 void pgmRZDynMapUnusedHint(PVMCPU pVCpu, void *pvHint, RT_SRC_POS_DECL); 3698 # else 3699 void pgmRZDynMapUnusedHint(PVMCPU pVCpu, void *pvHint); 3700 # endif 3597 3701 #endif 3598 3702 int pgmPoolAllocEx(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, PGMPOOLACCESS enmAccess, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage, bool fLockPage = false); -
trunk/src/VBox/VMM/VMMAll/MMAllPagePool.cpp
r28800 → r31402

@@ -39 +39 @@
 
 
-#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+#if !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) && !defined(IN_RC)
 
 /**
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
r31170 r31402 936 936 PGMPOOLKIND enmKind; 937 937 938 # if defined(IN_RC)939 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */940 PGMDynLockHCPage(pVM, (uint8_t *)pPdpe);941 # endif942 943 938 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu)) 944 939 { … … 990 985 */ 991 986 ASMReloadCR3(); 992 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdpe);993 987 # endif 988 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe); 994 989 } 995 990 else … … 1524 1519 #endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */ 1525 1520 #if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) 1526 1527 1521 /** 1528 1522 * Performs the lazy mapping of the 32-bit guest PD. … … 1563 1557 return rc; 1564 1558 } 1565 1566 1559 #endif 1567 1560 … … 2272 2265 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) 2273 2266 2274 /** Common worker for PGMDynMapGCPage and PGMDynMapGCPageOff. */ 2275 DECLINLINE(int) pgmDynMapGCPageInternal(PVM pVM, RTGCPHYS GCPhys, void **ppv) 2267 /** 2268 * Common worker for pgmRZDynMapGCPageOffInlined and pgmRZDynMapGCPageV2Inlined. 2269 * 2270 * @returns VBox status code. 2271 * @param pVM The VM handle. 2272 * @param pVCpu The current CPU. 2273 * @param GCPhys The guest physical address of the page to map. The 2274 * offset bits are not ignored. 2275 * @param ppv Where to return the address corresponding to @a GCPhys. 2276 */ 2277 int pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL) 2276 2278 { 2277 2279 pgmLock(pVM); 2278 2280 2279 2281 /* 2280 * Convert it to a writable page and it on to PGMDynMapHCPage.2282 * Convert it to a writable page and it on to the dynamic mapper. 2281 2283 */ 2282 2284 int rc; … … 2287 2289 if (RT_SUCCESS(rc)) 2288 2290 { 2289 //Log(("PGMDynMapGCPage: GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage)); 2290 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 2291 rc = pgmR0DynMapHCPageInlined(VMMGetCpu(pVM), PGM_PAGE_GET_HCPHYS(pPage), ppv); 2292 #else 2293 rc = PGMDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), ppv); 2294 #endif 2291 void *pv; 2292 rc = pgmRZDynMapHCPageInlined(pVCpu, PGM_PAGE_GET_HCPHYS(pPage), &pv RTLOG_COMMA_SRC_POS_ARGS); 2293 if (RT_SUCCESS(rc)) 2294 *ppv = (void *)((uintptr_t)pv | ((uintptr_t)GCPhys & PAGE_OFFSET_MASK)); 2295 2295 } 2296 2296 else … … 2307 2307 } 2308 2308 2309 /**2310 * Temporarily maps one guest page specified by GC physical address.2311 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.2312 *2313 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is2314 * reused after 8 mappings (or perhaps a few more if you score with the cache).2315 *2316 * @returns VBox status.2317 * @param pVM VM handle.2318 * @param GCPhys GC Physical address of the page.2319 * @param ppv Where to store the address of the mapping.2320 */2321 VMMDECL(int) PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv)2322 {2323 AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp\n", GCPhys));2324 return pgmDynMapGCPageInternal(pVM, GCPhys, ppv);2325 }2326 2327 2328 /**2329 * Temporarily maps one guest page specified by unaligned GC physical address.2330 * These pages must have a physical mapping in HC, i.e. 
they cannot be MMIO pages.2331 *2332 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is2333 * reused after 8 mappings (or perhaps a few more if you score with the cache).2334 *2335 * The caller is aware that only the speicifed page is mapped and that really bad things2336 * will happen if writing beyond the page!2337 *2338 * @returns VBox status.2339 * @param pVM VM handle.2340 * @param GCPhys GC Physical address within the page to be mapped.2341 * @param ppv Where to store the address of the mapping address corresponding to GCPhys.2342 */2343 VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)2344 {2345 void *pv;2346 int rc = pgmDynMapGCPageInternal(pVM, GCPhys, &pv);2347 if (RT_SUCCESS(rc))2348 {2349 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));2350 return VINF_SUCCESS;2351 }2352 return rc;2353 }2354 2355 # ifdef IN_RC2356 2357 /**2358 * Temporarily maps one host page specified by HC physical address.2359 *2360 * Be WARNED that the dynamic page mapping area is small, 16 pages, thus the space is2361 * reused after 16 mappings (or perhaps a few more if you score with the cache).2362 *2363 * @returns VINF_SUCCESS, will bail out to ring-3 on failure.2364 * @param pVM VM handle.2365 * @param HCPhys HC Physical address of the page.2366 * @param ppv Where to store the address of the mapping. This is the2367 * address of the PAGE not the exact address corresponding2368 * to HCPhys. Use PGMDynMapHCPageOff if you care for the2369 * page offset.2370 */2371 VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)2372 {2373 AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));2374 2375 /*2376 * Check the cache.2377 */2378 register unsigned iCache;2379 for (iCache = 0;iCache < RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache);iCache++)2380 {2381 static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =2382 {2383 { 0, 9, 10, 11, 12, 13, 14, 15},2384 { 0, 1, 10, 11, 12, 13, 14, 15},2385 { 0, 1, 2, 11, 12, 13, 14, 15},2386 { 0, 1, 2, 3, 12, 13, 14, 15},2387 { 0, 1, 2, 3, 4, 13, 14, 15},2388 { 0, 1, 2, 3, 4, 5, 14, 15},2389 { 0, 1, 2, 3, 4, 5, 6, 15},2390 { 0, 1, 2, 3, 4, 5, 6, 7},2391 { 8, 1, 2, 3, 4, 5, 6, 7},2392 { 8, 9, 2, 3, 4, 5, 6, 7},2393 { 8, 9, 10, 3, 4, 5, 6, 7},2394 { 8, 9, 10, 11, 4, 5, 6, 7},2395 { 8, 9, 10, 11, 12, 5, 6, 7},2396 { 8, 9, 10, 11, 12, 13, 6, 7},2397 { 8, 9, 10, 11, 12, 13, 14, 7},2398 { 8, 9, 10, 11, 12, 13, 14, 15},2399 };2400 AssertCompile(RT_ELEMENTS(au8Trans) == 16);2401 AssertCompile(RT_ELEMENTS(au8Trans[0]) == 8);2402 2403 if (pVM->pgm.s.aHCPhysDynPageMapCache[iCache] == HCPhys)2404 {2405 int iPage = au8Trans[pVM->pgm.s.iDynPageMapLast][iCache];2406 2407 /* The cache can get out of sync with locked entries. 
(10 locked, 2 overwrites its cache position, last = 11, lookup 2 -> page 10 instead of 2) */2408 if ((pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u & X86_PTE_PG_MASK) == HCPhys)2409 {2410 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);2411 *ppv = pv;2412 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCDynMapCacheHits);2413 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));2414 return VINF_SUCCESS;2415 }2416 LogFlow(("Out of sync entry %d\n", iPage));2417 }2418 }2419 AssertCompile(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == 8);2420 AssertCompile((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) == 16);2421 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCDynMapCacheMisses);2422 2423 /*2424 * Update the page tables.2425 */2426 unsigned iPage = pVM->pgm.s.iDynPageMapLast;2427 unsigned i;2428 for (i = 0; i < (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT); i++)2429 {2430 pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);2431 if (!pVM->pgm.s.aLockedDynPageMapCache[iPage])2432 break;2433 iPage++;2434 }2435 AssertRelease(i != (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT));2436 2437 pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;2438 pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u = (uint32_t)HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;2439 pVM->pgm.s.paDynPageMapPaePTEsGC[iPage].u = HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;2440 pVM->pgm.s.aLockedDynPageMapCache[iPage] = 0;2441 2442 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);2443 *ppv = pv;2444 ASMInvalidatePage(pv);2445 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d\n", HCPhys, pv, iPage));2446 return VINF_SUCCESS;2447 }2448 2449 2450 /**2451 * Temporarily lock a dynamic page to prevent it from being reused.2452 *2453 * @param pVM VM handle.2454 * @param GCPage GC address of page2455 */2456 VMMDECL(void) PGMDynLockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)2457 {2458 unsigned iPage;2459 2460 Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));2461 iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;2462 ASMAtomicIncU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);2463 Log4(("PGMDynLockHCPage %RRv iPage=%d\n", GCPage, iPage));2464 }2465 2466 2467 /**2468 * Unlock a dynamic page2469 *2470 * @param pVM VM handle.2471 * @param GCPage GC address of page2472 */2473 VMMDECL(void) PGMDynUnlockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)2474 {2475 unsigned iPage;2476 2477 AssertCompile(RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache) == 2* RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache));2478 AssertCompileMemberSize(VM, pgm.s.aLockedDynPageMapCache, sizeof(uint32_t) * (MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT)));2479 2480 Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));2481 iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;2482 Assert(pVM->pgm.s.aLockedDynPageMapCache[iPage]);2483 ASMAtomicDecU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);2484 Log4(("PGMDynUnlockHCPage %RRv iPage=%d\n", GCPage, iPage));2485 }2486 2487 2488 # ifdef VBOX_STRICT2489 /**2490 * Check for lock leaks.2491 *2492 * @param pVM VM handle.2493 */2494 VMMDECL(void) PGMDynCheckLocks(PVM pVM)2495 {2496 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache); i++)2497 Assert(!pVM->pgm.s.aLockedDynPageMapCache[i]);2498 }2499 # 
endif /* VBOX_STRICT */2500 2501 # endif /* IN_RC */2502 2309 #endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */ 2503 2504 2310 #if !defined(IN_R0) || defined(LOG_ENABLED) 2505 2311 -
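The hunk above retires the old raw-mode dynamic mapping helpers (PGMDynMapGCPageOff, PGMDynMapHCPage, PGMDynLockHCPage/PGMDynUnlockHCPage and the strict-build PGMDynCheckLocks). Their scheme was a 16-page mapping window paired with an 8-entry HCPhys cache, where au8Trans resolves a cache slot to whichever of its two candidate window pages was written most recently, and where pages with a non-zero lock count are skipped when the ring wraps. The standalone sketch below models only that ring-with-locks allocation step; the names and types are made up for illustration and the PTE rewrite plus invlpg is reduced to a comment, so treat it as a simplified model of the retired scheme rather than VirtualBox code.

/* Simplified model of the retired RC dynamic-mapping ring: 16 slots reused
 * round-robin, slots pinned by a lock count are skipped during allocation. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_SLOTS 16u

typedef struct DYNMAPRING
{
    uint64_t aHCPhys[NUM_SLOTS];    /* physical address currently mapped by each slot */
    uint32_t aLockCount[NUM_SLOTS]; /* non-zero means the slot must not be reused yet */
    uint32_t iLast;                 /* last slot handed out */
} DYNMAPRING;

/* Pick the slot to (re)use for HCPhys, skipping locked slots. */
static uint32_t dynMapAllocSlot(DYNMAPRING *pRing, uint64_t HCPhys)
{
    uint32_t iSlot = pRing->iLast;
    for (uint32_t i = 0; i < NUM_SLOTS; i++)
    {
        iSlot = (iSlot + 1) % NUM_SLOTS;
        if (!pRing->aLockCount[iSlot])
        {
            pRing->iLast = iSlot;
            pRing->aHCPhys[iSlot] = HCPhys; /* the real code rewrites the PTE and invalidates the TLB here */
            return iSlot;
        }
    }
    assert(!"all dynamic mapping slots are locked"); /* mirrors the AssertRelease in the removed code */
    return UINT32_MAX;
}

int main(void)
{
    DYNMAPRING Ring = {0};
    Ring.aLockCount[1] = 1; /* pretend slot 1 is locked by a caller */
    printf("first page lands in slot %u\n", dynMapAllocSlot(&Ring, 0x1000));  /* slot 2, slot 1 is skipped */
    printf("second page lands in slot %u\n", dynMapAllocSlot(&Ring, 0x2000)); /* slot 3 */
    return 0;
}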
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r31207 r31402 383 383 *pfLockTaken = false; 384 384 385 # if defined(IN_RC) && defined(VBOX_STRICT)386 PGMDynCheckLocks(pVM);387 # endif388 389 385 # if ( PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT \ 390 386 || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \ … … 433 429 if (uErr & X86_TRAP_PF_RSVD) 434 430 { 431 /** @todo This is not complete code. take locks */ 435 432 Assert(uErr & X86_TRAP_PF_P); 436 433 PPGMPAGE pPage; … … 563 560 return VINF_SUCCESS; 564 561 } 565 #ifndef IN_RC566 562 AssertMsg(GstWalk.Pde.u == GstWalk.pPde->u || GstWalk.pPte->u == GstWalk.pPde->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pde.u, (uint64_t)GstWalk.pPde->u)); 567 563 AssertMsg(GstWalk.Core.fBigPage || GstWalk.Pte.u == GstWalk.pPte->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pte.u, (uint64_t)GstWalk.pPte->u)); 568 #else569 /* Ugly hack, proper fix is comming up later. */570 if ( !(GstWalk.Pde.u == GstWalk.pPde->u || GstWalk.pPte->u == GstWalk.pPde->u)571 || !(GstWalk.Core.fBigPage || GstWalk.Pte.u == GstWalk.pPte->u) )572 {573 rc = PGM_GST_NAME(Walk)(pVCpu, pvFault, &GstWalk);574 if (RT_FAILURE_NP(rc))575 return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerGuestFault)(pVCpu, &GstWalk, uErr));576 }577 #endif578 564 } 579 565 … … 1148 1134 } 1149 1135 1150 # if defined(IN_RC)1151 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */1152 PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);1153 # endif1154 1155 1136 /* 1156 1137 * Get the guest PD entry and calc big page. … … 1295 1276 LogFlow(("Skipping flush for big page containing %RGv (PD=%X .u=%RX64)-> nothing has changed!\n", GCPtrPage, iPDSrc, PdeSrc.u)); 1296 1277 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage4MBPagesSkip)); 1297 # if defined(IN_RC) 1298 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */ 1299 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst); 1300 # endif 1278 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst); 1301 1279 return VINF_SUCCESS; 1302 1280 } … … 1335 1313 } 1336 1314 } 1337 # if defined(IN_RC) 1338 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */ 1339 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst); 1340 # endif 1315 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst); 1341 1316 return rc; 1342 1317 … … 1785 1760 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pPool, pPdptDst->a[iPdpt].u & X86_PDPE_PG_MASK); 1786 1761 Assert(pShwPde); 1787 # endif1788 1789 # if defined(IN_RC)1790 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */1791 PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);1792 1762 # endif 1793 1763 … … 2021 1991 } 2022 1992 } 2023 # if defined(IN_RC) 2024 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */ 2025 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst); 2026 # endif 1993 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst); 2027 1994 return VINF_SUCCESS; 2028 1995 } … … 2050 2017 ASMAtomicWriteSize(pPdeDst, 0); 2051 2018 2052 # if defined(IN_RC) 2053 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. 
*/ 2054 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst); 2055 # endif 2019 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst); 2056 2020 PGM_INVL_VCPU_TLBS(pVCpu); 2057 2021 return VINF_PGM_SYNCPAGE_MODIFIED_PDE; … … 2564 2528 Assert(!PdeDst.n.u1Present); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/ 2565 2529 2566 # if defined(IN_RC)2567 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */2568 PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);2569 # endif2570 2571 2530 /* 2572 2531 * Sync page directory entry. … … 2646 2605 } 2647 2606 ASMAtomicWriteSize(pPdeDst, PdeDst.u); 2648 # if defined(IN_RC) 2649 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst); 2650 # endif 2607 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst); 2651 2608 return VINF_SUCCESS; 2652 2609 } … … 2654 2611 { 2655 2612 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); 2656 # if defined(IN_RC) 2657 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst); 2658 # endif 2613 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst); 2659 2614 return VINF_PGM_SYNC_CR3; 2660 2615 } … … 2687 2642 | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D)); 2688 2643 ASMAtomicWriteSize(pPdeDst, PdeDst.u); 2689 # if defined(IN_RC) 2690 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst); 2691 # endif 2644 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst); 2692 2645 2693 2646 /* … … 2768 2721 2769 2722 /** 2770 * @todo It might be more efficient to sync only a part of the 4MB page (similar to what we do for 4kb PDs). 2723 * @todo It might be more efficient to sync only a part of the 4MB 2724 * page (similar to what we do for 4KB PDs). 2771 2725 */ 2772 2726 … … 2795 2749 } 2796 2750 ASMAtomicWriteSize(pPdeDst, PdeDst.u); 2797 # if defined(IN_RC) 2798 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst); 2799 # endif 2751 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst); 2800 2752 2801 2753 /* … … 3391 3343 # endif 3392 3344 3393 # if defined(IN_RC)3394 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */3395 PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);3396 # endif3397 3398 3345 if (!pPdeDst->n.u1Present) 3399 3346 { … … 3401 3348 if (rc != VINF_SUCCESS) 3402 3349 { 3403 # if defined(IN_RC) 3404 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */ 3405 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst); 3406 # endif 3350 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst); 3407 3351 pgmUnlock(pVM); 3408 3352 AssertRC(rc); … … 3449 3393 } 3450 3394 } 3451 # if defined(IN_RC) 3452 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */ 3453 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst); 3454 # endif 3395 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst); 3455 3396 pgmUnlock(pVM); 3456 3397 return rc; … … 4359 4300 AssertReturn(pPageCR3, VERR_INTERNAL_ERROR_2); 4360 4301 HCPhysGuestCR3 = PGM_PAGE_GET_HCPHYS(pPageCR3); 4361 /** @todo this needs some reworking wrt. locking .*/4302 /** @todo this needs some reworking wrt. locking? */ 4362 4303 # if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) 4363 4304 HCPtrGuestCR3 = NIL_RTHCPTR; -
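Across this file the RC-only PGMDynLockHCPage/PGMDynUnlockHCPage pairs around pPdeDst are replaced by a single PGM_DYNMAP_UNUSED_HINT once the pointer is no longer needed. Judging from the cRefs/cInlinedRefs/cUnrefs fields that show up in PGMRZDynMap.cpp further down, the hint records that one user of the mapping is done, so the auto-set entry becomes reclaimable once the hints balance the references; the macro's exact expansion lives in PGMInternal.h and is not part of this changeset, so the toy model below uses made-up names and only illustrates the counting idea.

/* Toy model of the "unused hint" counting on an auto-set entry. */
#include <stdint.h>
#include <stdio.h>

typedef struct SETENTRY
{
    uint16_t iPage;   /* which mapping slot this entry refers to */
    uint16_t cRefs;   /* references taken on that slot */
    uint16_t cUnrefs; /* "done with it" hints recorded so far */
} SETENTRY;

static void setUnusedHint(SETENTRY *pEntry)
{
    if (pEntry->cUnrefs < pEntry->cRefs) /* hints must never outnumber real references */
        pEntry->cUnrefs++;
}

static int setEntryIsIdle(const SETENTRY *pEntry)
{
    return pEntry->cRefs == pEntry->cUnrefs; /* every reference has been hinted away */
}

int main(void)
{
    SETENTRY Entry = { 7, 2, 0 };  /* slot 7 was referenced twice */
    setUnusedHint(&Entry);
    printf("idle after one hint: %d\n", setEntryIsIdle(&Entry));  /* 0 */
    setUnusedHint(&Entry);
    printf("idle after two hints: %d\n", setEntryIsIdle(&Entry)); /* 1 */
    return 0;
}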
trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp
r31174 r31402 247 247 PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(pVCpu); 248 248 AssertFatal(pShw32BitPd); 249 #ifdef IN_RC /* Lock mapping to prevent it from being reused during pgmPoolFree. */ 250 PGMDynLockHCPage(pVM, (uint8_t *)pShw32BitPd); 251 #endif 249 252 250 /* Free any previous user, unless it's us. */ 253 251 Assert( (pShw32BitPd->a[iNewPDE].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING) … … 260 258 pShw32BitPd->a[iNewPDE].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US 261 259 | (uint32_t)pMap->aPTs[i].HCPhysPT; 262 #ifdef IN_RC 263 /* Unlock dynamic mappings again. */ 264 PGMDynUnlockHCPage(pVM, (uint8_t *)pShw32BitPd); 265 #endif 260 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShw32BitPd); 266 261 break; 267 262 } … … 274 269 PX86PDPT pShwPdpt = pgmShwGetPaePDPTPtr(pVCpu); 275 270 Assert(pShwPdpt); 276 #ifdef IN_RC /* Lock mapping to prevent it from being reused during pgmShwSyncPaePDPtr. */277 PGMDynLockHCPage(pVM, (uint8_t *)pShwPdpt);278 #endif279 271 280 272 /* … … 302 294 } 303 295 Assert(pShwPaePd); 304 #ifdef IN_RC /* Lock mapping to prevent it from being reused during pgmPoolFree. */305 PGMDynLockHCPage(pVM, (uint8_t *)pShwPaePd);306 #endif307 296 308 297 /* … … 357 346 pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING; 358 347 359 #ifdef IN_RC 360 /* Unlock dynamic mappings again. */ 361 PGMDynUnlockHCPage(pVM, (uint8_t *)pShwPaePd); 362 PGMDynUnlockHCPage(pVM, (uint8_t *)pShwPdpt); 363 #endif 348 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShwPaePd); 349 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShwPdpt); 364 350 break; 365 351 } … … 406 392 if ( PGMGetGuestMode(pVCpu) >= PGMMODE_PAE 407 393 && pShwPageCR3 != pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)) 408 {409 394 pCurrentShwPdpt = pgmShwGetPaePDPTPtr(pVCpu); 410 #ifdef IN_RC /* Lock mapping to prevent it from being reused (currently not possible). */411 if (pCurrentShwPdpt)412 PGMDynLockHCPage(pVM, (uint8_t *)pCurrentShwPdpt);413 #endif414 }415 395 416 396 unsigned i = pMap->cPTs; … … 503 483 } 504 484 } 505 #ifdef IN_RC 506 /* Unlock dynamic mappings again. */ 507 if (pCurrentShwPdpt) 508 PGMDynUnlockHCPage(pVM, (uint8_t *)pCurrentShwPdpt); 509 #endif 485 486 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pCurrentShwPdpt); 510 487 } 511 488 #endif /* !IN_RING0 */ -
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
r31208 r31402 738 738 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER); 739 739 740 #if def IN_RC740 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) 741 741 /* 742 742 * Map it by HCPhys. 743 743 */ 744 return PGMDynMapHCPage(pVM, HCPhys, ppv); 745 746 #elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) 747 /* 748 * Map it by HCPhys. 749 */ 750 return pgmR0DynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv); 744 return pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS); 751 745 752 746 #else … … 824 818 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage); 825 819 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg); 826 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 827 pgmR0DynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv); 828 # else 829 PGMDynMapHCPage(pVM, HCPhys, ppv); 830 # endif 820 pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS); 831 821 return VINF_SUCCESS; 832 822 … … 1138 1128 */ 1139 1129 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) 1140 *ppv = pgm DynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK));1130 *ppv = pgmRZDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK) RTLOG_COMMA_SRC_POS); 1141 1131 #else 1142 1132 PPGMPAGEMAPTLBE pTlbe; … … 1176 1166 */ 1177 1167 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) 1178 *ppv = pgm DynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */1168 *ppv = pgmRZDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK) RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */ 1179 1169 #else 1180 1170 PPGMPAGEMAPTLBE pTlbe; … … 1234 1224 if (RT_SUCCESS(rc)) 1235 1225 { 1236 *ppv = pgm DynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */1226 *ppv = pgmRZDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK) RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */ 1237 1227 # if 0 1238 1228 pLock->pvMap = 0; … … 1345 1335 else 1346 1336 { 1347 *ppv = pgm DynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */1337 *ppv = pgmRZDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK) RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */ 1348 1338 # if 0 1349 1339 pLock->pvMap = 0; … … 1493 1483 pLock->u32Dummy = 0; 1494 1484 1495 #else /* IN_RING3 */1485 #else 1496 1486 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap; 1497 1487 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK); -
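The mapping calls in this file collapse the former ring-0/raw-mode split (pgmR0DynMapHCPageInlined versus PGMDynMapHCPage) into single pgmRZDynMapHCPageInlined/pgmRZDynMapHCPageOff calls, and every caller now appends RTLOG_COMMA_SRC_POS so the cache can record which call site took a mapping. Assuming the usual IPRT convention (worth confirming against iprt/log.h), that macro supplies file/line/function arguments when the extra tracking is compiled in and nothing otherwise; the sketch below reproduces the pattern with clearly hypothetical macro and function names.

/* Sketch of the "comma source position" calling convention. */
#include <stdio.h>

#ifdef WITH_SRC_POS_TRACKING
# define MY_SRC_POS_DECL  , const char *pszFile, unsigned iLine, const char *pszFunction
# define MY_COMMA_SRC_POS , __FILE__, __LINE__, __func__
#else
# define MY_SRC_POS_DECL
# define MY_COMMA_SRC_POS
#endif

static void *mapHCPage(unsigned long long HCPhys MY_SRC_POS_DECL)
{
#ifdef WITH_SRC_POS_TRACKING
    printf("mapping %#llx requested by %s(%u) %s\n", HCPhys, pszFile, iLine, pszFunction);
#else
    printf("mapping %#llx\n", HCPhys);
#endif
    return 0; /* the real helper returns the mapping address */
}

int main(void)
{
    mapHCPage(0x2000 MY_COMMA_SRC_POS); /* compiles the same way with or without tracking */
    return 0;
}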
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
r31170 r31402 88 88 } 89 89 90 /** @def PGMPOOL_PAGE_2_LOCKED_PTR91 * Maps a pool page pool into the current context and lock it (RC only).92 *93 * @returns VBox status code.94 * @param pVM The VM handle.95 * @param pPage The pool page.96 *97 * @remark In RC this uses PGMGCDynMapHCPage(), so it will consume of the98 * small page window employeed by that function. Be careful.99 * @remark There is no need to assert on the result.100 */101 #if defined(IN_RC)102 DECLINLINE(void *) PGMPOOL_PAGE_2_LOCKED_PTR(PVM pVM, PPGMPOOLPAGE pPage)103 {104 void *pv = pgmPoolMapPageInlined(pVM, pPage);105 106 /* Make sure the dynamic mapping will not be reused. */107 if (pv)108 PGMDynLockHCPage(pVM, (uint8_t *)pv);109 110 return pv;111 }112 #else113 # define PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage) PGMPOOL_PAGE_2_PTR(pVM, pPage)114 #endif115 116 /** @def PGMPOOL_UNLOCK_PTR117 * Unlock a previously locked dynamic caching (RC only).118 *119 * @returns VBox status code.120 * @param pVM The VM handle.121 * @param pPage The pool page.122 *123 * @remark In RC this uses PGMGCDynMapHCPage(), so it will consume of the124 * small page window employeed by that function. Be careful.125 * @remark There is no need to assert on the result.126 */127 #if defined(IN_RC)128 DECLINLINE(void) PGMPOOL_UNLOCK_PTR(PVM pVM, void *pvPage)129 {130 if (pvPage)131 PGMDynUnlockHCPage(pVM, (uint8_t *)pvPage);132 }133 #else134 # define PGMPOOL_UNLOCK_PTR(pVM, pPage) do {} while (0)135 #endif136 137 90 138 91 /** … … 247 200 { 248 201 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT)); 249 uShw.pv = PGMPOOL_PAGE_2_ LOCKED_PTR(pVM, pPage);202 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage); 250 203 const unsigned iShw = off / sizeof(X86PTE); 251 204 LogFlow(("PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT iShw=%x\n", iShw)); … … 270 223 { 271 224 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT)); 272 uShw.pv = PGMPOOL_PAGE_2_ LOCKED_PTR(pVM, pPage);225 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage); 273 226 if (!((off ^ pPage->GCPhys) & (PAGE_SIZE / 2))) 274 227 { … … 300 253 unsigned iShwPdpt = iGst / 256; 301 254 unsigned iShw = (iGst % 256) * 2; 302 uShw.pv = PGMPOOL_PAGE_2_ LOCKED_PTR(pVM, pPage);255 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage); 303 256 304 257 LogFlow(("pgmPoolMonitorChainChanging PAE for 32 bits: iGst=%x iShw=%x idx = %d page idx=%d\n", iGst, iShw, iShwPdpt, pPage->enmKind - PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD)); … … 363 316 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT: 364 317 { 365 uShw.pv = PGMPOOL_PAGE_2_ LOCKED_PTR(pVM, pPage);318 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage); 366 319 const unsigned iShw = off / sizeof(X86PTEPAE); 367 320 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT)); … … 409 362 case PGMPOOLKIND_32BIT_PD: 410 363 { 411 uShw.pv = PGMPOOL_PAGE_2_ LOCKED_PTR(pVM, pPage);364 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage); 412 365 const unsigned iShw = off / sizeof(X86PTE); // ASSUMING 32-bit guest paging! 
413 366 … … 489 442 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD: 490 443 { 491 uShw.pv = PGMPOOL_PAGE_2_ LOCKED_PTR(pVM, pPage);444 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage); 492 445 const unsigned iShw = off / sizeof(X86PDEPAE); 493 446 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPD)); … … 566 519 const unsigned offPdpt = GCPhysFault - pPage->GCPhys; 567 520 568 uShw.pv = PGMPOOL_PAGE_2_ LOCKED_PTR(pVM, pPage);521 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage); 569 522 const unsigned iShw = offPdpt / sizeof(X86PDPE); 570 523 if (iShw < X86_PG_PAE_PDPE_ENTRIES) /* don't use RT_ELEMENTS(uShw.pPDPT->a), because that's for long mode only */ … … 633 586 { 634 587 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPD)); 635 uShw.pv = PGMPOOL_PAGE_2_ LOCKED_PTR(pVM, pPage);588 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage); 636 589 const unsigned iShw = off / sizeof(X86PDEPAE); 637 590 Assert(!(uShw.pPDPae->a[iShw].u & PGM_PDFLAGS_MAPPING)); … … 673 626 * - messing with the bits of pd pointers without changing the physical address 674 627 */ 675 uShw.pv = PGMPOOL_PAGE_2_ LOCKED_PTR(pVM, pPage);628 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage); 676 629 const unsigned iShw = off / sizeof(X86PDPE); 677 630 if (uShw.pPDPT->a[iShw].n.u1Present) … … 703 656 * - messing with the bits of pd pointers without changing the physical address 704 657 */ 705 uShw.pv = PGMPOOL_PAGE_2_ LOCKED_PTR(pVM, pPage);658 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage); 706 659 const unsigned iShw = off / sizeof(X86PDPE); 707 660 if (uShw.pPML4->a[iShw].n.u1Present) … … 730 683 AssertFatalMsgFailed(("enmKind=%d\n", pPage->enmKind)); 731 684 } 732 PGM POOL_UNLOCK_PTR(pVM, uShw.pv);685 PGM_DYNMAP_UNUSED_HINT_VM(pVM, uShw.pv); 733 686 734 687 /* next */ … … 960 913 while (pRegFrame->rcx) 961 914 { 962 #if def VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0963 uint32_t iPrevSubset = PGM DynMapPushAutoSubset(pVCpu);915 #if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC) 916 uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu); 964 917 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, (RTGCPTR)pu32, uIncrement); 965 PGM DynMapPopAutoSubset(pVCpu, iPrevSubset);918 PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset); 966 919 #else 967 920 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, (RTGCPTR)pu32, uIncrement); … … 1012 965 * Clear all the pages. ASSUMES that pvFault is readable. 
1013 966 */ 1014 #if def VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R01015 uint32_t iPrevSubset = PGM DynMapPushAutoSubset(pVCpu);967 #if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC) 968 uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu); 1016 969 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, DISGetParamSize(pDis, &pDis->param1)); 1017 PGM DynMapPopAutoSubset(pVCpu, iPrevSubset);970 PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset); 1018 971 #else 1019 972 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, DISGetParamSize(pDis, &pDis->param1)); … … 1113 1066 if (pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT) 1114 1067 { 1115 void *pvShw = PGMPOOL_PAGE_2_ LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);1068 void *pvShw = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage); 1116 1069 void *pvGst; 1117 1070 int rc = PGM_GCPHYS_2_PTR(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc); … … 1421 1374 if (pTempPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT) 1422 1375 { 1423 PX86PTPAE pShwPT2 = (PX86PTPAE)PGMPOOL_PAGE_2_ LOCKED_PTR(pPool->CTX_SUFF(pVM), pTempPage);1376 PX86PTPAE pShwPT2 = (PX86PTPAE)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pTempPage); 1424 1377 1425 1378 for (unsigned j = 0; j < RT_ELEMENTS(pShwPT->a); j++) … … 1539 1492 pPage->fDirty = false; 1540 1493 1541 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 1542 uint32_t iPrevSubset = PGMDynMapPushAutoSubset(VMMGetCpu(pVM)); 1494 #if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC) 1495 PVMCPU pVCpu = VMMGetCpu(pVM); 1496 uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu); 1543 1497 #endif 1544 1498 … … 1557 1511 /* Flush those PTEs that have changed. */ 1558 1512 STAM_PROFILE_START(&pPool->StatTrackDeref,a); 1559 void *pvShw = PGMPOOL_PAGE_2_ LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);1513 void *pvShw = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage); 1560 1514 void *pvGst; 1561 1515 bool fFlush; … … 1589 1543 Log(("Removed dirty page %RGp cMods=%d cChanges=%d\n", pPage->GCPhys, pPage->cModifications, cChanges)); 1590 1544 1591 #if def VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R01592 PGM DynMapPopAutoSubset(VMMGetCpu(pVM), iPrevSubset);1545 #if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_R0) || defined(IN_RC) 1546 PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset); 1593 1547 #endif 1594 1548 } … … 1627 1581 * references to physical pages. (the HCPhys linear lookup is *extremely* expensive!) 1628 1582 */ 1629 void *pvShw = PGMPOOL_PAGE_2_ LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);1583 void *pvShw = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage); 1630 1584 void *pvGst; 1631 1585 int rc = PGM_GCPHYS_2_PTR(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc); … … 3352 3306 else 3353 3307 { 3354 # if def VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R03308 # if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC) 3355 3309 /* Start a subset here because pgmPoolTrackFlushGCPhysPTsSlow and 3356 3310 pgmPoolTrackFlushGCPhysPTs will/may kill the pool otherwise. 
*/ 3357 uint32_t iPrevSubset = PGM DynMapPushAutoSubset(pVCpu);3311 uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu); 3358 3312 # endif 3359 3313 … … 3370 3324 *pfFlushTLBs = true; 3371 3325 3372 # if def VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R03373 PGM DynMapPopAutoSubset(pVCpu, iPrevSubset);3326 # if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_R0) || defined(IN_RC) 3327 PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset); 3374 3328 # endif 3375 3329 } … … 3663 3617 AssertFatalMsgFailed(("enmKind=%d iUser=%#x iUserTable=%#x\n", pUserPage->enmKind, pUser->iUser, pUser->iUserTable)); 3664 3618 } 3619 PGM_DYNMAP_UNUSED_HINT_VM(pPool->CTX_SUFF(pVM), u.pau64); 3665 3620 } 3666 3621 … … 4435 4390 * Map the shadow page and take action according to the page kind. 4436 4391 */ 4437 void *pvShw = PGMPOOL_PAGE_2_ LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);4392 void *pvShw = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage); 4438 4393 switch (pPage->enmKind) 4439 4394 { … … 4539 4494 STAM_PROFILE_STOP(&pPool->StatZeroPage, z); 4540 4495 pPage->fZeroed = true; 4541 PGM POOL_UNLOCK_PTR(pPool->CTX_SUFF(pVM), pvShw);4496 PGM_DYNMAP_UNUSED_HINT_VM(pPool->CTX_SUFF(pVM), pvShw); 4542 4497 Assert(!pPage->cPresent); 4543 4498 } … … 4596 4551 } 4597 4552 4598 #if def VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R04553 #if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC) 4599 4554 /* Start a subset so we won't run out of mapping space. */ 4600 4555 PVMCPU pVCpu = VMMGetCpu(pVM); 4601 uint32_t iPrevSubset = PGM DynMapPushAutoSubset(pVCpu);4556 uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu); 4602 4557 #endif 4603 4558 … … 4629 4584 pgmPoolCacheFlushPage(pPool, pPage); 4630 4585 4631 #if def VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R04586 #if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_R0) || defined(IN_RC) 4632 4587 /* Heavy stuff done. */ 4633 PGM DynMapPopAutoSubset(pVCpu, iPrevSubset);4588 PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset); 4634 4589 #endif 4635 4590 -
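The pool's write-monitor and flush paths above now bracket their heavy work with PGMRZDynMapPushAutoSubset/PGMRZDynMapPopAutoSubset in raw-mode context as well as in the 2x4GB ring-0 layout. The discipline is a stack mark over the per-VCPU auto set: push remembers how many entries the set currently holds, the nested code adds whatever temporary mappings it needs, and pop trims the set back to the mark so a long chain of operations cannot overflow it. A minimal model of that mark-and-trim behaviour follows; the names are invented and the real pop also dereferences the pages it drops.

/* Minimal model of the push/pop subset discipline over an auto-mapping set. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SET_MAX 64u

typedef struct AUTOSET
{
    uint32_t cEntries;
    uint32_t aiPages[SET_MAX];
} AUTOSET;

static uint32_t setPushSubset(AUTOSET *pSet) /* remember the current mark */
{
    return pSet->cEntries;
}

static void setAddMapping(AUTOSET *pSet, uint32_t iPage)
{
    assert(pSet->cEntries < SET_MAX);
    pSet->aiPages[pSet->cEntries++] = iPage;
}

static void setPopSubset(AUTOSET *pSet, uint32_t iMark) /* drop everything mapped since the mark */
{
    assert(iMark <= pSet->cEntries);
    pSet->cEntries = iMark;
}

int main(void)
{
    AUTOSET Set = {0};
    setAddMapping(&Set, 1);               /* mapping owned by the caller */
    uint32_t iMark = setPushSubset(&Set);
    setAddMapping(&Set, 2);               /* temporary mappings made by the heavy work */
    setAddMapping(&Set, 3);
    setPopSubset(&Set, iMark);
    printf("entries after pop: %u\n", (unsigned)Set.cEntries); /* back to 1 */
    return 0;
}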
trunk/src/VBox/VMM/VMMAll/TRPMAll.cpp
r30493 r31402 693 693 STAM_PROFILE_ADV_STOP(&pVM->trpm.s.aStatGCTraps[iOrgTrap], o); 694 694 695 CPUMGCCallGuestTrapHandler(pRegFrame, GuestIdte.Gen.u16SegSel | 1, pVM->trpm.s.aGuestTrapHandler[iGate], eflags.u32, ss_r0, (RTRCPTR)esp_r0); 695 PGMRZDynMapReleaseAutoSet(pVCpu); 696 CPUMGCCallGuestTrapHandler(pRegFrame, GuestIdte.Gen.u16SegSel | 1, pVM->trpm.s.aGuestTrapHandler[iGate], 697 eflags.u32, ss_r0, (RTRCPTR)esp_r0); 696 698 /* does not return */ 697 699 #else -
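The PGMRZDynMapReleaseAutoSet added in front of CPUMGCCallGuestTrapHandler matters because that call never returns to this function; without the release, whatever the trap-forwarding path had mapped would stay referenced in the auto set until some later code happened to reset it. The fragment below only illustrates that general rule with invented names: per-trip resources have to be released before a call that does not come back.

/* Illustration only: release trip-local resources before a noreturn call. */
#include <stdio.h>
#include <stdlib.h>

static void releaseAutoSet(void)   { puts("auto set released"); }
static void callGuestHandler(void) { puts("entering guest handler"); exit(0); } /* does not return */

static void forwardTrap(void)
{
    releaseAutoSet();    /* must happen here, nothing after the next call will run */
    callGuestHandler();
    /* not reached */
}

int main(void)
{
    forwardTrap();
    return 0;
}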
trunk/src/VBox/VMM/VMMGC/PGMGC.cpp
r28800 r31402 5 5 6 6 /* 7 * Copyright (C) 2006-2007 Oracle Corporation 7 * Copyright (C) 2006-2010 Oracle Corporation 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as -
trunk/src/VBox/VMM/VMMGC/TRPMGCHandlers.cpp
r31100 r31402 46 46 #include <iprt/assert.h> 47 47 48 48 49 /******************************************************************************* 49 50 * Defined Constants And Macros * … … 130 131 * @param rc The VBox status code to return. 131 132 * @param pRegFrame Pointer to the register frame for the trap. 133 * 134 * @remarks This must not be used for hypervisor traps, only guest traps. 132 135 */ 133 136 static int trpmGCExitTrap(PVM pVM, PVMCPU pVCpu, int rc, PCPUMCTXCORE pRegFrame) … … 231 234 */ 232 235 else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)) 236 { 233 237 #if 1 238 PGMRZDynMapReleaseAutoSet(pVCpu); 239 PGMRZDynMapStartAutoSet(pVCpu); 234 240 rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)); 235 241 #else 236 242 rc = VINF_PGM_SYNC_CR3; 237 243 #endif 244 } 238 245 /* Pending request packets might contain actions that need immediate attention, such as pending hardware interrupts. */ 239 246 else if ( VM_FF_ISPENDING(pVM, VM_FF_REQUEST) … … 246 253 && ( pRegFrame->eflags.Bits.u2IOPL < (unsigned)(pRegFrame->ss & X86_SEL_RPL) || pRegFrame->eflags.Bits.u1VM)) 247 254 , ("rc=%Rrc\neflags=%RX32 ss=%RTsel IOPL=%d\n", rc, pRegFrame->eflags.u32, pRegFrame->ss, pRegFrame->eflags.Bits.u2IOPL)); 255 PGMRZDynMapReleaseAutoSet(pVCpu); 248 256 return rc; 249 257 } … … 270 278 271 279 /* 272 * We currently don't make sure of the X86_DR7_GD bit, but280 * We currently don't make use of the X86_DR7_GD bit, but 273 281 * there might come a time when we do. 274 282 */ 275 if ((uDr6 & X86_DR6_BD) == X86_DR6_BD) 276 { 277 AssertReleaseMsgFailed(("X86_DR6_BD isn't used, but it's set! dr7=%RTreg(%RTreg) dr6=%RTreg\n", 278 ASMGetDR7(), CPUMGetHyperDR7(pVCpu), uDr6)); 279 return VERR_NOT_IMPLEMENTED; 280 } 281 283 AssertReleaseMsgReturn((uDr6 & X86_DR6_BD) != X86_DR6_BD, 284 ("X86_DR6_BD isn't used, but it's set! dr7=%RTreg(%RTreg) dr6=%RTreg\n", 285 ASMGetDR7(), CPUMGetHyperDR7(pVCpu), uDr6), 286 VERR_NOT_IMPLEMENTED); 282 287 AssertReleaseMsg(!(uDr6 & X86_DR6_BT), ("X86_DR6_BT is impossible!\n")); 283 288 … … 285 290 * Now leave the rest to the DBGF. 286 291 */ 292 PGMRZDynMapStartAutoSet(pVCpu); 287 293 int rc = DBGFRZTrap01Handler(pVM, pVCpu, pRegFrame, uDr6); 288 294 if (rc == VINF_EM_RAW_GUEST_TRAP) … … 296 302 297 303 /** 304 * \#DB (Debug event) handler for the hypervisor code. 305 * 306 * This is mostly the same as TRPMGCTrap01Handler, but we skip the PGM auto 307 * mapping set as well as the default trap exit path since they are both really 308 * bad ideas in this context. 309 * 310 * @returns VBox status code. 311 * VINF_SUCCESS means we completely handled this trap, 312 * other codes are passed execution to host context. 313 * 314 * @param pTrpmCpu Pointer to TRPMCPU data (within VM). 315 * @param pRegFrame Pointer to the register frame for the trap. 316 * @internal 317 */ 318 DECLASM(int) TRPMGCHyperTrap01Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame) 319 { 320 RTGCUINTREG uDr6 = ASMGetAndClearDR6(); 321 PVM pVM = TRPMCPU_2_VM(pTrpmCpu); 322 PVMCPU pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu); 323 324 LogFlow(("TRPMGCHyper01: cs:eip=%04x:%08x uDr6=%RTreg\n", pRegFrame->cs, pRegFrame->eip, uDr6)); 325 326 /* 327 * We currently don't make use of the X86_DR7_GD bit, but 328 * there might come a time when we do. 329 */ 330 AssertReleaseMsgReturn((uDr6 & X86_DR6_BD) != X86_DR6_BD, 331 ("X86_DR6_BD isn't used, but it's set! 
dr7=%RTreg(%RTreg) dr6=%RTreg\n", 332 ASMGetDR7(), CPUMGetHyperDR7(pVCpu), uDr6), 333 VERR_NOT_IMPLEMENTED); 334 AssertReleaseMsg(!(uDr6 & X86_DR6_BT), ("X86_DR6_BT is impossible!\n")); 335 336 /* 337 * Now leave the rest to the DBGF. 338 */ 339 int rc = DBGFRZTrap01Handler(pVM, pVCpu, pRegFrame, uDr6); 340 AssertStmt(rc != VINF_EM_RAW_GUEST_TRAP, rc = VERR_INTERNAL_ERROR_3); 341 342 Log6(("TRPMGCHyper01: %Rrc (%04x:%08x %RTreg)\n", rc, pRegFrame->cs, pRegFrame->eip, uDr6)); 343 return rc; 344 } 345 346 347 /** 298 348 * NMI handler, for when we are using NMIs to debug things. 299 349 * … … 311 361 LogFlow(("TRPMGCTrap02Handler: cs:eip=%04x:%08x\n", pRegFrame->cs, pRegFrame->eip)); 312 362 RTLogComPrintf("TRPMGCTrap02Handler: cs:eip=%04x:%08x\n", pRegFrame->cs, pRegFrame->eip); 363 return VERR_TRPM_DONT_PANIC; 364 } 365 366 367 /** 368 * NMI handler, for when we are using NMIs to debug things. 369 * 370 * This is the handler we're most likely to hit when the NMI fires (it is 371 * unlikely that we'll be stuck in guest code). 372 * 373 * @returns VBox status code. 374 * VINF_SUCCESS means we completely handled this trap, 375 * other codes are passed execution to host context. 376 * 377 * @param pTrpmCpu Pointer to TRPMCPU data (within VM). 378 * @param pRegFrame Pointer to the register frame for the trap. 379 * @internal 380 * @remark This is not hooked up unless you're building with VBOX_WITH_NMI defined. 381 */ 382 DECLASM(int) TRPMGCHyperTrap02Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame) 383 { 384 LogFlow(("TRPMGCHyperTrap02Handler: cs:eip=%04x:%08x\n", pRegFrame->cs, pRegFrame->eip)); 385 RTLogComPrintf("TRPMGCHyperTrap02Handler: cs:eip=%04x:%08x\n", pRegFrame->cs, pRegFrame->eip); 313 386 return VERR_TRPM_DONT_PANIC; 314 387 } … … 332 405 PVMCPU pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu); 333 406 int rc; 334 335 /* 336 * Both PATM are using INT3s, let them have a go first. 407 PGMRZDynMapStartAutoSet(pVCpu); 408 409 /* 410 * PATM is using INT3s, let them have a go first. 337 411 */ 338 412 if ( (pRegFrame->ss & X86_SEL_RPL) == 1 … … 357 431 358 432 /** 433 * \#BP (Breakpoint) handler. 434 * 435 * This is similar to TRPMGCTrap03Handler but we bits which are potentially 436 * harmful to us (common trap exit and the auto mapping set). 437 * 438 * @returns VBox status code. 439 * VINF_SUCCESS means we completely handled this trap, 440 * other codes are passed execution to host context. 441 * 442 * @param pTrpmCpu Pointer to TRPMCPU data (within VM). 443 * @param pRegFrame Pointer to the register frame for the trap. 444 * @internal 445 */ 446 DECLASM(int) TRPMGCHyperTrap03Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame) 447 { 448 LogFlow(("TRPMGCHyper03: %04x:%08x\n", pRegFrame->cs, pRegFrame->eip)); 449 PVM pVM = TRPMCPU_2_VM(pTrpmCpu); 450 PVMCPU pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu); 451 452 /* 453 * Hand it over to DBGF. 454 */ 455 int rc = DBGFRZTrap03Handler(pVM, pVCpu, pRegFrame); 456 AssertStmt(rc != VINF_EM_RAW_GUEST_TRAP, rc = VERR_INTERNAL_ERROR_3); 457 458 Log6(("TRPMGCHyper03: %Rrc (%04x:%08x)\n", rc, pRegFrame->cs, pRegFrame->eip)); 459 return rc; 460 } 461 462 463 /** 359 464 * Trap handler for illegal opcode fault (\#UD). 360 465 * … … 373 478 PVMCPU pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu); 374 479 int rc; 480 PGMRZDynMapStartAutoSet(pVCpu); 375 481 376 482 if (CPUMGetGuestCPL(pVCpu, pRegFrame) == 0) … … 402 508 /* 403 509 * UD2 in a patch? 510 * Note! PATMGCHandleIllegalInstrTrap doesn't always return. 
404 511 */ 405 512 if ( Cpu.pCurInstr->opcode == OP_ILLUD2 406 513 && PATMIsPatchGCAddr(pVM, pRegFrame->eip)) 407 514 { 515 LogFlow(("TRPMGCTrap06Handler: -> PATMGCHandleIllegalInstrTrap\n")); 408 516 rc = PATMGCHandleIllegalInstrTrap(pVM, pRegFrame); 409 517 /** @todo These tests are completely unnecessary, should just follow the … … 439 547 else if (Cpu.pCurInstr->opcode == OP_MONITOR) 440 548 { 549 LogFlow(("TRPMGCTrap06Handler: -> EMInterpretInstructionCPU\n")); 441 550 uint32_t cbIgnored; 442 551 rc = EMInterpretInstructionCPU(pVM, pVCpu, &Cpu, pRegFrame, PC, &cbIgnored); … … 446 555 /* Never generate a raw trap here; it might be an instruction, that requires emulation. */ 447 556 else 557 { 558 LogFlow(("TRPMGCTrap06Handler: -> VINF_EM_RAW_EMULATE_INSTR\n")); 448 559 rc = VINF_EM_RAW_EMULATE_INSTR; 560 } 449 561 } 450 562 else 451 563 { 564 LogFlow(("TRPMGCTrap06Handler: -> TRPMForwardTrap\n")); 452 565 rc = TRPMForwardTrap(pVCpu, pRegFrame, 0x6, 0, TRPM_TRAP_NO_ERRORCODE, TRPM_TRAP, 0x6); 453 566 Assert(rc == VINF_EM_RAW_GUEST_TRAP); … … 478 591 PVM pVM = TRPMCPU_2_VM(pTrpmCpu); 479 592 PVMCPU pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu); 593 PGMRZDynMapStartAutoSet(pVCpu); 480 594 481 595 int rc = CPUMHandleLazyFPU(pVCpu); … … 500 614 { 501 615 LogFlow(("TRPMGC0b: %04x:%08x\n", pRegFrame->cs, pRegFrame->eip)); 502 PVM pVM = TRPMCPU_2_VM(pTrpmCpu); 616 PVM pVM = TRPMCPU_2_VM(pTrpmCpu); 617 PVMCPU pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu); 618 PGMRZDynMapStartAutoSet(pVCpu); 503 619 504 620 /* … … 574 690 pTrpmCpu->uActiveVector = ~0; 575 691 Log6(("TRPMGC0b: %Rrc (%04x:%08x) (CG)\n", VINF_EM_RAW_RING_SWITCH, pRegFrame->cs, pRegFrame->eip)); 692 PGMRZDynMapReleaseAutoSet(pVCpu); 576 693 return VINF_EM_RAW_RING_SWITCH; 577 694 } … … 582 699 */ 583 700 Log6(("TRPMGC0b: %Rrc (%04x:%08x)\n", VINF_EM_RAW_GUEST_TRAP, pRegFrame->cs, pRegFrame->eip)); 701 PGMRZDynMapReleaseAutoSet(pVCpu); 584 702 return VINF_EM_RAW_GUEST_TRAP; 585 703 } … … 933 1051 LogFlow(("TRPMGC0d: %04x:%08x err=%x\n", pRegFrame->cs, pRegFrame->eip, (uint32_t)pVCpu->trpm.s.uActiveErrorCode)); 934 1052 1053 PGMRZDynMapStartAutoSet(pVCpu); 935 1054 int rc = trpmGCTrap0dHandler(pVM, pTrpmCpu, pRegFrame); 936 1055 switch (rc) … … 994 1113 * This is all PGM stuff. 995 1114 */ 1115 PGMRZDynMapStartAutoSet(pVCpu); 996 1116 int rc = PGMTrap0eHandler(pVCpu, pVCpu->trpm.s.uActiveErrorCode, pRegFrame, (RTGCPTR)pVCpu->trpm.s.uActiveCR2); 997 1117 switch (rc) … … 1009 1129 case VINF_EM_RAW_GUEST_TRAP: 1010 1130 if (PATMIsPatchGCAddr(pVM, pRegFrame->eip)) 1131 { 1132 PGMRZDynMapReleaseAutoSet(pVCpu); 1011 1133 return VINF_PATM_PATCH_TRAP_PF; 1134 } 1012 1135 1013 1136 rc = TRPMForwardTrap(pVCpu, pRegFrame, 0xE, 0, TRPM_TRAP_HAS_ERRORCODE, TRPM_TRAP, 0xe); -
trunk/src/VBox/VMM/VMMGC/TRPMGCHandlersA.asm
r28800 r31402 4 4 ; 5 5 6 ; Copyright (C) 2006-20 07Oracle Corporation6 ; Copyright (C) 2006-2010 Oracle Corporation 7 7 ; 8 8 ; This file is part of VirtualBox Open Source Edition (OSE), as … … 34 34 extern IMPNAME(g_TRPM) ; where there is a pointer to the real symbol. PE imports 35 35 extern IMPNAME(g_TRPMCPU) ; are a bit confusing at first... :-) 36 extern IMPNAME(g_VM) 36 extern IMPNAME(g_VM) 37 37 extern NAME(CPUMGCRestoreInt) 38 38 extern NAME(cpumHandleLazyFPUAsm) 39 39 extern NAME(CPUMHyperSetCtxCore) 40 40 extern NAME(trpmGCTrapInGeneric) 41 extern NAME(TRPMGCHyperTrap0bHandler)42 extern NAME(TRPMGCHyperTrap0dHandler)43 extern NAME(TRPMGCHyperTrap0eHandler)44 41 extern NAME(TRPMGCTrap01Handler) 42 extern NAME(TRPMGCHyperTrap01Handler) 45 43 %ifdef VBOX_WITH_NMI 46 44 extern NAME(TRPMGCTrap02Handler) 45 extern NAME(TRPMGCHyperTrap02Handler) 47 46 %endif 48 47 extern NAME(TRPMGCTrap03Handler) 48 extern NAME(TRPMGCHyperTrap03Handler) 49 49 extern NAME(TRPMGCTrap06Handler) 50 extern NAME(TRPMGCTrap07Handler) 50 51 extern NAME(TRPMGCTrap0bHandler) 52 extern NAME(TRPMGCHyperTrap0bHandler) 51 53 extern NAME(TRPMGCTrap0dHandler) 54 extern NAME(TRPMGCHyperTrap0dHandler) 52 55 extern NAME(TRPMGCTrap0eHandler) 53 extern NAME(TRPMGC Trap07Handler)56 extern NAME(TRPMGCHyperTrap0eHandler) 54 57 55 58 ;; IMPORTANT all COM_ functions trashes esi, some edi and the LOOP_SHORT_WHILE kills ecx. … … 71 74 ; ============================================================= 72 75 dd 0 ; 0 - #DE - F - N - Divide error 73 dd NAME(TRPMGC Trap01Handler); 1 - #DB - F/T - N - Single step, INT 1 instruction76 dd NAME(TRPMGCHyperTrap01Handler) ; 1 - #DB - F/T - N - Single step, INT 1 instruction 74 77 %ifdef VBOX_WITH_NMI 75 dd NAME(TRPMGC Trap02Handler); 2 - - I - N - Non-Maskable Interrupt (NMI)78 dd NAME(TRPMGCHyperTrap02Handler) ; 2 - - I - N - Non-Maskable Interrupt (NMI) 76 79 %else 77 80 dd 0 ; 2 - - I - N - Non-Maskable Interrupt (NMI) 78 81 %endif 79 dd NAME(TRPMGC Trap03Handler); 3 - #BP - T - N - Breakpoint, INT 3 instruction.82 dd NAME(TRPMGCHyperTrap03Handler) ; 3 - #BP - T - N - Breakpoint, INT 3 instruction. 80 83 dd 0 ; 4 - #OF - T - N - Overflow, INTO instruction. 81 84 dd 0 ; 5 - #BR - F - N - BOUND Range Exceeded, BOUND instruction. … … 271 274 mov [esp + CPUMCTXCORE.eflags], eax 272 275 273 %if GC_ARCH_BITS == 64 276 %if GC_ARCH_BITS == 64 274 277 ; zero out the high dwords 275 278 mov dword [esp + CPUMCTXCORE.eax + 4], 0 … … 775 778 mov [esp + CPUMCTXCORE.ss], eax 776 779 777 %if GC_ARCH_BITS == 64 780 %if GC_ARCH_BITS == 64 778 781 ; zero out the high dwords 779 782 mov dword [esp + CPUMCTXCORE.eax + 4], 0 -
trunk/src/VBox/VMM/VMMGC/VMMGC.cpp
r29250 r31402 5 5 6 6 /* 7 * Copyright (C) 2006-20 07Oracle Corporation7 * Copyright (C) 2006-2010 Oracle Corporation 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 91 91 AssertRCReturn(rc, rc); 92 92 93 rc = PGMRCDynMapInit(pVM); 94 AssertRCReturn(rc, rc); 93 95 return VINF_SUCCESS; 94 96 } -
trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp
r30241 r31402 1094 1094 1095 1095 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE 1096 bool fStartedSet = PGM DynMapStartOrMigrateAutoSet(pVCpu);1096 bool fStartedSet = PGMR0DynMapStartOrMigrateAutoSet(pVCpu); 1097 1097 #endif 1098 1098 … … 1107 1107 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE 1108 1108 if (fStartedSet) 1109 PGM DynMapReleaseAutoSet(pVCpu);1109 PGMRZDynMapReleaseAutoSet(pVCpu); 1110 1110 #endif 1111 1111 … … 1209 1209 1210 1210 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE 1211 PGM DynMapStartAutoSet(pVCpu);1211 PGMRZDynMapStartAutoSet(pVCpu); 1212 1212 #endif 1213 1213 … … 1217 1217 1218 1218 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE 1219 PGM DynMapReleaseAutoSet(pVCpu);1219 PGMRZDynMapReleaseAutoSet(pVCpu); 1220 1220 #endif 1221 1221 return rc; -
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
r31394 r31402 2565 2565 #endif 2566 2566 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 2567 PGM DynMapFlushAutoSet(pVCpu);2567 PGMRZDynMapFlushAutoSet(pVCpu); 2568 2568 #endif 2569 2569 -
trunk/src/VBox/VMM/VMMRZ/PGMRZDynMap.cpp
r31270 r31402 1 1 /* $Id$ */ 2 2 /** @file 3 * PGM - Page Manager and Monitor, ring-0dynamic mapping cache.3 * PGM - Page Manager and Monitor, dynamic mapping cache. 4 4 */ 5 5 6 6 /* 7 * Copyright (C) 2008 Oracle Corporation7 * Copyright (C) 2008-2010 Oracle Corporation 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 16 16 */ 17 17 18 18 19 /******************************************************************************* 19 20 * Internal Functions * 20 21 *******************************************************************************/ 21 #define LOG_GROUP LOG_GROUP_PGM 22 #define LOG_GROUP LOG_GROUP_PGM_DYNMAP 22 23 #include <VBox/pgm.h> 23 24 #include "../PGMInternal.h" 24 25 #include <VBox/vm.h> 25 26 #include "../PGMInline.h" 27 #include <VBox/err.h> 28 #include <VBox/param.h> 26 29 #include <VBox/sup.h> 27 #include <VBox/err.h>28 30 #include <iprt/asm.h> 29 31 #include <iprt/asm-amd64-x86.h> 30 #include <iprt/alloc.h>31 32 #include <iprt/assert.h> 32 #include <iprt/cpuset.h> 33 #include <iprt/memobj.h> 34 #include <iprt/mp.h> 35 #include <iprt/semaphore.h> 36 #include <iprt/spinlock.h> 33 #ifndef IN_RC 34 # include <iprt/cpuset.h> 35 # include <iprt/mem.h> 36 # include <iprt/memobj.h> 37 # include <iprt/mp.h> 38 # include <iprt/semaphore.h> 39 # include <iprt/spinlock.h> 40 #endif 37 41 #include <iprt/string.h> 38 42 … … 41 45 * Defined Constants And Macros * 42 46 *******************************************************************************/ 47 #ifdef IN_RING0 43 48 /** The max size of the mapping cache (in pages). */ 44 # define PGMR0DYNMAP_MAX_PAGES((16*_1M) >> PAGE_SHIFT)49 # define PGMR0DYNMAP_MAX_PAGES ((16*_1M) >> PAGE_SHIFT) 45 50 /** The small segment size that is adopted on out-of-memory conditions with a 46 51 * single big segment. */ 47 # define PGMR0DYNMAP_SMALL_SEG_PAGES12852 # define PGMR0DYNMAP_SMALL_SEG_PAGES 128 48 53 /** The number of pages we reserve per CPU. */ 49 # define PGMR0DYNMAP_PAGES_PER_CPU25654 # define PGMR0DYNMAP_PAGES_PER_CPU 256 50 55 /** The minimum number of pages we reserve per CPU. 51 56 * This must be equal or larger than the autoset size. */ 52 #define PGMR0DYNMAP_PAGES_PER_CPU_MIN 64 57 # define PGMR0DYNMAP_PAGES_PER_CPU_MIN 64 58 /** Calcs the overload threshold (safety margin). Current set at 50%. */ 59 # define PGMR0DYNMAP_CALC_OVERLOAD(cPages) ((cPages) / 2) 53 60 /** The number of guard pages. 54 61 * @remarks Never do tuning of the hashing or whatnot with a strict build! */ 55 #if defined(VBOX_STRICT) 56 # define PGMR0DYNMAP_GUARD_PAGES 1 57 #else 58 # define PGMR0DYNMAP_GUARD_PAGES 0 59 #endif 62 # if defined(VBOX_STRICT) 63 # define PGMR0DYNMAP_GUARD_PAGES 1 64 # else 65 # define PGMR0DYNMAP_GUARD_PAGES 0 66 # endif 67 #endif /* IN_RING0 */ 60 68 /** The dummy physical address of guard pages. */ 61 69 #define PGMR0DYNMAP_GUARD_PAGE_HCPHYS UINT32_C(0x7777feed) … … 66 74 * The alternative is to replace the entire PTE with an bad not-present 67 75 * PTE. Either way, XNU will screw us. :-/ */ 68 # define PGMR0DYNMAP_GUARD_NP76 # define PGMR0DYNMAP_GUARD_NP 69 77 #endif 70 78 /** The dummy PTE value for a page. */ … … 72 80 /** The dummy PTE value for a page. */ 73 81 #define PGMR0DYNMAP_GUARD_PAGE_PAE_PTE UINT64_MAX /*X86_PTE_PAE_PG_MASK*/ 74 /** Calcs the overload threshold. Current set at 50%. */ 75 #define PGMR0DYNMAP_CALC_OVERLOAD(cPages) ((cPages) / 2) 76 77 #if 0 78 /* Assertions causes panics if preemption is disabled, this can be used to work around that. 
*/ 79 //#define RTSpinlockAcquire(a,b) do {} while (0) 80 //#define RTSpinlockRelease(a,b) do {} while (0) 81 #endif 82 83 #ifdef IN_RING0 /* Note! Assertions causes panics if preemption is disabled, 84 * disable this to work around that. */ 85 /** 86 * Acquire the spinlock. 87 * This will declare a temporary variable and expands to two statements! 88 */ 89 # define PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis) \ 90 RTSPINLOCKTMP MySpinlockTmp = RTSPINLOCKTMP_INITIALIZER; \ 91 RTSpinlockAcquire((pThis)->hSpinlock, &MySpinlockTmp) 92 /** 93 * Releases the spinlock. 94 */ 95 # define PGMRZDYNMAP_SPINLOCK_RELEASE(pThis) \ 96 RTSpinlockRelease((pThis)->hSpinlock, &MySpinlockTmp) 97 98 /** 99 * Re-acquires the spinlock. 100 */ 101 # define PGMRZDYNMAP_SPINLOCK_REACQUIRE(pThis) \ 102 RTSpinlockAcquire((pThis)->hSpinlock, &MySpinlockTmp) 103 #else 104 # define PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis) do { } while (0) 105 # define PGMRZDYNMAP_SPINLOCK_RELEASE(pThis) do { } while (0) 106 # define PGMRZDYNMAP_SPINLOCK_REACQUIRE(pThis) do { } while (0) 107 #endif 108 82 109 83 110 /** Converts a PGMCPUM::AutoSet pointer into a PVMCPU. */ 84 #define PGMR 0DYNMAP_2_VMCPU(pSet)(RT_FROM_MEMBER(pSet, VMCPU, pgm.s.AutoSet))111 #define PGMRZDYNMAP_SET_2_VMCPU(pSet) (RT_FROM_MEMBER(pSet, VMCPU, pgm.s.AutoSet)) 85 112 86 113 /** Converts a PGMCPUM::AutoSet pointer into a PVM. */ 87 #define PGMR0DYNMAP_2_VM(pSet) (PGMR0DYNMAP_2_VMCPU(pSet)->CTX_SUFF(pVM)) 114 #define PGMRZDYNMAP_SET_2_VM(pSet) (PGMRZDYNMAP_SET_2_VMCPU(pSet)->CTX_SUFF(pVM)) 115 116 /** Converts a PGMCPUM::AutoSet pointer into a PVM. */ 117 #ifdef IN_RC 118 # define PGMRZDYNMAP_SET_2_DYNMAP(pSet) (PGMRZDYNMAP_SET_2_VM(pSet)->pgm.s.pRCDynMap) 119 #else 120 # define PGMRZDYNMAP_SET_2_DYNMAP(pSet) (g_pPGMR0DynMap) 121 #endif 122 123 /** 124 * Gets the set index of the current CPU. 125 * 126 * This always returns 0 when in raw-mode context because there is only ever 127 * one EMT in that context (at least presently). 128 */ 129 #ifdef IN_RC 130 # define PGMRZDYNMAP_CUR_CPU() (0) 131 #else 132 # define PGMRZDYNMAP_CUR_CPU() RTMpCpuIdToSetIndex(RTMpCpuId()) 133 #endif 134 135 /** PGMRZDYNMAP::u32Magic. (Jens Christian Bugge Wesseltoft) */ 136 #define PGMRZDYNMAP_MAGIC UINT32_C(0x19640201) 137 138 139 /** Zaps an set entry. */ 140 #define PGMRZDYNMAP_ZAP_ENTRY(pEntry) \ 141 do \ 142 { \ 143 (pEntry)->iPage = UINT16_MAX; \ 144 (pEntry)->cRefs = 0; \ 145 (pEntry)->cInlinedRefs = 0; \ 146 (pEntry)->cUnrefs = 0; \ 147 } while (0) 88 148 89 149 … … 91 151 * Structures and Typedefs * 92 152 *******************************************************************************/ 153 #ifdef IN_RING0 93 154 /** 94 155 * Ring-0 dynamic mapping cache segment. … … 125 186 * Ring-0 dynamic mapping cache entry. 126 187 * 127 * This structure tracks188 * @sa PGMRZDYNMAPENTRY, PGMRCDYNMAPENTRY. 128 189 */ 129 190 typedef struct PGMR0DYNMAPENTRY … … 147 208 void *pv; 148 209 } uPte; 210 # ifndef IN_RC 149 211 /** CPUs that haven't invalidated this entry after it's last update. */ 150 212 RTCPUSET PendingSet; 213 # endif 151 214 } PGMR0DYNMAPENTRY; 152 /** Pointer to a ring-0 dynamic mapping cache entry. */ 215 /** Pointer a mapping cache entry for the ring-0. 216 * @sa PPGMRZDYNMAPENTRY, PPGMRCDYNMAPENTRY, */ 153 217 typedef PGMR0DYNMAPENTRY *PPGMR0DYNMAPENTRY; 154 218 155 219 156 220 /** 157 * Ring-0 dynamic mapping cache. 158 * 159 * This is initialized during VMMR0 module init but no segments are allocated at 160 * that time. 
Segments will be added when the first VM is started and removed 161 * again when the last VM shuts down, thus avoid consuming memory while dormant. 162 * At module termination, the remaining bits will be freed up. 221 * Dynamic mapping cache for ring-0. 222 * 223 * This is initialized during VMMR0 module init but no segments are allocated 224 * at that time. Segments will be added when the first VM is started and 225 * removed again when the last VM shuts down, thus avoid consuming memory while 226 * dormant. At module termination, the remaining bits will be freed up. 227 * 228 * @sa PPGMRZDYNMAP, PGMRCDYNMAP. 163 229 */ 164 230 typedef struct PGMR0DYNMAP 165 231 { 166 /** The usual magic number / eye catcher (PGMR 0DYNMAP_MAGIC). */232 /** The usual magic number / eye catcher (PGMRZDYNMAP_MAGIC). */ 167 233 uint32_t u32Magic; 234 # ifndef IN_RC 168 235 /** Spinlock serializing the normal operation of the cache. */ 169 236 RTSPINLOCK hSpinlock; 237 # endif 170 238 /** Array for tracking and managing the pages. */ 171 239 PPGMR0DYNMAPENTRY paPages; … … 180 248 * This is maintained to get trigger adding of more mapping space. */ 181 249 uint32_t cMaxLoad; 250 # ifndef IN_RC 182 251 /** Initialization / termination lock. */ 183 252 RTSEMFASTMUTEX hInitLock; 253 # endif 184 254 /** The number of guard pages. */ 185 255 uint32_t cGuardPages; 186 256 /** The number of users (protected by hInitLock). */ 187 257 uint32_t cUsers; 258 # ifndef IN_RC 188 259 /** Array containing a copy of the original page tables. 189 260 * The entries are either X86PTE or X86PTEPAE according to fLegacyMode. */ … … 193 264 /** The paging mode. */ 194 265 SUPPAGINGMODE enmPgMode; 266 # endif 195 267 } PGMR0DYNMAP; 196 /** Pointer to the ring-0 dynamic mapping cache */197 typedef PGMR0DYNMAP *PPGMR0DYNMAP;198 199 /** PGMR0DYNMAP::u32Magic. (Jens Christian Bugge Wesseltoft) */200 #define PGMR0DYNMAP_MAGIC 0x19640201201 268 202 269 … … 228 295 /** Pointer to paging level data. */ 229 296 typedef PGMR0DYNMAPPGLVL *PPGMR0DYNMAPPGLVL; 297 #endif 298 299 /** Mapping cache entry for the current context. 300 * @sa PGMR0DYNMAPENTRY, PGMRCDYNMAPENTRY */ 301 typedef CTX_MID(PGM,DYNMAPENTRY) PGMRZDYNMAPENTRY; 302 /** Pointer a mapping cache entry for the current context. 303 * @sa PGMR0DYNMAPENTRY, PGMRCDYNMAPENTRY */ 304 typedef PGMRZDYNMAPENTRY *PPGMRZDYNMAPENTRY; 305 306 /** Pointer the mapping cache instance for the current context. 307 * @sa PGMR0DYNMAP, PGMRCDYNMAP */ 308 typedef CTX_MID(PGM,DYNMAP) *PPGMRZDYNMAP; 309 230 310 231 311 … … 233 313 * Global Variables * 234 314 *******************************************************************************/ 315 #ifdef IN_RING0 235 316 /** Pointer to the ring-0 dynamic mapping cache. */ 236 static PPGMR0DYNMAP g_pPGMR0DynMap; 317 static PGMR0DYNMAP *g_pPGMR0DynMap; 318 #endif 237 319 /** For overflow testing. 
*/ 238 320 static bool g_fPGMR0DynMapTestRunning = false; … … 242 324 * Internal Functions * 243 325 *******************************************************************************/ 244 static void pgmR0DynMapReleasePage(PPGMR0DYNMAP pThis, uint32_t iPage, uint32_t cRefs); 245 static int pgmR0DynMapSetup(PPGMR0DYNMAP pThis); 246 static int pgmR0DynMapExpand(PPGMR0DYNMAP pThis); 247 static void pgmR0DynMapTearDown(PPGMR0DYNMAP pThis); 326 static void pgmRZDynMapReleasePage(PPGMRZDYNMAP pThis, uint32_t iPage, uint32_t cRefs); 327 #ifdef IN_RING0 328 static int pgmR0DynMapSetup(PPGMRZDYNMAP pThis); 329 static int pgmR0DynMapExpand(PPGMRZDYNMAP pThis); 330 static void pgmR0DynMapTearDown(PPGMRZDYNMAP pThis); 331 #endif 248 332 #if 0 /*def DEBUG*/ 249 333 static int pgmR0DynMapTest(PVM pVM); … … 252 336 253 337 /** 338 * Initializes the auto mapping sets for a VM. 339 * 340 * @returns VINF_SUCCESS on success, VERR_INTERNAL_ERROR on failure. 341 * @param pVM The VM in question. 342 */ 343 static int pgmRZDynMapInitAutoSetsForVM(PVM pVM) 344 { 345 VMCPUID idCpu = pVM->cCpus; 346 AssertReturn(idCpu > 0 && idCpu <= VMM_MAX_CPU_COUNT, VERR_INTERNAL_ERROR); 347 while (idCpu-- > 0) 348 { 349 PPGMMAPSET pSet = &pVM->aCpus[idCpu].pgm.s.AutoSet; 350 uint32_t j = RT_ELEMENTS(pSet->aEntries); 351 while (j-- > 0) 352 { 353 pSet->aEntries[j].pvPage = NULL; 354 pSet->aEntries[j].HCPhys = NIL_RTHCPHYS; 355 PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[j]); 356 } 357 pSet->cEntries = PGMMAPSET_CLOSED; 358 pSet->iSubset = UINT32_MAX; 359 pSet->iCpu = -1; 360 memset(&pSet->aiHashTable[0], 0xff, sizeof(pSet->aiHashTable)); 361 } 362 363 return VINF_SUCCESS; 364 } 365 366 367 #ifdef IN_RING0 368 369 /** 254 370 * Initializes the ring-0 dynamic mapping cache. 255 371 * … … 263 379 * Create and initialize the cache instance. 264 380 */ 265 PPGMR 0DYNMAP pThis = (PPGMR0DYNMAP)RTMemAllocZ(sizeof(*pThis));381 PPGMRZDYNMAP pThis = (PPGMRZDYNMAP)RTMemAllocZ(sizeof(*pThis)); 266 382 AssertLogRelReturn(pThis, VERR_NO_MEMORY); 267 383 int rc = VINF_SUCCESS; … … 295 411 if (RT_SUCCESS(rc)) 296 412 { 297 pThis->u32Magic = PGMR 0DYNMAP_MAGIC;413 pThis->u32Magic = PGMRZDYNMAP_MAGIC; 298 414 g_pPGMR0DynMap = pThis; 299 415 return VINF_SUCCESS; … … 322 438 * is just a mirror image of PGMR0DynMapInit. 323 439 */ 324 PPGMR 0DYNMAP pThis = g_pPGMR0DynMap;440 PPGMRZDYNMAP pThis = g_pPGMR0DynMap; 325 441 if (pThis) 326 442 { … … 359 475 * Initialize the auto sets. 360 476 */ 361 VMCPUID idCpu = pVM->cCpus; 362 AssertReturn(idCpu > 0 && idCpu <= VMM_MAX_CPU_COUNT, VERR_INTERNAL_ERROR); 363 while (idCpu-- > 0) 364 { 365 PPGMMAPSET pSet = &pVM->aCpus[idCpu].pgm.s.AutoSet; 366 uint32_t j = RT_ELEMENTS(pSet->aEntries); 367 while (j-- > 0) 368 { 369 pSet->aEntries[j].iPage = UINT16_MAX; 370 pSet->aEntries[j].cRefs = 0; 371 pSet->aEntries[j].pvPage = NULL; 372 pSet->aEntries[j].HCPhys = NIL_RTHCPHYS; 373 } 374 pSet->cEntries = PGMMAPSET_CLOSED; 375 pSet->iSubset = UINT32_MAX; 376 pSet->iCpu = -1; 377 memset(&pSet->aiHashTable[0], 0xff, sizeof(pSet->aiHashTable)); 378 } 477 int rc = pgmRZDynMapInitAutoSetsForVM(pVM); 478 if (RT_FAILURE(rc)) 479 return rc; 379 480 380 481 /* … … 387 488 * Reference and if necessary setup or expand the cache. 
388 489 */ 389 PPGMR 0DYNMAP pThis = g_pPGMR0DynMap;490 PPGMRZDYNMAP pThis = g_pPGMR0DynMap; 390 491 AssertPtrReturn(pThis, VERR_INTERNAL_ERROR); 391 intrc = RTSemFastMutexRequest(pThis->hInitLock);492 rc = RTSemFastMutexRequest(pThis->hInitLock); 392 493 AssertLogRelRCReturn(rc, rc); 393 494 … … 430 531 return; 431 532 432 PPGMR 0DYNMAP pThis = g_pPGMR0DynMap;533 PPGMRZDYNMAP pThis = g_pPGMR0DynMap; 433 534 AssertPtrReturnVoid(pThis); 434 535 … … 463 564 LogRel(("PGMR0DynMapTermVM: %d dangling refs to %#x\n", cRefs, iPage)); 464 565 if (iPage < pThis->cPages && cRefs > 0) 465 pgmR 0DynMapReleasePage(pThis, iPage, cRefs);566 pgmRZDynMapReleasePage(pThis, iPage, cRefs); 466 567 else 467 568 AssertLogRelMsgFailed(("cRefs=%d iPage=%#x cPages=%u\n", cRefs, iPage, pThis->cPages)); 468 569 469 pSet->aEntries[j].iPage = UINT16_MAX; 470 pSet->aEntries[j].cRefs = 0; 471 pSet->aEntries[j].pvPage = NULL; 472 pSet->aEntries[j].HCPhys = NIL_RTHCPHYS; 570 PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[j]); 473 571 } 474 572 pSet->cEntries = PGMMAPSET_CLOSED; … … 512 610 { 513 611 Assert(!pvUser2); 514 PPGMR 0DYNMAP pThis = (PPGMR0DYNMAP)pvUser1;612 PPGMRZDYNMAP pThis = (PPGMRZDYNMAP)pvUser1; 515 613 Assert(pThis == g_pPGMR0DynMap); 516 PPGMR 0DYNMAPENTRY paPages = pThis->paPages;614 PPGMRZDYNMAPENTRY paPages = pThis->paPages; 517 615 uint32_t iPage = pThis->cPages; 518 616 while (iPage-- > 0) … … 527 625 * @param pThis The dynamic mapping cache instance. 528 626 */ 529 static int pgmR0DynMapTlbShootDown(PPGMR 0DYNMAP pThis)627 static int pgmR0DynMapTlbShootDown(PPGMRZDYNMAP pThis) 530 628 { 531 629 int rc = RTMpOnAll(pgmR0DynMapShootDownTlbs, pThis, NULL); … … 548 646 * @param pcMinPages The minimal size in pages. 549 647 */ 550 static uint32_t pgmR0DynMapCalcNewSize(PPGMR 0DYNMAP pThis, uint32_t *pcMinPages)648 static uint32_t pgmR0DynMapCalcNewSize(PPGMRZDYNMAP pThis, uint32_t *pcMinPages) 551 649 { 552 650 Assert(pThis->cPages <= PGMR0DYNMAP_MAX_PAGES); … … 594 692 * @param pPgLvl The paging level data. 595 693 */ 596 void pgmR0DynMapPagingArrayInit(PPGMR 0DYNMAP pThis, PPGMR0DYNMAPPGLVL pPgLvl)694 void pgmR0DynMapPagingArrayInit(PPGMRZDYNMAP pThis, PPGMR0DYNMAPPGLVL pPgLvl) 597 695 { 598 696 RTCCUINTREG cr4 = ASMGetCR4(); … … 704 802 * @param ppvPTE Where to store the PTE address. 705 803 */ 706 static int pgmR0DynMapPagingArrayMapPte(PPGMR 0DYNMAP pThis, PPGMR0DYNMAPPGLVL pPgLvl, void *pvPage,804 static int pgmR0DynMapPagingArrayMapPte(PPGMRZDYNMAP pThis, PPGMR0DYNMAPPGLVL pPgLvl, void *pvPage, 707 805 PPGMR0DYNMAPSEG pSeg, uint32_t cMaxPTs, void **ppvPTE) 708 806 { … … 791 889 * @param pPage The page. 792 890 */ 793 DECLINLINE(void) pgmR0DynMapSetupGuardPage(PPGMR 0DYNMAP pThis, PPGMR0DYNMAPENTRY pPage)891 DECLINLINE(void) pgmR0DynMapSetupGuardPage(PPGMRZDYNMAP pThis, PPGMRZDYNMAPENTRY pPage) 794 892 { 795 893 memset(pPage->pvPage, 0xfd, PAGE_SIZE); … … 815 913 * @param cPages The size of the new segment, give as a page count. 
816 914 */ 817 static int pgmR0DynMapAddSeg(PPGMR 0DYNMAP pThis, uint32_t cPages)915 static int pgmR0DynMapAddSeg(PPGMRZDYNMAP pThis, uint32_t cPages) 818 916 { 819 917 int rc2; … … 838 936 } 839 937 840 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER; 841 RTSpinlockAcquire(pThis->hSpinlock, &Tmp); 938 PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis); 842 939 843 940 memcpy(pvPages, pThis->paPages, sizeof(pThis->paPages[0]) * pThis->cPages); 844 941 void *pvToFree = pThis->paPages; 845 pThis->paPages = (PPGMR 0DYNMAPENTRY)pvPages;846 847 RTSpinlockRelease(pThis->hSpinlock, &Tmp);942 pThis->paPages = (PPGMRZDYNMAPENTRY)pvPages; 943 944 PGMRZDYNMAP_SPINLOCK_RELEASE(pThis); 848 945 RTMemFree(pvToFree); 849 946 … … 882 979 pThis->paPages[iPage].cRefs = 0; 883 980 pThis->paPages[iPage].uPte.pPae = 0; 981 #ifndef IN_RC 884 982 RTCpuSetFill(&pThis->paPages[iPage].PendingSet); 983 #endif 885 984 886 985 /* Map its page table, retry until we've got a clean run (paranoia). */ … … 983 1082 * @param pThis The dynamic mapping cache instance. 984 1083 */ 985 static int pgmR0DynMapSetup(PPGMR 0DYNMAP pThis)1084 static int pgmR0DynMapSetup(PPGMRZDYNMAP pThis) 986 1085 { 987 1086 /* … … 1026 1125 * @param pThis The dynamic mapping cache instance. 1027 1126 */ 1028 static int pgmR0DynMapExpand(PPGMR 0DYNMAP pThis)1127 static int pgmR0DynMapExpand(PPGMRZDYNMAP pThis) 1029 1128 { 1030 1129 /* … … 1069 1168 * @param pThis The dynamic mapping cache instance. 1070 1169 */ 1071 static void pgmR0DynMapTearDown(PPGMR 0DYNMAP pThis)1170 static void pgmR0DynMapTearDown(PPGMRZDYNMAP pThis) 1072 1171 { 1073 1172 /* 1074 1173 * Restore the original page table entries 1075 1174 */ 1076 PPGMR 0DYNMAPENTRY paPages = pThis->paPages;1175 PPGMRZDYNMAPENTRY paPages = pThis->paPages; 1077 1176 uint32_t iPage = pThis->cPages; 1078 1177 if (pThis->fLegacyMode) … … 1145 1244 } 1146 1245 1246 #endif /* IN_RING0 */ 1247 #ifdef IN_RC 1248 1249 /** 1250 * Initializes the dynamic mapping cache in raw-mode context. 1251 * 1252 * @returns VBox status code. 1253 * @param pVM The VM handle. 1254 */ 1255 VMMRCDECL(int) PGMRCDynMapInit(PVM pVM) 1256 { 1257 /* 1258 * Allocate and initialize the instance data and page array. 1259 */ 1260 PPGMRZDYNMAP pThis; 1261 size_t const cPages = MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE; 1262 size_t const cb = RT_ALIGN_Z(sizeof(*pThis), 32) 1263 + sizeof(PGMRZDYNMAPENTRY) * cPages; 1264 int rc = MMHyperAlloc(pVM, cb, 32, MM_TAG_PGM, (void **)&pThis); 1265 if (RT_FAILURE(rc)) 1266 return rc; 1267 1268 pThis->u32Magic = PGMRZDYNMAP_MAGIC; 1269 pThis->paPages = RT_ALIGN_PT(pThis + 1, 32, PPGMRZDYNMAPENTRY); 1270 pThis->cPages = cPages; 1271 pThis->fLegacyMode = PGMGetHostMode(pVM) == PGMMODE_32_BIT; 1272 pThis->cLoad = 0; 1273 pThis->cMaxLoad = 0; 1274 pThis->cGuardPages = 0; 1275 pThis->cUsers = 1; 1276 1277 for (size_t iPage = 0; iPage < cPages; iPage++) 1278 { 1279 pThis->paPages[iPage].HCPhys = NIL_RTHCPHYS; 1280 pThis->paPages[iPage].pvPage = pVM->pgm.s.pbDynPageMapBaseGC + iPage * PAGE_SIZE; 1281 pThis->paPages[iPage].cRefs = 0; 1282 if (pThis->fLegacyMode) 1283 pThis->paPages[iPage].uPte.pLegacy = &pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage]; 1284 else 1285 pThis->paPages[iPage].uPte.pPae = &pVM->pgm.s.paDynPageMapPaePTEsGC[iPage]; 1286 } 1287 1288 pVM->pgm.s.pRCDynMap = pThis; 1289 1290 /* 1291 * Initialize the autosets the VM. 
1292 */ 1293 rc = pgmRZDynMapInitAutoSetsForVM(pVM); 1294 if (RT_FAILURE(rc)) 1295 return rc; 1296 1297 return VINF_SUCCESS; 1298 } 1299 1300 #endif /* IN_RC */ 1147 1301 1148 1302 /** … … 1153 1307 * @param cRefs The number of references to release. 1154 1308 */ 1155 DECLINLINE(void) pgmR 0DynMapReleasePageLocked(PPGMR0DYNMAP pThis, uint32_t iPage, int32_t cRefs)1309 DECLINLINE(void) pgmRZDynMapReleasePageLocked(PPGMRZDYNMAP pThis, uint32_t iPage, int32_t cRefs) 1156 1310 { 1157 1311 cRefs = ASMAtomicSubS32(&pThis->paPages[iPage].cRefs, cRefs) - cRefs; … … 1169 1323 * @param cRefs The number of references to release. 1170 1324 */ 1171 static void pgmR0DynMapReleasePage(PPGMR0DYNMAP pThis, uint32_t iPage, uint32_t cRefs) 1172 { 1173 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER; 1174 RTSpinlockAcquire(pThis->hSpinlock, &Tmp); 1175 pgmR0DynMapReleasePageLocked(pThis, iPage, cRefs); 1176 RTSpinlockRelease(pThis->hSpinlock, &Tmp); 1325 static void pgmRZDynMapReleasePage(PPGMRZDYNMAP pThis, uint32_t iPage, uint32_t cRefs) 1326 { 1327 PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis); 1328 pgmRZDynMapReleasePageLocked(pThis, iPage, cRefs); 1329 PGMRZDYNMAP_SPINLOCK_RELEASE(pThis); 1177 1330 } 1178 1331 … … 1186 1339 * @param iPage The page index pgmR0DynMapPage hashed HCPhys to. 1187 1340 * @param pVCpu The current CPU, for statistics. 1188 */ 1189 static uint32_t pgmR0DynMapPageSlow(PPGMR0DYNMAP pThis, RTHCPHYS HCPhys, uint32_t iPage, PVMCPU pVCpu) 1190 { 1191 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageSlow); 1341 * @param pfNew Set to @c true if a new entry was made and @c false if 1342 * an old entry was found and reused. 1343 */ 1344 static uint32_t pgmR0DynMapPageSlow(PPGMRZDYNMAP pThis, RTHCPHYS HCPhys, uint32_t iPage, PVMCPU pVCpu, bool *pfNew) 1345 { 1346 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageSlow); 1192 1347 1193 1348 /* … … 1199 1354 #endif 1200 1355 uint32_t const cPages = pThis->cPages; 1201 PPGMR 0DYNMAPENTRY paPages = pThis->paPages;1356 PPGMRZDYNMAPENTRY paPages = pThis->paPages; 1202 1357 uint32_t iFreePage; 1203 1358 if (!paPages[iPage].cRefs) … … 1217 1372 if (paPages[iFreePage].HCPhys == HCPhys) 1218 1373 { 1219 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageSlowLoopHits); 1374 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageSlowLoopHits); 1375 *pfNew = false; 1220 1376 return iFreePage; 1221 1377 } … … 1228 1384 return UINT32_MAX; 1229 1385 } 1230 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapPageSlowLoopMisses);1386 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageSlowLoopMisses); 1231 1387 #ifdef VBOX_WITH_STATISTICS 1232 1388 fLooped = true; … … 1240 1396 for (uint32_t iPage2 = (iPage + 3) % cPages; iPage2 != iPage; iPage2 = (iPage2 + 1) % cPages) 1241 1397 if (paPages[iPage2].HCPhys == HCPhys) 1242 STAM_COUNTER_INC(&pVCpu->pgm.s.StatR 0DynMapPageSlowLostHits);1398 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZDynMapPageSlowLostHits); 1243 1399 #endif 1244 1400 … … 1246 1402 * Setup the new entry. 1247 1403 */ 1404 *pfNew = true; 1248 1405 /*Log6(("pgmR0DynMapPageSlow: old - %RHp %#x %#llx\n", paPages[iFreePage].HCPhys, paPages[iFreePage].cRefs, paPages[iFreePage].uPte.pPae->u));*/ 1249 1406 paPages[iFreePage].HCPhys = HCPhys; 1407 #ifndef IN_RC 1250 1408 RTCpuSetFill(&paPages[iFreePage].PendingSet); 1409 #endif 1251 1410 if (pThis->fLegacyMode) 1252 1411 { … … 1286 1445 * @param ppvPage Where to the page address. 
1287 1446 */ 1288 DECLINLINE(uint32_t) pgmR0DynMapPage(PPGMR0DYNMAP pThis, RTHCPHYS HCPhys, int32_t iRealCpu, PVMCPU pVCpu, void **ppvPage) 1289 { 1290 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER; 1291 RTSpinlockAcquire(pThis->hSpinlock, &Tmp); 1447 DECLINLINE(uint32_t) pgmR0DynMapPage(PPGMRZDYNMAP pThis, RTHCPHYS HCPhys, int32_t iRealCpu, PVMCPU pVCpu, void **ppvPage) 1448 { 1449 PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis); 1292 1450 AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys)); 1293 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapPage);1451 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPage); 1294 1452 1295 1453 /* … … 1301 1459 * to pgmR0DynMapPageSlow(). 1302 1460 */ 1461 bool fNew = false; 1303 1462 uint32_t const cPages = pThis->cPages; 1304 1463 uint32_t iPage = (HCPhys >> PAGE_SHIFT) % cPages; 1305 PPGMR 0DYNMAPENTRY paPages = pThis->paPages;1464 PPGMRZDYNMAPENTRY paPages = pThis->paPages; 1306 1465 if (RT_LIKELY(paPages[iPage].HCPhys == HCPhys)) 1307 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapPageHits0);1466 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageHits0); 1308 1467 else 1309 1468 { … … 1312 1471 { 1313 1472 iPage = iPage2; 1314 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapPageHits1);1473 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageHits1); 1315 1474 } 1316 1475 else … … 1320 1479 { 1321 1480 iPage = iPage2; 1322 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapPageHits2);1481 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageHits2); 1323 1482 } 1324 1483 else 1325 1484 { 1326 iPage = pgmR0DynMapPageSlow(pThis, HCPhys, iPage, pVCpu );1485 iPage = pgmR0DynMapPageSlow(pThis, HCPhys, iPage, pVCpu, &fNew); 1327 1486 if (RT_UNLIKELY(iPage == UINT32_MAX)) 1328 1487 { 1329 RTSpinlockRelease(pThis->hSpinlock, &Tmp);1488 PGMRZDYNMAP_SPINLOCK_RELEASE(pThis); 1330 1489 *ppvPage = NULL; 1331 1490 return iPage; … … 1349 1508 { 1350 1509 ASMAtomicDecS32(&paPages[iPage].cRefs); 1351 RTSpinlockRelease(pThis->hSpinlock, &Tmp);1510 PGMRZDYNMAP_SPINLOCK_RELEASE(pThis); 1352 1511 *ppvPage = NULL; 1353 1512 AssertLogRelMsgFailedReturn(("cRefs=%d iPage=%p HCPhys=%RHp\n", cRefs, iPage, HCPhys), UINT32_MAX); … … 1355 1514 void *pvPage = paPages[iPage].pvPage; 1356 1515 1516 #ifndef IN_RC 1357 1517 /* 1358 1518 * Invalidate the entry? … … 1361 1521 if (RT_UNLIKELY(fInvalidateIt)) 1362 1522 RTCpuSetDelByIndex(&paPages[iPage].PendingSet, iRealCpu); 1363 1364 RTSpinlockRelease(pThis->hSpinlock, &Tmp); 1523 #endif 1524 1525 PGMRZDYNMAP_SPINLOCK_RELEASE(pThis); 1365 1526 1366 1527 /* 1367 1528 * Do the actual invalidation outside the spinlock. 1368 1529 */ 1530 #ifdef IN_RC 1531 if (RT_UNLIKELY(fNew)) 1532 #else 1369 1533 if (RT_UNLIKELY(fInvalidateIt)) 1370 { 1371 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageInvlPg); 1534 #endif 1535 { 1536 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageInvlPg); 1372 1537 ASMInvalidatePage(pvPage); 1373 1538 } … … 1383 1548 * @returns VBox status code. 1384 1549 */ 1385 VMMR0DECL(int) PGMR0DynMapAssertIntegrity(void)1550 static int pgmRZDynMapAssertIntegrity(PPGMRZDYNMAP pThis) 1386 1551 { 1387 1552 /* 1388 1553 * Basic pool stuff that doesn't require any lock, just assumes we're a user. 
1389 1554 */ 1390 PPGMR0DYNMAP pThis = g_pPGMR0DynMap;1391 1555 if (!pThis) 1392 1556 return VINF_SUCCESS; 1393 1557 AssertPtrReturn(pThis, VERR_INVALID_POINTER); 1394 AssertReturn(pThis->u32Magic == PGMR 0DYNMAP_MAGIC, VERR_INVALID_MAGIC);1558 AssertReturn(pThis->u32Magic == PGMRZDYNMAP_MAGIC, VERR_INVALID_MAGIC); 1395 1559 if (!pThis->cUsers) 1396 1560 return VERR_INVALID_PARAMETER; … … 1398 1562 1399 1563 int rc = VINF_SUCCESS; 1400 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER; 1401 RTSpinlockAcquire(pThis->hSpinlock, &Tmp); 1564 PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis); 1402 1565 1403 1566 #define CHECK_RET(expr, a) \ … … 1405 1568 if (RT_UNLIKELY(!(expr))) \ 1406 1569 { \ 1407 RTSpinlockRelease(pThis->hSpinlock, &Tmp); \1570 PGMRZDYNMAP_SPINLOCK_RELEASE(pThis); \ 1408 1571 RTAssertMsg1Weak(#expr, __LINE__, __FILE__, __PRETTY_FUNCTION__); \ 1409 1572 RTAssertMsg2Weak a; \ … … 1417 1580 uint32_t cGuard = 0; 1418 1581 uint32_t cLoad = 0; 1419 PPGMR 0DYNMAPENTRY paPages = pThis->paPages;1582 PPGMRZDYNMAPENTRY paPages = pThis->paPages; 1420 1583 uint32_t iPage = pThis->cPages; 1421 1584 if (pThis->fLegacyMode) 1422 1585 { 1586 #ifdef IN_RING0 1423 1587 PCX86PGUINT paSavedPTEs = (PCX86PGUINT)pThis->pvSavedPTEs; NOREF(paSavedPTEs); 1588 #endif 1424 1589 while (iPage-- > 0) 1425 1590 { … … 1440 1605 { 1441 1606 CHECK_RET(!(paPages[iPage].HCPhys & PAGE_OFFSET_MASK), ("#%u: %RHp\n", iPage, paPages[iPage].HCPhys)); 1442 X86PGUINT uPte = (paSavedPTEs[iPage] & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT)) 1443 | X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D 1607 X86PGUINT uPte = X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D 1608 #ifdef IN_RING0 1609 | (paSavedPTEs[iPage] & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT)) 1610 #endif 1444 1611 | (paPages[iPage].HCPhys & X86_PTE_PAE_PG_MASK); 1445 1612 CHECK_RET(paPages[iPage].uPte.pLegacy->u == uPte, … … 1448 1615 cLoad++; 1449 1616 } 1617 #ifdef IN_RING0 1450 1618 else 1451 1619 CHECK_RET(paPages[iPage].uPte.pLegacy->u == paSavedPTEs[iPage], 1452 1620 ("#%u: %#x %#x", iPage, paPages[iPage].uPte.pLegacy->u, paSavedPTEs[iPage])); 1621 #endif 1453 1622 } 1454 1623 } 1455 1624 else 1456 1625 { 1626 #ifdef IN_RING0 1457 1627 PCX86PGPAEUINT paSavedPTEs = (PCX86PGPAEUINT)pThis->pvSavedPTEs; NOREF(paSavedPTEs); 1628 #endif 1458 1629 while (iPage-- > 0) 1459 1630 { … … 1474 1645 { 1475 1646 CHECK_RET(!(paPages[iPage].HCPhys & PAGE_OFFSET_MASK), ("#%u: %RHp\n", iPage, paPages[iPage].HCPhys)); 1476 X86PGPAEUINT uPte = (paSavedPTEs[iPage] & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT)) 1477 | X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D 1647 X86PGPAEUINT uPte = X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D 1648 #ifdef IN_RING0 1649 | (paSavedPTEs[iPage] & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT)) 1650 #endif 1478 1651 | (paPages[iPage].HCPhys & X86_PTE_PAE_PG_MASK); 1479 1652 CHECK_RET(paPages[iPage].uPte.pPae->u == uPte, … … 1482 1655 cLoad++; 1483 1656 } 1657 #ifdef IN_RING0 1484 1658 else 1485 1659 CHECK_RET(paPages[iPage].uPte.pPae->u == paSavedPTEs[iPage], 1486 1660 ("#%u: %#llx %#llx", iPage, paPages[iPage].uPte.pPae->u, paSavedPTEs[iPage])); 1661 #endif 1487 1662 } 1488 1663 } … … 1492 1667 1493 1668 #undef CHECK_RET 1494 RTSpinlockRelease(pThis->hSpinlock, &Tmp);1669 PGMRZDYNMAP_SPINLOCK_RELEASE(pThis); 1495 1670 return VINF_SUCCESS; 1496 1671 } 1672 1673 #ifdef IN_RING0 1674 /** 1675 * Assert the the integrity of the pool. 1676 * 1677 * @returns VBox status code. 
1678 */ 1679 VMMR0DECL(int) PGMR0DynMapAssertIntegrity(void) 1680 { 1681 return pgmRZDynMapAssertIntegrity(g_pPGMR0DynMap); 1682 } 1683 #endif /* IN_RING0 */ 1684 1685 #ifdef IN_RC 1686 /** 1687 * Assert the the integrity of the pool. 1688 * 1689 * @returns VBox status code. 1690 */ 1691 VMMRCDECL(int) PGMRCDynMapAssertIntegrity(PVM pVM) 1692 { 1693 return pgmRZDynMapAssertIntegrity((PPGMRZDYNMAP)pVM->pgm.s.pRCDynMap); 1694 } 1695 #endif /* IN_RC */ 1696 1697 1698 /** 1699 * As a final resort for a (somewhat) full auto set or full cache, try merge 1700 * duplicate entries and flush the ones we can. 1701 * 1702 * @param pSet The set. 1703 */ 1704 static void pgmDynMapOptimizeAutoSet(PPGMMAPSET pSet) 1705 { 1706 LogFlow(("pgmDynMapOptimizeAutoSet\n")); 1707 1708 for (uint32_t i = 0 ; i < pSet->cEntries; i++) 1709 { 1710 /* 1711 * Try merge entries. 1712 */ 1713 uint16_t const iPage = pSet->aEntries[i].iPage; 1714 uint32_t j = i + 1; 1715 while ( j < pSet->cEntries 1716 && ( pSet->iSubset == UINT32_MAX 1717 || pSet->iSubset < pSet->cEntries) ) 1718 { 1719 if (pSet->aEntries[j].iPage != iPage) 1720 j++; 1721 else 1722 { 1723 uint32_t const cHardRefs = (uint32_t)pSet->aEntries[i].cRefs 1724 + (uint32_t)pSet->aEntries[j].cRefs; 1725 uint32_t cInlinedRefs = (uint32_t)pSet->aEntries[i].cInlinedRefs 1726 + (uint32_t)pSet->aEntries[j].cInlinedRefs; 1727 uint32_t cUnrefs = (uint32_t)pSet->aEntries[i].cUnrefs 1728 + (uint32_t)pSet->aEntries[j].cUnrefs; 1729 uint32_t cSub = RT_MIN(cUnrefs, cInlinedRefs); 1730 cInlinedRefs -= cSub; 1731 cUnrefs -= cSub; 1732 1733 if ( cHardRefs < UINT16_MAX 1734 && cInlinedRefs < UINT16_MAX 1735 && cUnrefs < UINT16_MAX) 1736 { 1737 /* merge j into i removing j. */ 1738 Log2(("pgmDynMapOptimizeAutoSet: Merging #%u into #%u\n", j, i)); 1739 pSet->aEntries[i].cRefs = cHardRefs; 1740 pSet->aEntries[i].cInlinedRefs = cInlinedRefs; 1741 pSet->aEntries[i].cUnrefs = cUnrefs; 1742 pSet->cEntries--; 1743 if (j < pSet->cEntries) 1744 { 1745 pSet->aEntries[j] = pSet->aEntries[pSet->cEntries]; 1746 PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[pSet->cEntries]); 1747 } 1748 else 1749 PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[j]); 1750 } 1751 #if 0 /* too complicated, skip it. */ 1752 else 1753 { 1754 /* migrate the max number of refs from j into i and quit the inner loop. */ 1755 uint32_t cMigrate = UINT16_MAX - 1 - pSet->aEntries[i].cRefs; 1756 Assert(pSet->aEntries[j].cRefs > cMigrate); 1757 pSet->aEntries[j].cRefs -= cMigrate; 1758 pSet->aEntries[i].cRefs = UINT16_MAX - 1; 1759 break; 1760 } 1761 #endif 1762 } 1763 } 1764 1765 /* 1766 * Try make use of the unused hinting (cUnrefs) to evict entries 1767 * from both the set as well as the mapping cache. 
1768 */ 1769 1770 uint32_t const cTotalRefs = (uint32_t)pSet->aEntries[i].cRefs + pSet->aEntries[i].cInlinedRefs; 1771 Log2(("pgmDynMapOptimizeAutoSet: #%u/%u/%u pvPage=%p iPage=%u cRefs=%u cInlinedRefs=%u cUnrefs=%u cTotalRefs=%u\n", 1772 i, 1773 pSet->iSubset, 1774 pSet->cEntries, 1775 pSet->aEntries[i].pvPage, 1776 pSet->aEntries[i].iPage, 1777 pSet->aEntries[i].cRefs, 1778 pSet->aEntries[i].cInlinedRefs, 1779 pSet->aEntries[i].cUnrefs, 1780 cTotalRefs)); 1781 Assert(cTotalRefs >= pSet->aEntries[i].cUnrefs); 1782 1783 if ( cTotalRefs == pSet->aEntries[i].cUnrefs 1784 && ( pSet->iSubset == UINT32_MAX 1785 || pSet->iSubset < pSet->cEntries) 1786 ) 1787 { 1788 Log2(("pgmDynMapOptimizeAutoSet: Releasing iPage=%d/%p\n", pSet->aEntries[i].iPage, pSet->aEntries[i].pvPage)); 1789 //LogFlow(("pgmDynMapOptimizeAutoSet: Releasing iPage=%d/%p\n", pSet->aEntries[i].iPage, pSet->aEntries[i].pvPage)); 1790 pgmRZDynMapReleasePage(PGMRZDYNMAP_SET_2_DYNMAP(pSet), 1791 pSet->aEntries[i].iPage, 1792 pSet->aEntries[i].cRefs); 1793 pSet->cEntries--; 1794 if (i < pSet->cEntries) 1795 { 1796 pSet->aEntries[i] = pSet->aEntries[pSet->cEntries]; 1797 PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[pSet->cEntries]); 1798 } 1799 1800 i--; 1801 } 1802 } 1803 } 1804 1805 1497 1806 1498 1807 … … 1505 1814 * @param pVCpu The shared data for the current virtual CPU. 1506 1815 */ 1507 VMMDECL(void) PGMDynMapStartAutoSet(PVMCPU pVCpu) 1508 { 1816 VMMDECL(void) PGMRZDynMapStartAutoSet(PVMCPU pVCpu) 1817 { 1818 LogFlow(("PGMRZDynMapStartAutoSet:\n")); 1509 1819 Assert(pVCpu->pgm.s.AutoSet.cEntries == PGMMAPSET_CLOSED); 1510 1820 Assert(pVCpu->pgm.s.AutoSet.iSubset == UINT32_MAX); 1511 1821 pVCpu->pgm.s.AutoSet.cEntries = 0; 1512 pVCpu->pgm.s.AutoSet.iCpu = RTMpCpuIdToSetIndex(RTMpCpuId()); 1513 } 1514 1515 1822 pVCpu->pgm.s.AutoSet.iCpu = PGMRZDYNMAP_CUR_CPU(); 1823 } 1824 1825 1826 #ifdef IN_RING0 1516 1827 /** 1517 1828 * Starts or migrates the autoset of a virtual CPU. … … 1526 1837 * @thread EMT 1527 1838 */ 1528 VMM DECL(bool) PGMDynMapStartOrMigrateAutoSet(PVMCPU pVCpu)1839 VMMR0DECL(bool) PGMR0DynMapStartOrMigrateAutoSet(PVMCPU pVCpu) 1529 1840 { 1530 1841 bool fStartIt = pVCpu->pgm.s.AutoSet.cEntries == PGMMAPSET_CLOSED; 1531 1842 if (fStartIt) 1532 PGM DynMapStartAutoSet(pVCpu);1843 PGMRZDynMapStartAutoSet(pVCpu); 1533 1844 else 1534 PGM DynMapMigrateAutoSet(pVCpu);1845 PGMR0DynMapMigrateAutoSet(pVCpu); 1535 1846 return fStartIt; 1536 1847 } 1848 #endif /* IN_RING0 */ 1537 1849 1538 1850 … … 1551 1863 && RT_LIKELY(cEntries <= RT_ELEMENTS(pSet->aEntries))) 1552 1864 { 1553 PPGMR0DYNMAP pThis = g_pPGMR0DynMap; 1554 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER; 1555 RTSpinlockAcquire(pThis->hSpinlock, &Tmp); 1865 PPGMRZDYNMAP pThis = PGMRZDYNMAP_SET_2_DYNMAP(pSet); 1866 PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis); 1556 1867 1557 1868 uint32_t i = cEntries; … … 1562 1873 int32_t cRefs = pSet->aEntries[i].cRefs; 1563 1874 Assert(cRefs > 0); 1564 pgmR0DynMapReleasePageLocked(pThis, iPage, cRefs); 1565 1566 pSet->aEntries[i].iPage = UINT16_MAX; 1567 pSet->aEntries[i].cRefs = 0; 1875 pgmRZDynMapReleasePageLocked(pThis, iPage, cRefs); 1876 1877 PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[i]); 1568 1878 } 1569 1879 1570 1880 Assert(pThis->cLoad <= pThis->cPages - pThis->cGuardPages); 1571 RTSpinlockRelease(pThis->hSpinlock, &Tmp);1881 PGMRZDYNMAP_SPINLOCK_RELEASE(pThis); 1572 1882 } 1573 1883 } … … 1580 1890 * @param pVCpu The shared data for the current virtual CPU. 
1581 1891 */ 1582 VMMDECL(void) PGM DynMapReleaseAutoSet(PVMCPU pVCpu)1892 VMMDECL(void) PGMRZDynMapReleaseAutoSet(PVMCPU pVCpu) 1583 1893 { 1584 1894 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet; … … 1593 1903 pSet->iCpu = -1; 1594 1904 1595 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatR0DynMapSetSize[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]); 1905 #ifdef IN_RC 1906 if (RT_ELEMENTS(pSet->aEntries) > MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE) 1907 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / (MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE)) % 11]); 1908 else 1909 #endif 1910 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]); 1596 1911 AssertMsg(cEntries < PGMMAPSET_MAX_FILL, ("%u\n", cEntries)); 1597 1912 if (cEntries > RT_ELEMENTS(pSet->aEntries) * 50 / 100) 1598 Log(("PGMDynMapReleaseAutoSet: cEntries=%d\n", pSet->cEntries)); 1913 Log(("PGMRZDynMapReleaseAutoSet: cEntries=%d\n", cEntries)); 1914 else 1915 LogFlow(("PGMRZDynMapReleaseAutoSet: cEntries=%d\n", cEntries)); 1599 1916 1600 1917 pgmDynMapFlushAutoSetWorker(pSet, cEntries); … … 1607 1924 * @param pVCpu The shared data for the current virtual CPU. 1608 1925 */ 1609 VMMDECL(void) PGM DynMapFlushAutoSet(PVMCPU pVCpu)1926 VMMDECL(void) PGMRZDynMapFlushAutoSet(PVMCPU pVCpu) 1610 1927 { 1611 1928 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet; 1612 AssertMsg(pSet->iCpu == RTMpCpuIdToSetIndex(RTMpCpuId()), ("%d %d(%d) efl=%#x\n", pSet->iCpu, RTMpCpuIdToSetIndex(RTMpCpuId()), RTMpCpuId(), ASMGetFlags()));1929 AssertMsg(pSet->iCpu == PGMRZDYNMAP_CUR_CPU(), ("%d %d efl=%#x\n", pSet->iCpu, PGMRZDYNMAP_CUR_CPU(), ASMGetFlags())); 1613 1930 1614 1931 /* … … 1617 1934 uint32_t cEntries = pSet->cEntries; 1618 1935 AssertReturnVoid(cEntries != PGMMAPSET_CLOSED); 1619 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatR0DynMapSetSize[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]); 1936 #ifdef IN_RC 1937 if (RT_ELEMENTS(pSet->aEntries) > MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE) 1938 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / (MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE)) % 11]); 1939 else 1940 #endif 1941 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]); 1620 1942 if (cEntries >= RT_ELEMENTS(pSet->aEntries) * 45 / 100) 1621 1943 { … … 1626 1948 1627 1949 pgmDynMapFlushAutoSetWorker(pSet, cEntries); 1628 AssertMsg(pSet->iCpu == RTMpCpuIdToSetIndex(RTMpCpuId()), ("%d %d(%d) efl=%#x\n", pSet->iCpu, RTMpCpuIdToSetIndex(RTMpCpuId()), RTMpCpuId(), ASMGetFlags())); 1629 } 1630 } 1631 1632 1950 AssertMsg(pSet->iCpu == PGMRZDYNMAP_CUR_CPU(), ("%d %d efl=%#x\n", pSet->iCpu, PGMRZDYNMAP_CUR_CPU(), ASMGetFlags())); 1951 } 1952 } 1953 1954 1955 #ifndef IN_RC 1633 1956 /** 1634 1957 * Migrates the automatic mapping set of the current vCPU if it's active and … … 1644 1967 * @thread EMT 1645 1968 */ 1646 VMMDECL(void) PGMDynMapMigrateAutoSet(PVMCPU pVCpu) 1647 { 1969 VMMR0DECL(void) PGMR0DynMapMigrateAutoSet(PVMCPU pVCpu) 1970 { 1971 LogFlow(("PGMR0DynMapMigrateAutoSet\n")); 1648 1972 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet; 1649 int32_t iRealCpu = RTMpCpuIdToSetIndex(RTMpCpuId());1973 int32_t iRealCpu = PGMRZDYNMAP_CUR_CPU(); 1650 1974 if (pSet->iCpu != iRealCpu) 1651 1975 { … … 1656 1980 if (i != 0 && RT_LIKELY(i <= RT_ELEMENTS(pSet->aEntries))) 1657 1981 { 1658 PPGMR0DYNMAP pThis = g_pPGMR0DynMap; 1659 RTSPINLOCKTMP Tmp = 
RTSPINLOCKTMP_INITIALIZER; 1660 RTSpinlockAcquire(pThis->hSpinlock, &Tmp); 1982 PPGMRZDYNMAP pThis = PGMRZDYNMAP_SET_2_DYNMAP(pSet); 1983 PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis); 1661 1984 1662 1985 while (i-- > 0) … … 1668 1991 { 1669 1992 RTCpuSetDelByIndex(&pThis->paPages[iPage].PendingSet, iRealCpu); 1670 RTSpinlockRelease(pThis->hSpinlock, &Tmp);1993 PGMRZDYNMAP_SPINLOCK_RELEASE(pThis); 1671 1994 1672 1995 ASMInvalidatePage(pThis->paPages[iPage].pvPage); 1673 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapMigrateInvlPg);1674 1675 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);1996 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapMigrateInvlPg); 1997 1998 PGMRZDYNMAP_SPINLOCK_REACQUIRE(pThis); 1676 1999 } 1677 2000 } 1678 2001 1679 RTSpinlockRelease(pThis->hSpinlock, &Tmp);2002 PGMRZDYNMAP_SPINLOCK_RELEASE(pThis); 1680 2003 } 1681 2004 } … … 1683 2006 } 1684 2007 } 2008 #endif /* !IN_RC */ 1685 2009 1686 2010 … … 1706 2030 pSet->cEntries = iSubset; 1707 2031 1708 PPGMR0DYNMAP pThis = g_pPGMR0DynMap; 1709 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER; 1710 RTSpinlockAcquire(pThis->hSpinlock, &Tmp); 2032 PPGMRZDYNMAP pThis = PGMRZDYNMAP_SET_2_DYNMAP(pSet); 2033 PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis); 1711 2034 1712 2035 while (i-- > iSubset) … … 1716 2039 int32_t cRefs = pSet->aEntries[i].cRefs; 1717 2040 Assert(cRefs > 0); 1718 pgmR0DynMapReleasePageLocked(pThis, iPage, cRefs); 1719 1720 pSet->aEntries[i].iPage = UINT16_MAX; 1721 pSet->aEntries[i].cRefs = 0; 1722 } 1723 1724 RTSpinlockRelease(pThis->hSpinlock, &Tmp); 2041 pgmRZDynMapReleasePageLocked(pThis, iPage, cRefs); 2042 2043 PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[i]); 2044 } 2045 2046 PGMRZDYNMAP_SPINLOCK_RELEASE(pThis); 1725 2047 } 1726 2048 } … … 1738 2060 * 1739 2061 * @returns The index of the previous subset. Pass this to 1740 * PGMDynMapPopAutoSubset when poping it.2062 * PGMDynMapPopAutoSubset when popping it. 1741 2063 * @param pVCpu Pointer to the virtual cpu data. 1742 2064 */ 1743 VMMDECL(uint32_t) PGM DynMapPushAutoSubset(PVMCPU pVCpu)2065 VMMDECL(uint32_t) PGMRZDynMapPushAutoSubset(PVMCPU pVCpu) 1744 2066 { 1745 2067 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet; 1746 2068 AssertReturn(pSet->cEntries != PGMMAPSET_CLOSED, UINT32_MAX); 1747 2069 uint32_t iPrevSubset = pSet->iSubset; 1748 LogFlow(("PGMDynMapPushAutoSubset: pVCpu=%p iPrevSubset=%u\n", pVCpu, iPrevSubset)); 2070 LogFlow(("PGMRZDynMapPushAutoSubset: pVCpu=%p iPrevSubset=%u\n", pVCpu, iPrevSubset)); 2071 2072 #ifdef IN_RC 2073 /* kludge */ 2074 if (pSet->cEntries > MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE / 2) 2075 { 2076 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetOptimize); 2077 pgmDynMapOptimizeAutoSet(pSet); 2078 } 2079 #endif 1749 2080 1750 2081 pSet->iSubset = pSet->cEntries; 1751 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapSubsets); 2082 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSubsets); 2083 1752 2084 return iPrevSubset; 1753 2085 } … … 1760 2092 * @param iPrevSubset What PGMDynMapPushAutoSubset returned. 
1761 2093 */ 1762 VMMDECL(void) PGM DynMapPopAutoSubset(PVMCPU pVCpu, uint32_t iPrevSubset)2094 VMMDECL(void) PGMRZDynMapPopAutoSubset(PVMCPU pVCpu, uint32_t iPrevSubset) 1763 2095 { 1764 2096 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet; 1765 2097 uint32_t cEntries = pSet->cEntries; 1766 LogFlow(("PGM DynMapPopAutoSubset: pVCpu=%p iPrevSubset=%u iSubset=%u cEntries=%u\n", pVCpu, iPrevSubset, pSet->iSubset, cEntries));2098 LogFlow(("PGMRZDynMapPopAutoSubset: pVCpu=%p iPrevSubset=%u iSubset=%u cEntries=%u\n", pVCpu, iPrevSubset, pSet->iSubset, cEntries)); 1767 2099 AssertReturnVoid(cEntries != PGMMAPSET_CLOSED); 1768 2100 AssertReturnVoid(pSet->iSubset >= iPrevSubset || iPrevSubset == UINT32_MAX); 1769 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatR0DynMapSetSize[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]); 2101 #ifdef IN_RC 2102 if (RT_ELEMENTS(pSet->aEntries) > MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE) 2103 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / (MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE)) % 11]); 2104 else 2105 #endif 2106 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]); 1770 2107 if ( cEntries >= RT_ELEMENTS(pSet->aEntries) * 40 / 100 1771 2108 && cEntries != pSet->iSubset) … … 1779 2116 1780 2117 /** 1781 * As a final resort for a full auto set, try merge duplicate entries. 1782 * 1783 * @param pSet The set. 1784 */ 1785 static void pgmDynMapOptimizeAutoSet(PPGMMAPSET pSet) 1786 { 1787 for (uint32_t i = 0 ; i < pSet->cEntries; i++) 1788 { 1789 uint16_t const iPage = pSet->aEntries[i].iPage; 1790 uint32_t j = i + 1; 1791 while (j < pSet->cEntries) 1792 { 1793 if (pSet->aEntries[j].iPage != iPage) 1794 j++; 1795 else if ((uint32_t)pSet->aEntries[i].cRefs + (uint32_t)pSet->aEntries[j].cRefs < UINT16_MAX) 1796 { 1797 /* merge j into i removing j. */ 1798 pSet->aEntries[i].cRefs += pSet->aEntries[j].cRefs; 1799 pSet->cEntries--; 1800 if (j < pSet->cEntries) 2118 * Indicates that the given page is unused and its mapping can be re-used. 2119 * 2120 * @param pVCpu The current CPU. 2121 * @param pvHint The page that is now unused. This does not have to 2122 * point at the start of the page. NULL is ignored. 2123 */ 2124 #ifdef LOG_ENABLED 2125 void pgmRZDynMapUnusedHint(PVMCPU pVCpu, void *pvHint, RT_SRC_POS_DECL) 2126 #else 2127 void pgmRZDynMapUnusedHint(PVMCPU pVCpu, void *pvHint) 2128 #endif 2129 { 2130 /* 2131 * Ignore NULL pointers and mask off the page offset bits. 2132 */ 2133 if (pvHint == NULL) 2134 return; 2135 pvHint = (void *)((uintptr_t)pvHint & ~(uintptr_t)PAGE_OFFSET_MASK); 2136 2137 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet; 2138 uint32_t iEntry = pSet->cEntries; 2139 AssertReturnVoid(iEntry > 0); 2140 2141 /* 2142 * Find the entry in the usual unrolled fashion. 
2143 */ 2144 #define IS_MATCHING_ENTRY(pSet, iEntry, pvHint) \ 2145 ( (pSet)->aEntries[(iEntry)].pvPage == (pvHint) \ 2146 && (uint32_t)(pSet)->aEntries[(iEntry)].cRefs + (pSet)->aEntries[(iEntry)].cInlinedRefs \ 2147 > (pSet)->aEntries[(iEntry)].cUnrefs ) 2148 if ( iEntry >= 1 && IS_MATCHING_ENTRY(pSet, iEntry - 1, pvHint)) 2149 iEntry = iEntry - 1; 2150 else if (iEntry >= 2 && IS_MATCHING_ENTRY(pSet, iEntry - 2, pvHint)) 2151 iEntry = iEntry - 2; 2152 else if (iEntry >= 3 && IS_MATCHING_ENTRY(pSet, iEntry - 3, pvHint)) 2153 iEntry = iEntry - 3; 2154 else if (iEntry >= 4 && IS_MATCHING_ENTRY(pSet, iEntry - 4, pvHint)) 2155 iEntry = iEntry - 4; 2156 else if (iEntry >= 5 && IS_MATCHING_ENTRY(pSet, iEntry - 5, pvHint)) 2157 iEntry = iEntry - 5; 2158 else if (iEntry >= 6 && IS_MATCHING_ENTRY(pSet, iEntry - 6, pvHint)) 2159 iEntry = iEntry - 6; 2160 else if (iEntry >= 7 && IS_MATCHING_ENTRY(pSet, iEntry - 7, pvHint)) 2161 iEntry = iEntry - 7; 2162 else 2163 { 2164 /* 2165 * Loop till we find it. 2166 */ 2167 bool fFound = false; 2168 if (iEntry > 7) 2169 { 2170 iEntry -= 7; 2171 while (iEntry-- > 0) 2172 if (IS_MATCHING_ENTRY(pSet, iEntry, pvHint)) 1801 2173 { 1802 pSet->aEntries[j] = pSet->aEntries[pSet->cEntries]; 1803 pSet->aEntries[pSet->cEntries].iPage = UINT16_MAX; 1804 pSet->aEntries[pSet->cEntries].cRefs = 0; 2174 fFound = true; 2175 break; 1805 2176 } 1806 else 1807 { 1808 pSet->aEntries[j].iPage = UINT16_MAX; 1809 pSet->aEntries[j].cRefs = 0; 1810 } 1811 } 1812 else 1813 { 1814 /* migrate the max number of refs from j into i and quit the inner loop. */ 1815 uint32_t cMigrate = UINT16_MAX - 1 - pSet->aEntries[i].cRefs; 1816 Assert(pSet->aEntries[j].cRefs > cMigrate); 1817 pSet->aEntries[j].cRefs -= cMigrate; 1818 pSet->aEntries[i].cRefs = UINT16_MAX - 1; 1819 break; 1820 } 1821 } 1822 } 1823 } 1824 1825 1826 /** 1827 * Common worker code for PGMDynMapHCPhys, pgmR0DynMapHCPageInlined and 1828 * pgmR0DynMapGCPageInlined. 2177 } 2178 AssertMsgReturnVoid(fFound, 2179 ("pvHint=%p cEntries=%#x iSubset=%#x\n" 2180 "aEntries[0] = {%#x, %#x, %#x, %#x, %p}\n" 2181 "aEntries[1] = {%#x, %#x, %#x, %#x, %p}\n" 2182 "aEntries[2] = {%#x, %#x, %#x, %#x, %p}\n" 2183 "aEntries[3] = {%#x, %#x, %#x, %#x, %p}\n" 2184 "aEntries[4] = {%#x, %#x, %#x, %#x, %p}\n" 2185 "aEntries[5] = {%#x, %#x, %#x, %#x, %p}\n" 2186 , 2187 pvHint, pSet->cEntries, pSet->iSubset, 2188 pSet->aEntries[0].iPage, pSet->aEntries[0].cRefs, pSet->aEntries[0].cInlinedRefs, pSet->aEntries[0].cUnrefs, pSet->aEntries[0].pvPage, 2189 pSet->aEntries[1].iPage, pSet->aEntries[1].cRefs, pSet->aEntries[1].cInlinedRefs, pSet->aEntries[1].cUnrefs, pSet->aEntries[1].pvPage, 2190 pSet->aEntries[2].iPage, pSet->aEntries[2].cRefs, pSet->aEntries[2].cInlinedRefs, pSet->aEntries[2].cUnrefs, pSet->aEntries[2].pvPage, 2191 pSet->aEntries[3].iPage, pSet->aEntries[3].cRefs, pSet->aEntries[3].cInlinedRefs, pSet->aEntries[3].cUnrefs, pSet->aEntries[3].pvPage, 2192 pSet->aEntries[4].iPage, pSet->aEntries[4].cRefs, pSet->aEntries[4].cInlinedRefs, pSet->aEntries[4].cUnrefs, pSet->aEntries[4].pvPage, 2193 pSet->aEntries[5].iPage, pSet->aEntries[5].cRefs, pSet->aEntries[5].cInlinedRefs, pSet->aEntries[5].cUnrefs, pSet->aEntries[5].pvPage)); 2194 } 2195 #undef IS_MATCHING_ENTRY 2196 2197 /* 2198 * Update it. 
2199 */ 2200 uint32_t const cTotalRefs = (uint32_t)pSet->aEntries[iEntry].cRefs + pSet->aEntries[iEntry].cInlinedRefs; 2201 uint32_t const cUnrefs = pSet->aEntries[iEntry].cUnrefs; 2202 LogFlow(("pgmRZDynMapUnusedHint: pvHint=%p #%u cRefs=%d cInlinedRefs=%d cUnrefs=%d (+1) cTotalRefs=%d %s(%d) %s\n", 2203 pvHint, iEntry, pSet->aEntries[iEntry].cRefs, pSet->aEntries[iEntry].cInlinedRefs, cUnrefs, cTotalRefs, pszFile, iLine, pszFunction)); 2204 AssertReturnVoid(cTotalRefs > cUnrefs); 2205 2206 if (RT_LIKELY(cUnrefs < UINT16_MAX - 1)) 2207 pSet->aEntries[iEntry].cUnrefs++; 2208 else if (pSet->aEntries[iEntry].cInlinedRefs) 2209 { 2210 uint32_t cSub = RT_MIN(pSet->aEntries[iEntry].cInlinedRefs, pSet->aEntries[iEntry].cUnrefs); 2211 pSet->aEntries[iEntry].cInlinedRefs -= cSub; 2212 pSet->aEntries[iEntry].cUnrefs -= cSub; 2213 pSet->aEntries[iEntry].cUnrefs++; 2214 } 2215 else 2216 Log(("pgmRZDynMapUnusedHint: pvHint=%p ignored because of overflow! %s(%d) %s\n", pvHint, pszFile, iLine, pszFunction)); 2217 } 2218 2219 2220 /** 2221 * Common worker code for pgmRZDynMapHCPageInlined, pgmRZDynMapHCPageV2Inlined 2222 * and pgmR0DynMapGCPageOffInlined. 1829 2223 * 1830 2224 * @returns VINF_SUCCESS, bails out to ring-3 on failure. … … 1835 2229 * @remarks This is a very hot path. 1836 2230 */ 1837 int pgmR 0DynMapHCPageCommon(PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv)1838 { 1839 LogFlow(("pgmR0DynMapHCPageCommon: pSet=%p HCPhys=%RHp ppv=%p\n", pSet, HCPhys, ppv));1840 AssertMsg(pSet->iCpu == RTMpCpuIdToSetIndex(RTMpCpuId()), ("%d %d(%d) efl=%#x\n", pSet->iCpu, RTMpCpuIdToSetIndex(RTMpCpuId()), RTMpCpuId(), ASMGetFlags()));1841 PVMCPU pVCpu = PGMR0DYNMAP_2_VMCPU(pSet);2231 int pgmRZDynMapHCPageCommon(PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL) 2232 { 2233 AssertMsg(pSet->iCpu == PGMRZDYNMAP_CUR_CPU(), ("%d %d efl=%#x\n", pSet->iCpu, PGMRZDYNMAP_CUR_CPU(), ASMGetFlags())); 2234 PVMCPU pVCpu = PGMRZDYNMAP_SET_2_VMCPU(pSet); 2235 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPage, a); 1842 2236 1843 2237 /* 1844 2238 * Map it. 1845 2239 */ 1846 void *pvPage; 1847 uint32_t const iPage = pgmR0DynMapPage(g_pPGMR0DynMap, HCPhys, pSet->iCpu, pVCpu, &pvPage); 2240 void *pvPage; 2241 PPGMRZDYNMAP pThis = PGMRZDYNMAP_SET_2_DYNMAP(pSet); 2242 uint32_t iPage = pgmR0DynMapPage(pThis, HCPhys, pSet->iCpu, pVCpu, &pvPage); 1848 2243 if (RT_UNLIKELY(iPage == UINT32_MAX)) 1849 2244 { 1850 RTAssertMsg2Weak("PGMDynMapHCPage: cLoad=%u/%u cPages=%u cGuardPages=%u\n", 1851 g_pPGMR0DynMap->cLoad, g_pPGMR0DynMap->cMaxLoad, g_pPGMR0DynMap->cPages, g_pPGMR0DynMap->cGuardPages); 1852 if (!g_fPGMR0DynMapTestRunning) 1853 VMMRZCallRing3NoCpu(PGMR0DYNMAP_2_VM(pSet), VMMCALLRING3_VM_R0_ASSERTION, 0); 1854 *ppv = NULL; 1855 return VERR_PGM_DYNMAP_FAILED; 2245 /* 2246 * We're out of mapping space, optimize our set to try remedy the 2247 * situation. (Only works if there are unreference hints.) 
2248 */ 2249 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetOptimize); 2250 pgmDynMapOptimizeAutoSet(pSet); 2251 2252 iPage = pgmR0DynMapPage(pThis, HCPhys, pSet->iCpu, pVCpu, &pvPage); 2253 if (RT_UNLIKELY(iPage == UINT32_MAX)) 2254 { 2255 RTAssertMsg2Weak("pgmRZDynMapHCPageCommon: cLoad=%u/%u cPages=%u cGuardPages=%u\n", 2256 pThis->cLoad, pThis->cMaxLoad, pThis->cPages, pThis->cGuardPages); 2257 if (!g_fPGMR0DynMapTestRunning) 2258 VMMRZCallRing3NoCpu(PGMRZDYNMAP_SET_2_VM(pSet), VMMCALLRING3_VM_R0_ASSERTION, 0); 2259 *ppv = NULL; 2260 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPage, a); 2261 return VERR_PGM_DYNMAP_FAILED; 2262 } 1856 2263 } 1857 2264 … … 1869 2276 { 1870 2277 unsigned iEntry = pSet->cEntries++; 1871 pSet->aEntries[iEntry].cRefs = 1; 1872 pSet->aEntries[iEntry].iPage = iPage; 1873 pSet->aEntries[iEntry].pvPage = pvPage; 1874 pSet->aEntries[iEntry].HCPhys = HCPhys; 2278 pSet->aEntries[iEntry].cRefs = 1; 2279 pSet->aEntries[iEntry].cUnrefs = 0; 2280 pSet->aEntries[iEntry].cInlinedRefs = 0; 2281 pSet->aEntries[iEntry].iPage = iPage; 2282 pSet->aEntries[iEntry].pvPage = pvPage; 2283 pSet->aEntries[iEntry].HCPhys = HCPhys; 1875 2284 pSet->aiHashTable[PGMMAPSET_HASH(HCPhys)] = iEntry; 2285 LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/0/0 iPage=%#x [a] %s(%d) %s\n", 2286 pSet, HCPhys, iEntry, iEntry + 1, pvPage, 1, iPage, pszFile, iLine, pszFunction)); 1876 2287 } 1877 2288 /* Any of the last 5 pages? */ 1878 2289 else if ( pSet->aEntries[i - 0].iPage == iPage 1879 2290 && pSet->aEntries[i - 0].cRefs < UINT16_MAX - 1) 2291 { 1880 2292 pSet->aEntries[i - 0].cRefs++; 2293 LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [0] %s(%d) %s\n", pSet, HCPhys, i - 0, pSet->cEntries, pvPage, pSet->aEntries[i - 0].cRefs, pSet->aEntries[i - 0].cInlinedRefs, pSet->aEntries[i - 0].cUnrefs, iPage, pszFile, iLine, pszFunction)); 2294 } 1881 2295 else if ( pSet->aEntries[i - 1].iPage == iPage 1882 2296 && pSet->aEntries[i - 1].cRefs < UINT16_MAX - 1) 2297 { 1883 2298 pSet->aEntries[i - 1].cRefs++; 2299 LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [1] %s(%d) %s\n", pSet, HCPhys, i - 1, pSet->cEntries, pvPage, pSet->aEntries[i - 1].cRefs, pSet->aEntries[i - 1].cInlinedRefs, pSet->aEntries[i - 1].cUnrefs, iPage, pszFile, iLine, pszFunction)); 2300 } 1884 2301 else if ( pSet->aEntries[i - 2].iPage == iPage 1885 2302 && pSet->aEntries[i - 2].cRefs < UINT16_MAX - 1) 2303 { 1886 2304 pSet->aEntries[i - 2].cRefs++; 2305 LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [2] %s(%d) %s\n", pSet, HCPhys, i - 2, pSet->cEntries, pvPage, pSet->aEntries[i - 2].cRefs, pSet->aEntries[i - 2].cInlinedRefs, pSet->aEntries[i - 2].cUnrefs, iPage, pszFile, iLine, pszFunction)); 2306 } 1887 2307 else if ( pSet->aEntries[i - 3].iPage == iPage 1888 2308 && pSet->aEntries[i - 3].cRefs < UINT16_MAX - 1) 2309 { 1889 2310 pSet->aEntries[i - 3].cRefs++; 2311 LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [4] %s(%d) %s\n", pSet, HCPhys, i - 3, pSet->cEntries, pvPage, pSet->aEntries[i - 3].cRefs, pSet->aEntries[i - 3].cInlinedRefs, pSet->aEntries[i - 3].cUnrefs, iPage, pszFile, iLine, pszFunction)); 2312 } 1890 2313 else if ( pSet->aEntries[i - 4].iPage == iPage 1891 2314 && pSet->aEntries[i - 4].cRefs < UINT16_MAX - 1) 2315 { 1892 2316 pSet->aEntries[i - 4].cRefs++; 2317 
LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [4] %s(%d) %s\n", pSet, HCPhys, i - 4, pSet->cEntries, pvPage, pSet->aEntries[i - 4].cRefs, pSet->aEntries[i - 4].cInlinedRefs, pSet->aEntries[i - 4].cUnrefs, iPage, pszFile, iLine, pszFunction)); 2318 } 1893 2319 /* Don't bother searching unless we're above a 60% load. */ 1894 2320 else if (RT_LIKELY(i <= (int32_t)RT_ELEMENTS(pSet->aEntries) * 60 / 100)) 1895 2321 { 1896 2322 unsigned iEntry = pSet->cEntries++; 1897 pSet->aEntries[iEntry].cRefs = 1; 1898 pSet->aEntries[iEntry].iPage = iPage; 1899 pSet->aEntries[iEntry].pvPage = pvPage; 1900 pSet->aEntries[iEntry].HCPhys = HCPhys; 2323 pSet->aEntries[iEntry].cRefs = 1; 2324 pSet->aEntries[iEntry].cUnrefs = 0; 2325 pSet->aEntries[iEntry].cInlinedRefs = 0; 2326 pSet->aEntries[iEntry].iPage = iPage; 2327 pSet->aEntries[iEntry].pvPage = pvPage; 2328 pSet->aEntries[iEntry].HCPhys = HCPhys; 1901 2329 pSet->aiHashTable[PGMMAPSET_HASH(HCPhys)] = iEntry; 2330 LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=1/0/0 iPage=%#x [b] %s(%d) %s\n", pSet, HCPhys, iEntry, pSet->cEntries, pvPage, iPage, pszFile, iLine, pszFunction)); 1902 2331 } 1903 2332 else … … 1911 2340 { 1912 2341 pSet->aEntries[i].cRefs++; 1913 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapSetSearchHits); 2342 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetSearchHits); 2343 LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [c] %s(%d) %s\n", pSet, HCPhys, i, pSet->cEntries, pvPage, pSet->aEntries[i].cRefs, pSet->aEntries[i].cInlinedRefs, pSet->aEntries[i].cUnrefs, iPage, pszFile, iLine, pszFunction)); 1914 2344 break; 1915 2345 } 1916 2346 if (i < 0) 1917 2347 { 1918 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapSetSearchMisses);2348 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetSearchMisses); 1919 2349 if (pSet->iSubset < pSet->cEntries) 1920 2350 { 1921 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapSetSearchFlushes);1922 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatR 0DynMapSetSize[(pSet->cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);2351 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetSearchFlushes); 2352 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(pSet->cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]); 1923 2353 AssertMsg(pSet->cEntries < PGMMAPSET_MAX_FILL, ("%u\n", pSet->cEntries)); 1924 2354 pgmDynMapFlushSubset(pSet); … … 1927 2357 if (RT_UNLIKELY(pSet->cEntries >= RT_ELEMENTS(pSet->aEntries))) 1928 2358 { 1929 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapSetOptimize);2359 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetOptimize); 1930 2360 pgmDynMapOptimizeAutoSet(pSet); 1931 2361 } … … 1934 2364 { 1935 2365 unsigned iEntry = pSet->cEntries++; 1936 pSet->aEntries[iEntry].cRefs = 1; 1937 pSet->aEntries[iEntry].iPage = iPage; 1938 pSet->aEntries[iEntry].pvPage = pvPage; 1939 pSet->aEntries[iEntry].HCPhys = HCPhys; 2366 pSet->aEntries[iEntry].cRefs = 1; 2367 pSet->aEntries[iEntry].cUnrefs = 0; 2368 pSet->aEntries[iEntry].cInlinedRefs = 0; 2369 pSet->aEntries[iEntry].iPage = iPage; 2370 pSet->aEntries[iEntry].pvPage = pvPage; 2371 pSet->aEntries[iEntry].HCPhys = HCPhys; 1940 2372 pSet->aiHashTable[PGMMAPSET_HASH(HCPhys)] = iEntry; 2373 LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=1/0/0 iPage=%#x [d] %s(%d) %s\n", pSet, HCPhys, 
iEntry, pSet->cEntries, pvPage, iPage, pszFile, iLine, pszFunction)); 1941 2374 } 1942 2375 else 1943 2376 { 1944 2377 /* We're screwed. */ 1945 pgmR 0DynMapReleasePage(g_pPGMR0DynMap, iPage, 1);1946 1947 RTAssertMsg2Weak(" PGMDynMapHCPage: set is full!\n");2378 pgmRZDynMapReleasePage(pThis, iPage, 1); 2379 2380 RTAssertMsg2Weak("pgmRZDynMapHCPageCommon: set is full!\n"); 1948 2381 if (!g_fPGMR0DynMapTestRunning) 1949 VMMRZCallRing3NoCpu(PGMR 0DYNMAP_2_VM(pSet), VMMCALLRING3_VM_R0_ASSERTION, 0);2382 VMMRZCallRing3NoCpu(PGMRZDYNMAP_SET_2_VM(pSet), VMMCALLRING3_VM_R0_ASSERTION, 0); 1950 2383 *ppv = NULL; 2384 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPage, a); 1951 2385 return VERR_PGM_DYNMAP_FULL_SET; 1952 2386 } … … 1955 2389 1956 2390 *ppv = pvPage; 2391 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPage, a); 1957 2392 return VINF_SUCCESS; 1958 2393 } 1959 1960 1961 #if 0 /* Not used in R0, should internalized the other PGMDynMapHC/GCPage too. */1962 /* documented elsewhere - a bit of a mess. */1963 VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)1964 {1965 #ifdef VBOX_WITH_STATISTICS1966 PVMCPU pVCpu = VMMGetCpu(pVM);1967 #endif1968 /*1969 * Validate state.1970 */1971 STAM_PROFILE_START(&pVCpu->pgm.s.StatR0DynMapHCPage, a);1972 AssertPtr(ppv);1973 AssertMsg(pVM->pgm.s.pvR0DynMapUsed == g_pPGMR0DynMap,1974 ("%p != %p\n", pVM->pgm.s.pvR0DynMapUsed, g_pPGMR0DynMap));1975 AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));1976 PVMCPU pVCpu = VMMGetCpu(pVM);1977 AssertPtr(pVCpu);1978 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;1979 AssertMsg(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries),1980 ("%#x (%u)\n", pSet->cEntries, pSet->cEntries));1981 1982 /*1983 * Call common code.1984 */1985 int rc = pgmR0DynMapHCPageCommon(pSet, HCPhys, ppv);1986 1987 STAM_PROFILE_STOP(&pVCpu->pgm.s.StatR0DynMapHCPage, a);1988 return rc;1989 }1990 #endif1991 2394 1992 2395 … … 2025 2428 { 2026 2429 LogRel(("pgmR0DynMapTest: ****** START ******\n")); 2027 PPGMR0DYNMAP pThis = g_pPGMR0DynMap;2028 2430 PPGMMAPSET pSet = &pVM->aCpus[0].pgm.s.AutoSet; 2431 PPGMRZDYNMAP pThis = PGMRZDYNMAP_SET_2_DYNMAP(pSet); 2029 2432 uint32_t i; 2030 2433 … … 2047 2450 LogRel(("Test #1\n")); 2048 2451 ASMIntDisable(); 2049 PGM DynMapStartAutoSet(&pVM->aCpus[0]);2452 PGMRZDynMapStartAutoSet(&pVM->aCpus[0]); 2050 2453 2051 2454 uint64_t cr3 = ASMGetCR3() & ~(uint64_t)PAGE_OFFSET_MASK; 2052 2455 void *pv = (void *)(intptr_t)-1; 2053 2456 void *pv2 = (void *)(intptr_t)-2; 2054 rc = PGMDynMapHCPage(pVM, cr3, &pv);2055 int rc2 = PGMDynMapHCPage(pVM, cr3, &pv2);2457 rc = pgmRZDynMapHCPageCommon(pVM, cr3, &pv RTLOG_COMMA_SRC_POS); 2458 int rc2 = pgmRZDynMapHCPageCommon(pVM, cr3, &pv2 RTLOG_COMMA_SRC_POS); 2056 2459 ASMIntEnable(); 2057 2460 if ( RT_SUCCESS(rc2) … … 2068 2471 LogRel(("Test #2\n")); 2069 2472 ASMIntDisable(); 2070 PGM DynMapMigrateAutoSet(&pVM->aCpus[0]);2473 PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]); 2071 2474 for (i = 0 ; i < UINT16_MAX*2 - 1 && RT_SUCCESS(rc) && pv2 == pv; i++) 2072 2475 { 2073 2476 pv2 = (void *)(intptr_t)-4; 2074 rc = PGMDynMapHCPage(pVM, cr3, &pv2);2477 rc = pgmRZDynMapHCPageCommon(pVM, cr3, &pv2 RTLOG_COMMA_SRC_POS); 2075 2478 } 2076 2479 ASMIntEnable(); … … 2106 2509 LogRel(("Test #3\n")); 2107 2510 ASMIntDisable(); 2108 PGM DynMapMigrateAutoSet(&pVM->aCpus[0]);2511 PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]); 2109 2512 pv2 = NULL; 2110 2513 for (i = 0 ; i < RT_ELEMENTS(pSet->aEntries) - 5 && RT_SUCCESS(rc) && pv2 != pv; i++) 2111 2514 
{ 2112 2515 pv2 = (void *)(intptr_t)(-5 - i); 2113 rc = PGMDynMapHCPage(pVM, cr3 + PAGE_SIZE * (i + 5), &pv2);2516 rc = pgmRZDynMapHCPageCommon(pVM, cr3 + PAGE_SIZE * (i + 5), &pv2 RTLOG_COMMA_SRC_POS); 2114 2517 } 2115 2518 ASMIntEnable(); … … 2134 2537 LogRel(("Test #4\n")); 2135 2538 ASMIntDisable(); 2136 PGM DynMapMigrateAutoSet(&pVM->aCpus[0]);2539 PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]); 2137 2540 for (i = 0 ; i < RT_ELEMENTS(pSet->aEntries) + 2; i++) 2138 2541 { 2139 rc = PGMDynMapHCPage(pVM, cr3 - PAGE_SIZE * (i + 5), &pv2);2542 rc = pgmRZDynMapHCPageCommon(pVM, cr3 - PAGE_SIZE * (i + 5), &pv2 RTLOG_COMMA_SRC_POS); 2140 2543 if (RT_SUCCESS(rc)) 2141 2544 rc = PGMR0DynMapAssertIntegrity(); … … 2149 2552 LogRel(("Test #5\n")); 2150 2553 ASMIntDisable(); 2151 PGM DynMapMigrateAutoSet(&pVM->aCpus[0]);2152 PGM DynMapReleaseAutoSet(&pVM->aCpus[0]);2153 PGM DynMapStartAutoSet(&pVM->aCpus[0]);2554 PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]); 2555 PGMRZDynMapReleaseAutoSet(&pVM->aCpus[0]); 2556 PGMRZDynMapStartAutoSet(&pVM->aCpus[0]); 2154 2557 ASMIntEnable(); 2155 2558 … … 2179 2582 LogRel(("Test #5\n")); 2180 2583 ASMIntDisable(); 2181 PGM DynMapMigrateAutoSet(&pVM->aCpus[0]);2584 PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]); 2182 2585 RTHCPHYS HCPhysPT = RTR0MemObjGetPagePhysAddr(pThis->pSegHead->ahMemObjPTs[0], 0); 2183 rc = PGMDynMapHCPage(pVM, HCPhysPT, &pv);2586 rc = pgmRZDynMapHCPageCommon(pVM, HCPhysPT, &pv RTLOG_COMMA_SRC_POS); 2184 2587 if (RT_SUCCESS(rc)) 2185 2588 { … … 2216 2619 LogRel(("Cleanup.\n")); 2217 2620 ASMIntDisable(); 2218 PGM DynMapMigrateAutoSet(&pVM->aCpus[0]);2219 PGM DynMapFlushAutoSet(&pVM->aCpus[0]);2220 PGM DynMapReleaseAutoSet(&pVM->aCpus[0]);2621 PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]); 2622 PGMRZDynMapFlushAutoSet(&pVM->aCpus[0]); 2623 PGMRZDynMapReleaseAutoSet(&pVM->aCpus[0]); 2221 2624 ASMIntEnable(); 2222 2625 -
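The hunks above replace the direct RTSpinlock calls with PGMRZDYNMAP_SPINLOCK_ACQUIRE / PGMRZDYNMAP_SPINLOCK_RELEASE / PGMRZDYNMAP_SPINLOCK_REACQUIRE so the same mapping-cache code can be built for both ring-0 and raw-mode context. The macro definitions themselves are not part of this excerpt; the sketch below only illustrates how such a wrapper could look, assuming the ring-0 variant keeps the previous RTSPINLOCKTMP/RTSpinlockAcquire pattern and the raw-mode variant degenerates to a no-op (raw-mode context runs on one CPU with interrupts disabled, so no spinlock should be needed). The local variable name is made up for the sketch.

/* Sketch only - not the actual definitions from PGMRZDynMap.cpp. */
#ifdef IN_RING0
# define PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis) \
    RTSPINLOCKTMP MacroTmp = RTSPINLOCKTMP_INITIALIZER; \
    RTSpinlockAcquire((pThis)->hSpinlock, &MacroTmp)  /* declares the tmp, which must stay in scope until RELEASE */
# define PGMRZDYNMAP_SPINLOCK_REACQUIRE(pThis) \
    RTSpinlockAcquire((pThis)->hSpinlock, &MacroTmp)
# define PGMRZDYNMAP_SPINLOCK_RELEASE(pThis) \
    RTSpinlockRelease((pThis)->hSpinlock, &MacroTmp)
#else  /* IN_RC: assumed single-CPU with interrupts off - nothing to lock */
# define PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis)    do { } while (0)
# define PGMRZDYNMAP_SPINLOCK_REACQUIRE(pThis)  do { } while (0)
# define PGMRZDYNMAP_SPINLOCK_RELEASE(pThis)    do { } while (0)
#endif

This keeps the hot mapping paths identical in both contexts while confining the locking difference to one place.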
trunk/src/VBox/VMM/testcase/tstVMStructRC.cpp
r31140 r31402 461 461 GEN_CHECK_OFF(PGMCPU, offVCpu); 462 462 GEN_CHECK_OFF(PGMCPU, offPGM); 463 #if def VBOX_WITH_2X_4GB_ADDR_SPACE463 #if defined(VBOX_WITH_2X_4GB_ADDR_SPACE) || defined(VBOX_WITH_RAW_MODE) 464 464 GEN_CHECK_OFF(PGMCPU, AutoSet); 465 465 #endif … … 568 568 GEN_CHECK_OFF(PGM, HCPhysInterPaePML4); 569 569 GEN_CHECK_OFF(PGM, pbDynPageMapBaseGC); 570 GEN_CHECK_OFF(PGM, iDynPageMapLast); 571 GEN_CHECK_OFF(PGM, aHCPhysDynPageMapCache); 570 GEN_CHECK_OFF(PGM, pRCDynMap); 572 571 GEN_CHECK_OFF(PGM, pvR0DynMapUsed); 573 572 GEN_CHECK_OFF(PGM, GCPhys4MBPSEMask); … … 575 574 GEN_CHECK_OFF(PGMCPU, fA20Enabled); 576 575 GEN_CHECK_OFF(PGMCPU, fSyncFlags); 577 GEN_CHECK_OFF(PGM, aHCPhysDynPageMapCache);578 GEN_CHECK_OFF(PGM, aLockedDynPageMapCache);579 576 GEN_CHECK_OFF(PGM, CritSect); 580 577 GEN_CHECK_OFF(PGM, pPoolR3); … … 788 785 GEN_CHECK_OFF(PGMPOOL, aPages[1]); 789 786 GEN_CHECK_OFF(PGMPOOL, aPages[PGMPOOL_IDX_FIRST - 1]); 787 GEN_CHECK_SIZE(PGMRCDYNMAP); 788 GEN_CHECK_OFF(PGMRCDYNMAP, u32Magic); 789 GEN_CHECK_OFF(PGMRCDYNMAP, paPages); 790 GEN_CHECK_OFF(PGMRCDYNMAP, cPages); 791 GEN_CHECK_OFF(PGMRCDYNMAP, fLegacyMode); 792 GEN_CHECK_OFF(PGMRCDYNMAP, cLoad); 793 GEN_CHECK_OFF(PGMRCDYNMAP, cMaxLoad); 794 GEN_CHECK_OFF(PGMRCDYNMAP, cGuardPages); 795 GEN_CHECK_OFF(PGMRCDYNMAP, cUsers); 796 GEN_CHECK_SIZE(PGMRCDYNMAPENTRY); 797 GEN_CHECK_OFF(PGMRCDYNMAPENTRY, HCPhys); 798 GEN_CHECK_OFF(PGMRCDYNMAPENTRY, pvPage); 799 GEN_CHECK_OFF(PGMRCDYNMAPENTRY, cRefs); 800 GEN_CHECK_OFF(PGMRCDYNMAPENTRY, uPte.pLegacy); 801 GEN_CHECK_OFF(PGMRCDYNMAPENTRY, uPte.pPae); 802 GEN_CHECK_OFF(PGMRCDYNMAPENTRY, uPte.pv); 803 GEN_CHECK_OFF(PGMMAPSETENTRY, pvPage); 804 GEN_CHECK_OFF(PGMMAPSETENTRY, iPage); 805 GEN_CHECK_OFF(PGMMAPSETENTRY, cRefs); 806 GEN_CHECK_OFF(PGMMAPSETENTRY, cInlinedRefs); 807 GEN_CHECK_OFF(PGMMAPSETENTRY, cUnrefs); 808 GEN_CHECK_OFF(PGMMAPSETENTRY, HCPhys); 790 809 791 810 GEN_CHECK_SIZE(REM); -
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
r31123 r31402 332 332 333 333 /* pgm */ 334 #if def VBOX_WITH_2X_4GB_ADDR_SPACE334 #if defined(VBOX_WITH_2X_4GB_ADDR_SPACE) || defined(VBOX_WITH_RAW_MODE) 335 335 CHECK_MEMBER_ALIGNMENT(PGMCPU, AutoSet, 8); 336 336 #endif
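The two testcase updates mirror the structure changes: the per-CPU auto mapping set entries now carry cInlinedRefs and cUnrefs counters next to the plain cRefs, and the new raw-mode instance data (PGMRCDYNMAP / PGMRCDYNMAPENTRY) is now covered by the raw-mode structure checks. Pieced together from the GEN_CHECK_OFF lines above, a set entry looks roughly like the sketch below; the field types and ordering are guesses, the authoritative definitions live in the PGM internal headers.

/* Approximate layout implied by the offset checks - illustration only. */
typedef struct PGMMAPSETENTRY
{
    void       *pvPage;        /* mapping address of the page */
    uint16_t    iPage;         /* index into the dynamic mapping cache page array */
    uint16_t    cRefs;         /* references taken by explicit mapping calls */
    uint16_t    cInlinedRefs;  /* references taken by the inlined mapping helpers */
    uint16_t    cUnrefs;       /* "no longer used" hints recorded by pgmRZDynMapUnusedHint */
    RTHCPHYS    HCPhys;        /* host physical address backing the mapping */
} PGMMAPSETENTRY;

An entry is still considered live while (uint32_t)cRefs + cInlinedRefs > cUnrefs, which is the condition the IS_MATCHING_ENTRY check and the eviction pass in pgmDynMapOptimizeAutoSet() are built on.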