VirtualBox

Changeset 31402 in vbox


Timestamp:
Aug 5, 2010 12:28:18 PM
Author:
vboxsync
Message:

PGM: Replaced the hazardous raw-mode context dynamic mapping code with the PGMR0DynMap code used by darwin/x86. This is a risky change, but it should pay off once stable by providing 100% certainty that dynamically mapped pages aren't reused behind our back (this has been observed in seemingly benign code paths recently).
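
The hazard referred to above is a mapping-cache slot being recycled while a caller still holds a pointer into it. The PGMR0DynMap design rules this out by reference-counting every slot. A minimal, self-contained C sketch of that invariant (hypothetical types and names, not the VirtualBox code):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct DynMapEntry
    {
        uint64_t HCPhys;   /* host-physical page currently mapped; 0 = free */
        void    *pvPage;   /* fixed virtual address backing this slot */
        int32_t  cRefs;    /* slot may be recycled only when this is 0 */
    } DynMapEntry;

    /* Map HCPhys, preferring a slot that already holds it. */
    static void *dynMapPage(DynMapEntry *paSlots, size_t cSlots, uint64_t HCPhys)
    {
        size_t iFree = cSlots;
        for (size_t i = 0; i < cSlots; i++)
        {
            if (paSlots[i].HCPhys == HCPhys)        /* cache hit: just add a reference */
            {
                paSlots[i].cRefs++;
                return paSlots[i].pvPage;
            }
            if (paSlots[i].cRefs == 0 && iFree == cSlots)
                iFree = i;                          /* remember the first recyclable slot */
        }
        if (iFree == cSlots)
            return NULL;                            /* all slots referenced: caller must back off */
        paSlots[iFree].HCPhys = HCPhys;             /* real code would update the PTE and invlpg here */
        paSlots[iFree].cRefs  = 1;
        return paSlots[iFree].pvPage;
    }

    static void dynUnmapPage(DynMapEntry *pSlot)
    {
        assert(pSlot->cRefs > 0);
        pSlot->cRefs--;                             /* at zero the slot becomes recyclable */
    }

A slot is only eligible for reuse once its count returns to zero, so a stale pointer can never silently start addressing a different page.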

Location:
trunk
Files:
21 edited
1 moved

  • trunk/include/VBox/pgm.h

    r31201 r31402  
    386386
    387387#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE)
    388 VMMDECL(int)        PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv);
    389 VMMDECL(int)        PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv);
    390 # ifdef IN_RC
    391 VMMDECL(int)        PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv);
    392 VMMDECL(void)       PGMDynLockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage);
    393 VMMDECL(void)       PGMDynUnlockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage);
    394 #  ifdef VBOX_STRICT
    395 VMMDECL(void)       PGMDynCheckLocks(PVM pVM);
    396 #  endif
    397 # endif
    398 VMMDECL(void)       PGMDynMapStartAutoSet(PVMCPU pVCpu);
    399 VMMDECL(bool)       PGMDynMapStartOrMigrateAutoSet(PVMCPU pVCpu);
    400 VMMDECL(void)       PGMDynMapReleaseAutoSet(PVMCPU pVCpu);
    401 VMMDECL(void)       PGMDynMapFlushAutoSet(PVMCPU pVCpu);
    402 VMMDECL(void)       PGMDynMapMigrateAutoSet(PVMCPU pVCpu);
    403 VMMDECL(uint32_t)   PGMDynMapPushAutoSubset(PVMCPU pVCpu);
    404 VMMDECL(void)       PGMDynMapPopAutoSubset(PVMCPU pVCpu, uint32_t iPrevSubset);
     388VMMDECL(void)       PGMRZDynMapStartAutoSet(PVMCPU pVCpu);
     389VMMDECL(void)       PGMRZDynMapReleaseAutoSet(PVMCPU pVCpu);
     390VMMDECL(void)       PGMRZDynMapFlushAutoSet(PVMCPU pVCpu);
     391VMMDECL(uint32_t)   PGMRZDynMapPushAutoSubset(PVMCPU pVCpu);
     392VMMDECL(void)       PGMRZDynMapPopAutoSubset(PVMCPU pVCpu, uint32_t iPrevSubset);
    405393#endif
    406 
    407394
    408395VMMDECL(void) PGMSetLargePageUsage(PVM pVM, bool fUseLargePages);
     
    422409 * @{
    423410 */
     411VMMRCDECL(int)      PGMRCDynMapInit(PVM pVM);
    424412/** @} */
    425413#endif /* IN_RC */
     
    441429VMMR0DECL(void)     PGMR0DynMapTermVM(PVM pVM);
    442430VMMR0DECL(int)      PGMR0DynMapAssertIntegrity(void);
     431VMMR0DECL(bool)     PGMR0DynMapStartOrMigrateAutoSet(PVMCPU pVCpu);
     432VMMR0DECL(void)     PGMR0DynMapMigrateAutoSet(PVMCPU pVCpu);
    443433# endif
    444434/** @} */
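
For orientation, here is a hedged sketch of how the renamed ring-0/raw-mode (RZ) API above is meant to bracket execution; it builds only against the VirtualBox headers (include paths assumed) and elides all error handling:

    #include <VBox/pgm.h>
    #include <VBox/types.h>   /* PVMCPU */

    static void demoAutoSetBracket(PVMCPU pVCpu)
    {
        PGMRZDynMapStartAutoSet(pVCpu);               /* open the per-VCPU auto set */

        /* ... code that creates dynamic mappings; each one is recorded
               in the auto set rather than tracked by hand ... */

        uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
        /* ... a nested operation that may create many short-lived mappings ... */
        PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset); /* drop only the nested mappings */

        PGMRZDynMapReleaseAutoSet(pVCpu);             /* release everything opened above */
    }

The subset push/pop pair keeps deeply nested code from overflowing the fixed-size set, which appears to be what the SubsetPopFlushes statistics registered further down count.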
  • trunk/src/VBox/VMM/Makefile.kmk

    r30888 r31402  
    362362        VMMGC/HWACCMGCA.asm \
    363363        VMMRZ/DBGFRZ.cpp \
     364        VMMRZ/PGMRZDynMap.cpp \
    364365        VMMRZ/VMMRZ.cpp \
    365366        VMMAll/CPUMAllRegs.cpp \
     
    494495        VMMR0/VMMR0JmpA-x86.asm
    495496VMMR0_SOURCES.darwin.x86 = \
    496         VMMR0/PGMR0DynMap.cpp
     497        VMMRZ/PGMRZDynMap.cpp
    497498
    498499# disable annoying warnings about array subscript above array bounds in aPages[]
  • trunk/src/VBox/VMM/PATM/VMMGC/PATMGC.cpp

    r30326 r31402  
    292292                    /* We are no longer executing PATM code; set PIF again. */
    293293                    pVM->patm.s.CTXSUFF(pGCState)->fPIF = 1;
     294                    PGMRZDynMapReleaseAutoSet(VMMGetCpu0(pVM));
    294295                    CPUMGCCallV86Code(pRegFrame);
    295296                    /* does not return */
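
The one-line PATM change above follows from the auto-set discipline: CPUMGCCallV86Code never returns, so no caller remains to close the set. A tiny hedged sketch of the pattern, with stand-in names:

    #include <stdnoreturn.h>

    extern void releaseAutoSet(void);           /* stand-in for PGMRZDynMapReleaseAutoSet */
    extern noreturn void enterV86Code(void);    /* stand-in for CPUMGCCallV86Code */

    static noreturn void leaveToV86(void)
    {
        releaseAutoSet();   /* flush dynamic mappings first... */
        enterV86Code();     /* ...because control never comes back here */
    }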
  • trunk/src/VBox/VMM/PGM.cpp

    r31206 r31402  
    481481 * In order to be able to map in and out memory and to be able to support
    482482 * guest with more RAM than we've got virtual address space, we'll employing
    483  * a mapping cache. There is already a tiny one for GC (see PGMGCDynMapGCPageEx)
    484  * and we'll create a similar one for ring-0 unless we decide to setup a dedicate
    485  * memory context for the HWACCM execution.
     483 * a mapping cache.  Normally ring-0 and ring-3 can share the same cache,
     484 * however on 32-bit darwin the ring-0 code is running in a different memory
     485 * context and therefore needs a separate cache.  In raw-mode context we also
     486 * need a separate cache.  The 32-bit darwin mapping cache and the one for
     487 * raw-mode context share a lot of code, see PGMRZDYNMAP.
    486488 *
    487489 *
     
    17201722
    17211723    /* GC only: */
    1722     PGM_REG_COUNTER(&pStats->StatRCDynMapCacheHits,             "/PGM/RC/DynMapCache/Hits" ,          "Number of dynamic page mapping cache hits.");
    1723     PGM_REG_COUNTER(&pStats->StatRCDynMapCacheMisses,           "/PGM/RC/DynMapCache/Misses" ,        "Number of dynamic page mapping cache misses.");
    17241724    PGM_REG_COUNTER(&pStats->StatRCInvlPgConflict,              "/PGM/RC/InvlPgConflict",             "Number of times PGMInvalidatePage() detected a mapping conflict.");
    17251725    PGM_REG_COUNTER(&pStats->StatRCInvlPgSyncMonCR3,            "/PGM/RC/InvlPgSyncMonitorCR3",       "Number of times PGMInvalidatePage() ran into PGM_SYNC_MONITOR_CR3.");
     
    17781778# endif
    17791779        /* R0 only: */
    1780         PGM_REG_COUNTER(&pCpuStats->StatR0DynMapMigrateInvlPg,         "/PGM/CPU%u/R0/DynMapMigrateInvlPg",        "invlpg count in PGMDynMapMigrateAutoSet.");
    1781         PGM_REG_PROFILE(&pCpuStats->StatR0DynMapGCPageInl,             "/PGM/CPU%u/R0/DynMapPageGCPageInl",        "Calls to pgmR0DynMapGCPageInlined.");
    1782         PGM_REG_COUNTER(&pCpuStats->StatR0DynMapGCPageInlHits,         "/PGM/CPU%u/R0/DynMapPageGCPageInl/Hits",   "Hash table lookup hits.");
    1783         PGM_REG_COUNTER(&pCpuStats->StatR0DynMapGCPageInlMisses,       "/PGM/CPU%u/R0/DynMapPageGCPageInl/Misses", "Misses that falls back to code common with PGMDynMapHCPage.");
    1784         PGM_REG_COUNTER(&pCpuStats->StatR0DynMapGCPageInlRamHits,      "/PGM/CPU%u/R0/DynMapPageGCPageInl/RamHits",   "1st ram range hits.");
    1785         PGM_REG_COUNTER(&pCpuStats->StatR0DynMapGCPageInlRamMisses,    "/PGM/CPU%u/R0/DynMapPageGCPageInl/RamMisses", "1st ram range misses, takes slow path.");
    1786         PGM_REG_PROFILE(&pCpuStats->StatR0DynMapHCPageInl,             "/PGM/CPU%u/R0/DynMapPageHCPageInl",        "Calls to pgmR0DynMapHCPageInlined.");
    1787         PGM_REG_COUNTER(&pCpuStats->StatR0DynMapHCPageInlHits,         "/PGM/CPU%u/R0/DynMapPageHCPageInl/Hits",   "Hash table lookup hits.");
    1788         PGM_REG_COUNTER(&pCpuStats->StatR0DynMapHCPageInlMisses,       "/PGM/CPU%u/R0/DynMapPageHCPageInl/Misses", "Misses that falls back to code common with PGMDynMapHCPage.");
    1789         PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPage,                  "/PGM/CPU%u/R0/DynMapPage",                 "Calls to pgmR0DynMapPage");
    1790         PGM_REG_COUNTER(&pCpuStats->StatR0DynMapSetOptimize,           "/PGM/CPU%u/R0/DynMapPage/SetOptimize",     "Calls to pgmDynMapOptimizeAutoSet.");
    1791         PGM_REG_COUNTER(&pCpuStats->StatR0DynMapSetSearchFlushes,      "/PGM/CPU%u/R0/DynMapPage/SetSearchFlushes","Set search restorting to subset flushes.");
    1792         PGM_REG_COUNTER(&pCpuStats->StatR0DynMapSetSearchHits,         "/PGM/CPU%u/R0/DynMapPage/SetSearchHits",   "Set search hits.");
    1793         PGM_REG_COUNTER(&pCpuStats->StatR0DynMapSetSearchMisses,       "/PGM/CPU%u/R0/DynMapPage/SetSearchMisses", "Set search misses.");
    1794         PGM_REG_PROFILE(&pCpuStats->StatR0DynMapHCPage,                "/PGM/CPU%u/R0/DynMapPage/HCPage",          "Calls to PGMDynMapHCPage (ring-0).");
    1795         PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPageHits0,             "/PGM/CPU%u/R0/DynMapPage/Hits0",           "Hits at iPage+0");
    1796         PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPageHits1,             "/PGM/CPU%u/R0/DynMapPage/Hits1",           "Hits at iPage+1");
    1797         PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPageHits2,             "/PGM/CPU%u/R0/DynMapPage/Hits2",           "Hits at iPage+2");
    1798         PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPageInvlPg,            "/PGM/CPU%u/R0/DynMapPage/InvlPg",          "invlpg count in pgmR0DynMapPageSlow.");
    1799         PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPageSlow,              "/PGM/CPU%u/R0/DynMapPage/Slow",            "Calls to pgmR0DynMapPageSlow - subtract this from pgmR0DynMapPage to get 1st level hits.");
    1800         PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPageSlowLoopHits,      "/PGM/CPU%u/R0/DynMapPage/SlowLoopHits" ,   "Hits in the loop path.");
    1801         PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPageSlowLoopMisses,    "/PGM/CPU%u/R0/DynMapPage/SlowLoopMisses",  "Misses in the loop path. NonLoopMisses = Slow - SlowLoopHit - SlowLoopMisses");
    1802         //PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPageSlowLostHits,      "/PGM/CPU%u/R0/DynMapPage/SlowLostHits",    "Lost hits.");
    1803         PGM_REG_COUNTER(&pCpuStats->StatR0DynMapSubsets,               "/PGM/CPU%u/R0/Subsets",                    "Times PGMDynMapPushAutoSubset was called.");
    1804         PGM_REG_COUNTER(&pCpuStats->StatR0DynMapPopFlushes,            "/PGM/CPU%u/R0/SubsetPopFlushes",           "Times PGMDynMapPopAutoSubset flushes the subset.");
    1805         PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[0],           "/PGM/CPU%u/R0/SetSize000..09",              "00-09% filled");
    1806         PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[1],           "/PGM/CPU%u/R0/SetSize010..19",              "10-19% filled");
    1807         PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[2],           "/PGM/CPU%u/R0/SetSize020..29",              "20-29% filled");
    1808         PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[3],           "/PGM/CPU%u/R0/SetSize030..39",              "30-39% filled");
    1809         PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[4],           "/PGM/CPU%u/R0/SetSize040..49",              "40-49% filled");
    1810         PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[5],           "/PGM/CPU%u/R0/SetSize050..59",              "50-59% filled");
    1811         PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[6],           "/PGM/CPU%u/R0/SetSize060..69",              "60-69% filled");
    1812         PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[7],           "/PGM/CPU%u/R0/SetSize070..79",              "70-79% filled");
    1813         PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[8],           "/PGM/CPU%u/R0/SetSize080..89",              "80-89% filled");
    1814         PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[9],           "/PGM/CPU%u/R0/SetSize090..99",              "90-99% filled");
    1815         PGM_REG_COUNTER(&pCpuStats->aStatR0DynMapSetSize[10],          "/PGM/CPU%u/R0/SetSize100",                 "100% filled");
    18161780
    18171781        /* RZ only: */
     
    18691833        PGM_REG_COUNTER(&pCpuStats->StatRZGuestROMWriteUnhandled,      "/PGM/CPU%u/RZ/ROMWriteUnhandled",              "The number of times the Guest ROM change was passed back to the recompiler.");
    18701834
     1835        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapMigrateInvlPg,         "/PGM/CPU%u/RZ/DynMap/MigrateInvlPg",            "invlpg count in PGMR0DynMapMigrateAutoSet.");
     1836        PGM_REG_PROFILE(&pCpuStats->StatRZDynMapGCPageInl,             "/PGM/CPU%u/RZ/DynMap/PageGCPageInl",            "Calls to pgmR0DynMapGCPageInlined.");
     1837        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapGCPageInlHits,         "/PGM/CPU%u/RZ/DynMap/PageGCPageInl/Hits",       "Hash table lookup hits.");
     1838        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapGCPageInlMisses,       "/PGM/CPU%u/RZ/DynMap/PageGCPageInl/Misses",     "Misses that falls back to the code common.");
     1839        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapGCPageInlRamHits,      "/PGM/CPU%u/RZ/DynMap/PageGCPageInl/RamHits",    "1st ram range hits.");
     1840        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapGCPageInlRamMisses,    "/PGM/CPU%u/RZ/DynMap/PageGCPageInl/RamMisses",  "1st ram range misses, takes slow path.");
     1841        PGM_REG_PROFILE(&pCpuStats->StatRZDynMapHCPageInl,             "/PGM/CPU%u/RZ/DynMap/PageHCPageInl",            "Calls to pgmRZDynMapHCPageInlined.");
     1842        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapHCPageInlHits,         "/PGM/CPU%u/RZ/DynMap/PageHCPageInl/Hits",       "Hash table lookup hits.");
     1843        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapHCPageInlMisses,       "/PGM/CPU%u/RZ/DynMap/PageHCPageInl/Misses",     "Misses that falls back to the code common.");
     1844        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPage,                  "/PGM/CPU%u/RZ/DynMap/Page",                     "Calls to pgmR0DynMapPage");
     1845        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapSetOptimize,           "/PGM/CPU%u/RZ/DynMap/Page/SetOptimize",         "Calls to pgmRZDynMapOptimizeAutoSet.");
     1846        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapSetSearchFlushes,      "/PGM/CPU%u/RZ/DynMap/Page/SetSearchFlushes",    "Set search restorting to subset flushes.");
     1847        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapSetSearchHits,         "/PGM/CPU%u/RZ/DynMap/Page/SetSearchHits",       "Set search hits.");
     1848        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapSetSearchMisses,       "/PGM/CPU%u/RZ/DynMap/Page/SetSearchMisses",     "Set search misses.");
     1849        PGM_REG_PROFILE(&pCpuStats->StatRZDynMapHCPage,                "/PGM/CPU%u/RZ/DynMap/Page/HCPage",              "Calls to pgmRZDynMapHCPageCommon (ring-0).");
     1850        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageHits0,             "/PGM/CPU%u/RZ/DynMap/Page/Hits0",               "Hits at iPage+0");
     1851        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageHits1,             "/PGM/CPU%u/RZ/DynMap/Page/Hits1",               "Hits at iPage+1");
     1852        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageHits2,             "/PGM/CPU%u/RZ/DynMap/Page/Hits2",               "Hits at iPage+2");
     1853        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageInvlPg,            "/PGM/CPU%u/RZ/DynMap/Page/InvlPg",              "invlpg count in pgmR0DynMapPageSlow.");
     1854        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageSlow,              "/PGM/CPU%u/RZ/DynMap/Page/Slow",                "Calls to pgmR0DynMapPageSlow - subtract this from pgmR0DynMapPage to get 1st level hits.");
     1855        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageSlowLoopHits,      "/PGM/CPU%u/RZ/DynMap/Page/SlowLoopHits" ,       "Hits in the loop path.");
     1856        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageSlowLoopMisses,    "/PGM/CPU%u/RZ/DynMap/Page/SlowLoopMisses",      "Misses in the loop path. NonLoopMisses = Slow - SlowLoopHit - SlowLoopMisses");
     1857        //PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageSlowLostHits,      "/PGM/CPU%u/R0/DynMap/Page/SlowLostHits",        "Lost hits.");
     1858        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapSubsets,               "/PGM/CPU%u/RZ/DynMap/Subsets",                  "Times PGMRZDynMapPushAutoSubset was called.");
     1859        PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPopFlushes,            "/PGM/CPU%u/RZ/DynMap/SubsetPopFlushes",         "Times PGMRZDynMapPopAutoSubset flushes the subset.");
     1860        PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[0],      "/PGM/CPU%u/RZ/DynMap/SetFilledPct000..09",      "00-09% filled (RC: min(set-size, dynmap-size))");
     1861        PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[1],      "/PGM/CPU%u/RZ/DynMap/SetFilledPct010..19",      "10-19% filled (RC: min(set-size, dynmap-size))");
     1862        PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[2],      "/PGM/CPU%u/RZ/DynMap/SetFilledPct020..29",      "20-29% filled (RC: min(set-size, dynmap-size))");
     1863        PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[3],      "/PGM/CPU%u/RZ/DynMap/SetFilledPct030..39",      "30-39% filled (RC: min(set-size, dynmap-size))");
     1864        PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[4],      "/PGM/CPU%u/RZ/DynMap/SetFilledPct040..49",      "40-49% filled (RC: min(set-size, dynmap-size))");
     1865        PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[5],      "/PGM/CPU%u/RZ/DynMap/SetFilledPct050..59",      "50-59% filled (RC: min(set-size, dynmap-size))");
     1866        PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[6],      "/PGM/CPU%u/RZ/DynMap/SetFilledPct060..69",      "60-69% filled (RC: min(set-size, dynmap-size))");
     1867        PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[7],      "/PGM/CPU%u/RZ/DynMap/SetFilledPct070..79",      "70-79% filled (RC: min(set-size, dynmap-size))");
     1868        PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[8],      "/PGM/CPU%u/RZ/DynMap/SetFilledPct080..89",      "80-89% filled (RC: min(set-size, dynmap-size))");
     1869        PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[9],      "/PGM/CPU%u/RZ/DynMap/SetFilledPct090..99",      "90-99% filled (RC: min(set-size, dynmap-size))");
     1870        PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[10],     "/PGM/CPU%u/RZ/DynMap/SetFilledPct100",          "100% filled (RC: min(set-size, dynmap-size))");
     1871
    18711872        /* HC only: */
    18721873
     
    20372038    pVM->pgm.s.paDynPageMapPaePTEsGC   = pMapping->aPTs[iPT].paPaePTsRC + iPG * sizeof(pMapping->aPTs[0].paPaePTsR3->a[0]);
    20382039
    2039     /* init cache */
     2040    /* init cache area */
    20402041    RTHCPHYS HCPhysDummy = MMR3PageDummyHCPhys(pVM);
    2041     for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache); i++)
    2042         pVM->pgm.s.aHCPhysDynPageMapCache[i] = HCPhysDummy;
    2043 
    2044     for (unsigned i = 0; i < MM_HYPER_DYNAMIC_SIZE; i += PAGE_SIZE)
    2045     {
    2046         rc = PGMMap(pVM, pVM->pgm.s.pbDynPageMapBaseGC + i, HCPhysDummy, PAGE_SIZE, 0);
     2042    for (uint32_t offDynMap = 0; offDynMap < MM_HYPER_DYNAMIC_SIZE; offDynMap += PAGE_SIZE)
     2043    {
     2044        rc = PGMMap(pVM, pVM->pgm.s.pbDynPageMapBaseGC + offDynMap, HCPhysDummy, PAGE_SIZE, 0);
    20472045        AssertRCReturn(rc, rc);
    20482046    }
     
    22052203     */
    22062204    pVM->pgm.s.paDynPageMap32BitPTEsGC += offDelta;
    2207     pVM->pgm.s.paDynPageMapPaePTEsGC += offDelta;
    2208     pVM->pgm.s.pbDynPageMapBaseGC += offDelta;
     2205    pVM->pgm.s.paDynPageMapPaePTEsGC   += offDelta;
     2206    pVM->pgm.s.pbDynPageMapBaseGC      += offDelta;
     2207
     2208    if (pVM->pgm.s.pRCDynMap)
     2209    {
     2210        pVM->pgm.s.pRCDynMap += offDelta;
     2211        PPGMRCDYNMAP pDynMap = (PPGMRCDYNMAP)MMHyperRCToCC(pVM, pVM->pgm.s.pRCDynMap);
     2212
     2213        pDynMap->paPages     += offDelta;
     2214        PPGMRCDYNMAPENTRY paPages = (PPGMRCDYNMAPENTRY)MMHyperRCToCC(pVM, pDynMap->paPages);
     2215
     2216        for (uint32_t iPage = 0; iPage < pDynMap->cPages; iPage++)
     2217        {
     2218            paPages[iPage].pvPage  += offDelta;
     2219            paPages[iPage].uPte.pv += offDelta;
     2220        }
     2221    }
    22092222
    22102223    /*
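
The new relocation block above is why PGMRCDYNMAP has to live in PGMInternal.h (see the structure definition further down): PGMR3Relocate must walk it and shift every raw-mode-context pointer by the relocation delta. A self-contained sketch of such a fix-up pass, with hypothetical types and a stand-in for MMHyperRCToCC:

    #include <stdint.h>

    typedef uint32_t RCPTR;                         /* assumption: RC pointers are 32-bit */
    typedef struct Entry { RCPTR pvPage; RCPTR pvPte; } Entry;
    typedef struct Cache { RCPTR paEntries; uint32_t cEntries; } Cache;

    /* Stand-in for MMHyperRCToCC: translate an RC address into a pointer
       usable in the current (host) context. */
    static void *rcToCurrentCtx(uintptr_t uCtxBase, RCPTR RCPtr)
    {
        return (void *)(uCtxBase + RCPtr);
    }

    static void relocateCache(uintptr_t uCtxBase, RCPTR *pRCCache, int32_t offDelta)
    {
        *pRCCache += (RCPTR)offDelta;               /* the cache structure itself moved */
        Cache *pCache = (Cache *)rcToCurrentCtx(uCtxBase, *pRCCache);

        pCache->paEntries += (RCPTR)offDelta;       /* ...and so did its entry array */
        Entry *paEntries = (Entry *)rcToCurrentCtx(uCtxBase, pCache->paEntries);

        for (uint32_t i = 0; i < pCache->cEntries; i++)
        {                                           /* ...and every pointer stored inside */
            paEntries[i].pvPage += (RCPTR)offDelta;
            paEntries[i].pvPte  += (RCPTR)offDelta;
        }
    }

A negative delta works too: the unsigned wrap-around of 32-bit addition gives the correct result.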
  • trunk/src/VBox/VMM/PGMInline.h

    r31178 r31402  
    286286}
    287287
    288 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    289 
    290 /**
    291  * Inlined version of the ring-0 version of PGMDynMapHCPage that
    292  * optimizes access to pages already in the set.
     288#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
     289
     290/**
     291 * Inlined version of the ring-0 version of the host page mapping code
     292 * that optimizes access to pages already in the set.
    293293 *
    294294 * @returns VINF_SUCCESS. Will bail out to ring-3 on failure.
     
    297297 * @param   ppv         Where to store the mapping address.
    298298 */
    299 DECLINLINE(int) pgmR0DynMapHCPageInlined(PVMCPU pVCpu, RTHCPHYS HCPhys, void **ppv)
     299DECLINLINE(int) pgmRZDynMapHCPageInlined(PVMCPU pVCpu, RTHCPHYS HCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
    300300{
    301301    PPGMMAPSET  pSet    = &pVCpu->pgm.s.AutoSet;
    302302
    303     STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapHCPageInl, a);
     303    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInl, a);
    304304    Assert(!(HCPhys & PAGE_OFFSET_MASK));
    305305    Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
     
    308308    unsigned    iEntry  = pSet->aiHashTable[iHash];
    309309    if (    iEntry < pSet->cEntries
    310         &&  pSet->aEntries[iEntry].HCPhys == HCPhys)
    311     {
     310        &&  pSet->aEntries[iEntry].HCPhys == HCPhys
     311        &&  pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
     312    {
     313        pSet->aEntries[iEntry].cInlinedRefs++;
    312314        *ppv = pSet->aEntries[iEntry].pvPage;
    313         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapHCPageInlHits);
     315        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInlHits);
    314316    }
    315317    else
    316318    {
    317         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapHCPageInlMisses);
    318         pgmR0DynMapHCPageCommon(pSet, HCPhys, ppv);
    319     }
    320 
    321     STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapHCPageInl, a);
     319        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInlMisses);
     320        pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
     321    }
     322
     323    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInl, a);
    322324    return VINF_SUCCESS;
    323325}
     
    325327
    326328/**
    327  * Inlined version of the ring-0 version of PGMDynMapGCPage that optimizes
    328  * access to pages already in the set.
    329  *
    330  * @returns See PGMDynMapGCPage.
     329 * Inlined version of the guest page mapping code that optimizes access to pages
     330 * already in the set.
     331 *
     332 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
    331333 * @param   pVM         The VM handle.
    332334 * @param   pVCpu       The current CPU.
     
    334336 * @param   ppv         Where to store the mapping address.
    335337 */
    336 DECLINLINE(int) pgmR0DynMapGCPageV2Inlined(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv)
    337 {
    338     STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInl, a);
     338DECLINLINE(int) pgmRZDynMapGCPageV2Inlined(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
     339{
     340    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
    339341    AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys));
    340342
     
    347349        /** @todo   || page state stuff */))
    348350    {
    349         /* This case is not counted into StatR0DynMapGCPageInl. */
    350         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlRamMisses);
    351         return PGMDynMapGCPage(pVM, GCPhys, ppv);
     351        /* This case is not counted into StatRZDynMapGCPageInl. */
     352        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamMisses);
     353        return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
    352354    }
    353355
    354356    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
    355     STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlRamHits);
     357    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamHits);
    356358
    357359    /*
    358      * pgmR0DynMapHCPageInlined with out stats.
     360     * pgmRZDynMapHCPageInlined with out stats.
    359361     */
    360362    PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
     
    365367    unsigned    iEntry  = pSet->aiHashTable[iHash];
    366368    if (    iEntry < pSet->cEntries
    367         &&  pSet->aEntries[iEntry].HCPhys == HCPhys)
    368     {
     369        &&  pSet->aEntries[iEntry].HCPhys == HCPhys
     370        &&  pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
     371    {
     372        pSet->aEntries[iEntry].cInlinedRefs++;
    369373        *ppv = pSet->aEntries[iEntry].pvPage;
    370         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlHits);
     374        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlHits);
    371375    }
    372376    else
    373377    {
    374         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlMisses);
    375         pgmR0DynMapHCPageCommon(pSet, HCPhys, ppv);
    376     }
    377 
    378     STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInl, a);
     378        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlMisses);
     379        pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
     380    }
     381
     382    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
    379383    return VINF_SUCCESS;
    380384}
     
    382386
    383387/**
    384  * Inlined version of the ring-0 version of PGMDynMapGCPage that optimizes
     388 * Inlined version of the ring-0 version of guest page mapping that optimizes
    385389 * access to pages already in the set.
    386390 *
    387  * @returns See PGMDynMapGCPage.
     391 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
    388392 * @param   pVCpu       The current CPU.
    389393 * @param   GCPhys      The guest physical address of the page.
    390394 * @param   ppv         Where to store the mapping address.
    391395 */
    392 DECLINLINE(int) pgmR0DynMapGCPageInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv)
    393 {
    394     return pgmR0DynMapGCPageV2Inlined(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhys, ppv);
    395 }
    396 
    397 
    398 /**
    399  * Inlined version of the ring-0 version of PGMDynMapGCPageOff that optimizes
    400  * access to pages already in the set.
    401  *
    402  * @returns See PGMDynMapGCPage.
     396DECLINLINE(int) pgmRZDynMapGCPageInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
     397{
     398    return pgmRZDynMapGCPageV2Inlined(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
     399}
     400
     401
     402/**
     403 * Inlined version of the ring-0 version of the guest byte mapping code
     404 * that optimizes access to pages already in the set.
     405 *
     406 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
    403407 * @param   pVCpu       The current CPU.
    404408 * @param   HCPhys      The physical address of the page.
    405  * @param   ppv         Where to store the mapping address.
    406  */
    407 DECLINLINE(int) pgmR0DynMapGCPageOffInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv)
    408 {
    409     STAM_PROFILE_START(&pVCpu->pgm.s.StatR0DynMapGCPageInl, a);
     409 * @param   ppv         Where to store the mapping address. The offset is
     410 *                      preserved.
     411 */
     412DECLINLINE(int) pgmRZDynMapGCPageOffInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
     413{
     414    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZDynMapGCPageInl, a);
    410415
    411416    /*
     
    418423        /** @todo   || page state stuff */))
    419424    {
    420         /* This case is not counted into StatR0DynMapGCPageInl. */
    421         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlRamMisses);
    422         return PGMDynMapGCPageOff(pVM, GCPhys, ppv);
     425        /* This case is not counted into StatRZDynMapGCPageInl. */
     426        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamMisses);
     427        return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
    423428    }
    424429
    425430    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
    426     STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlRamHits);
     431    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamHits);
    427432
    428433    /*
    429      * pgmR0DynMapHCPageInlined with out stats.
     434     * pgmRZDynMapHCPageInlined with out stats.
    430435     */
    431436    PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
     
    436441    unsigned    iEntry  = pSet->aiHashTable[iHash];
    437442    if (    iEntry < pSet->cEntries
    438         &&  pSet->aEntries[iEntry].HCPhys == HCPhys)
    439     {
     443        &&  pSet->aEntries[iEntry].HCPhys == HCPhys
     444        &&  pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
     445    {
     446        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlHits);
     447        pSet->aEntries[iEntry].cInlinedRefs++;
    440448        *ppv = (void *)((uintptr_t)pSet->aEntries[iEntry].pvPage | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
    441         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlHits);
    442449    }
    443450    else
    444451    {
    445         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlMisses);
    446         pgmR0DynMapHCPageCommon(pSet, HCPhys, ppv);
     452        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlMisses);
     453        pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
    447454        *ppv = (void *)((uintptr_t)*ppv | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
    448455    }
    449456
    450     STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInl, a);
     457    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
    451458    return VINF_SUCCESS;
    452459}
     
    462469 * @param   pPage       The page.
    463470 */
    464 DECLINLINE(void *) pgmPoolMapPageInlined(PVM pVM, PPGMPOOLPAGE pPage)
     471DECLINLINE(void *) pgmPoolMapPageInlined(PVM pVM, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL)
    465472{
    466473    if (pPage->idx >= PGMPOOL_IDX_FIRST)
     
    468475        Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
    469476        void *pv;
    470 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    471         pgmR0DynMapHCPageInlined(VMMGetCpu(pVM), pPage->Core.Key, &pv);
    472 # else
    473         PGMDynMapHCPage(pVM, pPage->Core.Key, &pv);
    474 # endif
     477        pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS);
    475478        return pv;
    476479    }
     
    486489 * @param   pPage       The page.
    487490 */
    488 DECLINLINE(void *) pgmPoolMapPageV2Inlined(PVM pVM, PVMCPU pVCpu, PPGMPOOLPAGE pPage)
     491DECLINLINE(void *) pgmPoolMapPageV2Inlined(PVM pVM, PVMCPU pVCpu, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL)
    489492{
    490493    if (pPage->idx >= PGMPOOL_IDX_FIRST)
     
    492495        Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
    493496        void *pv;
    494 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    495497        Assert(pVCpu == VMMGetCpu(pVM));
    496         pgmR0DynMapHCPageInlined(pVCpu, pPage->Core.Key, &pv);
    497 # else
    498         PGMDynMapHCPage(pVM, pPage->Core.Key, &pv);
    499 # endif
     498        pgmRZDynMapHCPageInlined(pVCpu, pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS);
    500499        return pv;
    501500    }
     
    514513 * @param   HCPhys      HC Physical address of the page.
    515514 */
    516 DECLINLINE(void *) pgmDynMapHCPageOff(PVM pVM, RTHCPHYS HCPhys)
     515DECLINLINE(void *) pgmRZDynMapHCPageOff(PVM pVM, RTHCPHYS HCPhys RTLOG_COMMA_SRC_POS_DECL)
    517516{
    518517    void *pv;
    519 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    520     pgmR0DynMapHCPageInlined(VMMGetCpu(pVM), HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, &pv);
    521 # else
    522     PGMDynMapHCPage(pVM, HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, &pv);
    523 # endif
     518    pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, &pv RTLOG_COMMA_SRC_POS_ARGS);
    524519    pv = (void *)((uintptr_t)pv | ((uintptr_t)HCPhys & PAGE_OFFSET_MASK));
    525520    return pv;
     
    651646{
    652647#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    653     int rc = pgmR0DynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPd);
     648    int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPd RTLOG_COMMA_SRC_POS);
    654649    if (RT_FAILURE(rc))
    655650    {
     
    676671#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    677672    PX86PD pGuestPD = NULL;
    678     int rc = pgmR0DynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPD);
     673    int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPD RTLOG_COMMA_SRC_POS);
    679674    if (RT_FAILURE(rc))
    680675    {
     
    705700{
    706701#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    707     int rc = pgmR0DynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPdpt);
     702    int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPdpt RTLOG_COMMA_SRC_POS);
    708703    if (RT_FAILURE(rc))
    709704    {
     
    749744#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    750745    PX86PDPT pGuestPDPT = NULL;
    751     int rc = pgmR0DynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPDPT);
     746    int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPDPT RTLOG_COMMA_SRC_POS);
    752747    AssertRCReturn(rc, NULL);
    753748#else
     
    785780#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    786781            PX86PDPAE   pGuestPD = NULL;
    787             int rc = pgmR0DynMapGCPageInlined(pVCpu,
     782            int rc = pgmRZDynMapGCPageInlined(pVCpu,
    788783                                              pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK,
    789                                               (void **)&pGuestPD);
     784                                              (void **)&pGuestPD
     785                                              RTLOG_COMMA_SRC_POS);
    790786            if (RT_SUCCESS(rc))
    791787                return pGuestPD->a[iPD];
     
    837833#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    838834    PX86PDPAE   pGuestPD = NULL;
    839     int rc = pgmR0DynMapGCPageInlined(pVCpu,
     835    int rc = pgmRZDynMapGCPageInlined(pVCpu,
    840836                                      pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK,
    841                                       (void **)&pGuestPD);
     837                                      (void **)&pGuestPD
     838                                      RTLOG_COMMA_SRC_POS);
    842839    if (RT_FAILURE(rc))
    843840    {
     
    868865{
    869866#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    870     int rc = pgmR0DynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPml4);
     867    int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPml4 RTLOG_COMMA_SRC_POS);
    871868    if (RT_FAILURE(rc))
    872869    {
     
    910907#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    911908    PX86PML4 pGuestPml4;
    912     int rc = pgmR0DynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPml4);
     909    int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPml4 RTLOG_COMMA_SRC_POS);
    913910    AssertRCReturn(rc, NULL);
    914911#else
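
The inlined helpers above all share one fast path: hash the physical address into the per-VCPU auto set and, on a hit, merely bump the new cInlinedRefs counter. A simplified, self-contained rendering of that path (hypothetical sizes and hash, with a stand-in for pgmRZDynMapHCPageCommon):

    #include <stdint.h>

    #define SET_HASH_SIZE  64
    #define SET_MAX        128

    typedef struct SetEntry
    {
        uint64_t HCPhys;
        void    *pvPage;
        uint16_t cRefs;
        uint16_t cInlinedRefs;
    } SetEntry;

    typedef struct MapSet
    {
        uint32_t cEntries;
        uint8_t  aiHashTable[SET_HASH_SIZE];
        SetEntry aEntries[SET_MAX];
    } MapSet;

    /* Slow-path stand-in for pgmRZDynMapHCPageCommon. */
    extern int mapPageSlow(MapSet *pSet, uint64_t HCPhys, void **ppv);

    static int mapPageInlined(MapSet *pSet, uint64_t HCPhys, void **ppv)
    {
        unsigned iHash  = (unsigned)(HCPhys >> 12) % SET_HASH_SIZE; /* page-aligned input assumed */
        unsigned iEntry = pSet->aiHashTable[iHash];
        if (   iEntry < pSet->cEntries
            && pSet->aEntries[iEntry].HCPhys == HCPhys
            && pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
        {
            pSet->aEntries[iEntry].cInlinedRefs++;  /* fast path: page already in the set */
            *ppv = pSet->aEntries[iEntry].pvPage;
            return 0;
        }
        return mapPageSlow(pSet, HCPhys, ppv);      /* miss, or counter saturated */
    }

The extra cInlinedRefs < UINT16_MAX - 1 guard, also visible in the diff, forces saturated entries onto the slow path so the 16-bit counter can never wrap.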
  • trunk/src/VBox/VMM/PGMInternal.h

    r31206 r31402  
    234234 *                      this.
    235235 *
    236  * @remark  In RC this uses PGMDynMapHCPage(), so it will consume of the small
    237  *          page window employeed by that function. Be careful.
     236 * @remark  Use with care as we don't have so much dynamic mapping space in
     237 *          ring-0 on 32-bit darwin and in RC.
    238238 * @remark  There is no need to assert on the result.
    239239 */
    240 #ifdef IN_RC
     240#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
    241241# define PGM_HCPHYS_2_PTR(pVM, pVCpu, HCPhys, ppv) \
    242      PGMDynMapHCPage(pVM, HCPhys, (void **)(ppv))
    243 #elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    244 # define PGM_HCPHYS_2_PTR(pVM, pVCpu, HCPhys, ppv) \
    245      pgmR0DynMapHCPageInlined(pVCpu, HCPhys, (void **)(ppv))
     242     pgmRZDynMapHCPageInlined(pVCpu, HCPhys, (void **)(ppv) RTLOG_COMMA_SRC_POS)
    246243#else
    247244# define PGM_HCPHYS_2_PTR(pVM, pVCpu, HCPhys, ppv) \
     
    258255 * @param   ppv     Where to store the virtual address. No need to cast this.
    259256 *
    260  * @remark  In GC this uses PGMGCDynMapGCPage(), so it will consume of the
    261  *          small page window employeed by that function. Be careful.
     257 * @remark  Use with care as we don't have so much dynamic mapping space in
     258 *          ring-0 on 32-bit darwin and in RC.
    262259 * @remark  There is no need to assert on the result.
    263260 */
    264 #ifdef IN_RC
     261#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
    265262# define PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, ppv) \
    266      PGMDynMapGCPage(pVM, GCPhys, (void **)(ppv))
    267 #elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    268 # define PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, ppv) \
    269      pgmR0DynMapGCPageV2Inlined(pVM, pVCpu, GCPhys, (void **)(ppv))
     263     pgmRZDynMapGCPageV2Inlined(pVM, pVCpu, GCPhys, (void **)(ppv) RTLOG_COMMA_SRC_POS)
    270264#else
    271265# define PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, ppv) \
     
    281275 * @param   ppv     Where to store the virtual address. No need to cast this.
    282276 *
    283  * @remark  In GC this uses PGMGCDynMapGCPage(), so it will consume of the
    284  *          small page window employeed by that function. Be careful.
     277 * @remark  Use with care as we don't have so much dynamic mapping space in
     278 *          ring-0 on 32-bit darwin and in RC.
    285279 * @remark  There is no need to assert on the result.
    286280 */
     
    295289 * @param   ppv     Where to store the virtual address. No need to cast this.
    296290 *
    297  * @remark  In RC this uses PGMGCDynMapGCPage(), so it will consume of the
    298  *          small page window employeed by that function. Be careful.
     291 * @remark  Use with care as we don't have so much dynamic mapping space in
     292 *          ring-0 on 32-bit darwin and in RC.
    299293 * @remark  There is no need to assert on the result.
    300294 */
     
    309303 * @param   ppv     Where to store the virtual address. No need to cast this.
    310304 *
    311  * @remark  In GC this uses PGMGCDynMapGCPage(), so it will consume of the
    312  *          small page window employeed by that function. Be careful.
     305 * @remark  Use with care as we don't have so much dynamic mapping space in
     306 *          ring-0 on 32-bit darwin and in RC.
    313307 * @remark  There is no need to assert on the result.
    314308 */
    315309#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    316310# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
    317      PGMDynMapGCPageOff(pVM, GCPhys, (void **)(ppv))
     311     pgmRZDynMapGCPageOffInlined(VMMGetCpu(pVM), GCPhys, (void **)(ppv) RTLOG_COMMA_SRC_POS)
    318312#else
    319313# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
    320314     PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
    321315#endif
     316
     317/** @def PGM_DYNMAP_UNUSED_HINT
     318 * Hints to the dynamic mapping code in RC and R0/darwin that the specified page
     319 * is no longer used.
     320 *
     321 * @param   pVCpu   The current CPU.
     322 * @param   pPage   The pool page.
     323 */
     324#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
     325# ifdef LOG_ENABLED
     326#  define PGM_DYNMAP_UNUSED_HINT(pVCpu, pvPage)  pgmRZDynMapUnusedHint(pVCpu, pvPage, RT_SRC_POS)
     327# else
     328#  define PGM_DYNMAP_UNUSED_HINT(pVCpu, pvPage)  pgmRZDynMapUnusedHint(pVCpu, pvPage)
     329# endif
     330#else
     331# define PGM_DYNMAP_UNUSED_HINT(pVCpu, pvPage)  do {} while (0)
     332#endif
     333
     334/** @def PGM_DYNMAP_UNUSED_HINT_VM
     335 * Hints to the dynamic mapping code in RC and R0/darwin that the specified page
     336 * is no longer used.
     337 *
     338 * @param   pVM     The VM handle.
     339 * @param   pPage   The pool page.
     340 */
     341#define PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvPage)  PGM_DYNMAP_UNUSED_HINT(VMMGetCpu(pVM), pvPage)
     342
    322343
    323344/** @def PGM_INVL_PG
     
    15491570
    15501571/**
     1572 * Raw-mode context dynamic mapping cache entry.
     1573 *
     1574 * Because of raw-mode context being reloctable and all relocations are applied
     1575 * in ring-3, this has to be defined here and be RC specfic.
     1576 *
     1577 * @sa PGMRZDYNMAPENTRY, PGMR0DYNMAPENTRY.
     1578 */
     1579typedef struct PGMRCDYNMAPENTRY
     1580{
     1581    /** The physical address of the currently mapped page.
     1582     * This is duplicate for three reasons: cache locality, cache policy of the PT
     1583     * mappings and sanity checks.   */
     1584    RTHCPHYS                    HCPhys;
     1585    /** Pointer to the page. */
     1586    RTRCPTR                     pvPage;
     1587    /** The number of references. */
     1588    int32_t volatile            cRefs;
     1589    /** PTE pointer union. */
     1590    union PGMRCDYNMAPENTRY_PPTE
     1591    {
     1592        /** PTE pointer, 32-bit legacy version. */
     1593        RCPTRTYPE(PX86PTE)      pLegacy;
     1594        /** PTE pointer, PAE version. */
     1595        RCPTRTYPE(PX86PTEPAE)   pPae;
     1596        /** PTE pointer, the void version. */
     1597        RTRCPTR                 pv;
     1598    } uPte;
     1599    /** Alignment padding. */
     1600    RTRCPTR                     RCPtrAlignment;
     1601} PGMRCDYNMAPENTRY;
     1602/** Pointer to a dynamic mapping cache entry for the raw-mode context. */
     1603typedef PGMRCDYNMAPENTRY *PPGMRCDYNMAPENTRY;
     1604
     1605
     1606/**
     1607 * Dynamic mapping cache for the raw-mode context.
     1608 *
     1609 * This is initialized during VMMRC init based upon the pbDynPageMapBaseGC and
     1610 * paDynPageMap* PGM members.  However, it has to be defined in PGMInternal.h
     1611 * so that we can perform relocations from PGMR3Relocate.  This has the
     1612 * consequence that we must have separate ring-0 and raw-mode context versions
     1613 * of this struct even if they share the basic elements.
     1614 *
     1615 * @sa PPGMRZDYNMAP, PGMR0DYNMAP.
     1616 */
     1617typedef struct PGMRCDYNMAP
     1618{
     1619    /** The usual magic number / eye catcher (PGMRZDYNMAP_MAGIC). */
     1620    uint32_t                        u32Magic;
     1621    /** Array for tracking and managing the pages.  */
     1622    RCPTRTYPE(PPGMRCDYNMAPENTRY)    paPages;
     1623    /** The cache size given as a number of pages. */
     1624    uint32_t                        cPages;
     1625    /** Whether it's 32-bit legacy or PAE/AMD64 paging mode. */
     1626    bool                            fLegacyMode;
     1627    /** The current load.
     1628     * This does not include guard pages. */
     1629    uint32_t                        cLoad;
     1630    /** The max load ever.
     1631     * This is maintained to get trigger adding of more mapping space. */
     1632    uint32_t                        cMaxLoad;
     1633    /** The number of guard pages. */
     1634    uint32_t                        cGuardPages;
     1635    /** The number of users (protected by hInitLock). */
     1636    uint32_t                        cUsers;
     1637} PGMRCDYNMAP;
     1638/** Pointer to the dynamic cache for the raw-mode context. */
     1639typedef PGMRCDYNMAP *PPGMRCDYNMAP;
     1640
     1641
     1642/**
    15511643 * Mapping cache usage set entry.
    15521644 *
    15531645 * @remarks 16-bit ints was choosen as the set is not expected to be used beyond
    15541646 *          the dynamic ring-0 and (to some extent) raw-mode context mapping
    1555  *          cache. If it's extended to include ring-3, well, then something will
    1556  *          have be changed here...
     1647 *          cache.  If it's extended to include ring-3, well, then something
     1648 *          will have be changed here...
    15571649 */
    15581650typedef struct PGMMAPSETENTRY
    15591651{
     1652    /** Pointer to the page. */
     1653#ifndef IN_RC
     1654    RTR0PTR                     pvPage;
     1655#else
     1656    RTRCPTR                     pvPage;
     1657# if HC_ARCH_BITS == 64
     1658    uint32_t                    u32Alignment2;
     1659# endif
     1660#endif
    15601661    /** The mapping cache index. */
    15611662    uint16_t                    iPage;
     
    15631664     * The max is UINT16_MAX - 1. */
    15641665    uint16_t                    cRefs;
    1565 #if HC_ARCH_BITS == 64
    1566     uint32_t                    alignment;
    1567 #endif
    1568     /** Pointer to the page. */
    1569     RTR0PTR                     pvPage;
     1666    /** The number inlined references.
     1667     * The max is UINT16_MAX - 1. */
     1668    uint16_t                    cInlinedRefs;
     1669    /** Unreferences.  */
     1670    uint16_t                    cUnrefs;
     1671
     1672#if HC_ARCH_BITS == 32
     1673    uint32_t                    u32Alignment1;
     1674#endif
    15701675    /** The physical address for this entry. */
    15711676    RTHCPHYS                    HCPhys;
    15721677} PGMMAPSETENTRY;
     1678AssertCompileMemberOffset(PGMMAPSETENTRY, iPage, RT_MAX(sizeof(RTR0PTR), sizeof(RTRCPTR)));
     1679AssertCompileMemberAlignment(PGMMAPSETENTRY, HCPhys, sizeof(RTHCPHYS));
    15731680/** Pointer to a mapping cache usage set entry. */
    15741681typedef PGMMAPSETENTRY *PPGMMAPSETENTRY;
     
    21502257 * @remark  There is no need to assert on the result.
    21512258 */
    2152 #if defined(IN_RC)
    2153 # define PGMPOOL_PAGE_2_PTR(pVM, pPage)  pgmPoolMapPageInlined((pVM), (pPage))
    2154 #elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    2155 # define PGMPOOL_PAGE_2_PTR(pVM, pPage)  pgmPoolMapPageInlined((pVM), (pPage))
     2259#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
     2260# define PGMPOOL_PAGE_2_PTR(pVM, pPage)  pgmPoolMapPageInlined((pVM), (pPage) RTLOG_COMMA_SRC_POS)
    21562261#elif defined(VBOX_STRICT)
    21572262# define PGMPOOL_PAGE_2_PTR(pVM, pPage)  pgmPoolMapPageStrict(pPage)
     
    21782283 * @remark  There is no need to assert on the result.
    21792284 */
    2180 #if defined(IN_RC)
    2181 # define PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pPage)   pgmPoolMapPageV2Inlined((pVM), (pVCpu), (pPage))
    2182 #elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    2183 # define PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pPage)   pgmPoolMapPageV2Inlined((pVM), (pVCpu), (pPage))
     2285#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
     2286# define PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pPage)   pgmPoolMapPageV2Inlined((pVM), (pVCpu), (pPage) RTLOG_COMMA_SRC_POS)
    21842287#else
    21852288# define PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pPage)   PGMPOOL_PAGE_2_PTR((pVM), (pPage))
     
    26232726
    26242727    /* RC only: */
    2625     STAMCOUNTER StatRCDynMapCacheMisses;            /**< RC: The number of dynamic page mapping cache misses */
    2626     STAMCOUNTER StatRCDynMapCacheHits;              /**< RC: The number of dynamic page mapping cache hits */
    26272728    STAMCOUNTER StatRCInvlPgConflict;               /**< RC: Number of times PGMInvalidatePage() detected a mapping conflict. */
    26282729    STAMCOUNTER StatRCInvlPgSyncMonCR3;             /**< RC: Number of times PGMInvalidatePage() ran into PGM_SYNC_MONITOR_CR3. */
     
    28462947    /** Base address of the dynamic page mapping area.
    28472948     * The array is MM_HYPER_DYNAMIC_SIZE bytes big.
     2949     *
     2950     * @todo The plan of keeping PGMRCDYNMAP private to PGMRZDynMap.cpp didn't
     2951     *       work out.  Some cleaning up of the initialization that would
     2952     *       remove this memory is yet to be done...
    28482953     */
    28492954    RCPTRTYPE(uint8_t *)            pbDynPageMapBaseGC;
    2850     /** The index of the last entry used in the dynamic page mapping area. */
    2851     RTUINT                          iDynPageMapLast;
    2852     /** Cache containing the last entries in the dynamic page mapping area.
    2853      * The cache size is covering half of the mapping area. */
    2854     RTHCPHYS                        aHCPhysDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT + 1)];
    2855     /** Keep a lock counter for the full (!) mapping area. */
    2856     uint32_t                        aLockedDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT)];
    2857 
     2955    /** The address of the raw-mode context mapping cache. */
     2956    RCPTRTYPE(PPGMRCDYNMAP)         pRCDynMap;
    28582957    /** The address of the ring-0 mapping cache if we're making use of it.  */
    28592958    RTR0PTR                         pvR0DynMapUsed;
     
    30523151AssertCompileMemberAlignment(PGM, GCPtrMappingFixed, sizeof(RTGCPTR));
    30533152AssertCompileMemberAlignment(PGM, HCPhysInterPD, 8);
    3054 AssertCompileMemberAlignment(PGM, aHCPhysDynPageMapCache, 8);
    30553153AssertCompileMemberAlignment(PGM, CritSect, 8);
    30563154AssertCompileMemberAlignment(PGM, ChunkR3Map, 8);
     
    30723170
    30733171    /* R0 only: */
    3074     STAMCOUNTER StatR0DynMapMigrateInvlPg;          /**< R0: invlpg in PGMDynMapMigrateAutoSet. */
    3075     STAMPROFILE StatR0DynMapGCPageInl;              /**< R0: Calls to pgmR0DynMapGCPageInlined. */
    3076     STAMCOUNTER StatR0DynMapGCPageInlHits;          /**< R0: Hash table lookup hits. */
    3077     STAMCOUNTER StatR0DynMapGCPageInlMisses;        /**< R0: Misses that falls back to code common with PGMDynMapHCPage. */
    3078     STAMCOUNTER StatR0DynMapGCPageInlRamHits;       /**< R0: 1st ram range hits. */
    3079     STAMCOUNTER StatR0DynMapGCPageInlRamMisses;     /**< R0: 1st ram range misses, takes slow path. */
    3080     STAMPROFILE StatR0DynMapHCPageInl;              /**< R0: Calls to pgmR0DynMapHCPageInlined. */
    3081     STAMCOUNTER StatR0DynMapHCPageInlHits;          /**< R0: Hash table lookup hits. */
    3082     STAMCOUNTER StatR0DynMapHCPageInlMisses;        /**< R0: Misses that falls back to code common with PGMDynMapHCPage. */
    3083     STAMPROFILE StatR0DynMapHCPage;                 /**< R0: Calls to PGMDynMapHCPage. */
    3084     STAMCOUNTER StatR0DynMapSetOptimize;            /**< R0: Calls to pgmDynMapOptimizeAutoSet. */
    3085     STAMCOUNTER StatR0DynMapSetSearchFlushes;       /**< R0: Set search restorting to subset flushes. */
    3086     STAMCOUNTER StatR0DynMapSetSearchHits;          /**< R0: Set search hits. */
    3087     STAMCOUNTER StatR0DynMapSetSearchMisses;        /**< R0: Set search misses. */
    3088     STAMCOUNTER StatR0DynMapPage;                   /**< R0: Calls to pgmR0DynMapPage. */
    3089     STAMCOUNTER StatR0DynMapPageHits0;              /**< R0: Hits at iPage+0. */
    3090     STAMCOUNTER StatR0DynMapPageHits1;              /**< R0: Hits at iPage+1. */
    3091     STAMCOUNTER StatR0DynMapPageHits2;              /**< R0: Hits at iPage+2. */
    3092     STAMCOUNTER StatR0DynMapPageInvlPg;             /**< R0: invlpg. */
    3093     STAMCOUNTER StatR0DynMapPageSlow;               /**< R0: Calls to pgmR0DynMapPageSlow. */
    3094     STAMCOUNTER StatR0DynMapPageSlowLoopHits;       /**< R0: Hits in the pgmR0DynMapPageSlow search loop. */
    3095     STAMCOUNTER StatR0DynMapPageSlowLoopMisses;     /**< R0: Misses in the pgmR0DynMapPageSlow search loop. */
    3096     //STAMCOUNTER StatR0DynMapPageSlowLostHits;       /**< R0: Lost hits. */
    3097     STAMCOUNTER StatR0DynMapSubsets;                /**< R0: Times PGMDynMapPushAutoSubset was called. */
    3098     STAMCOUNTER StatR0DynMapPopFlushes;             /**< R0: Times PGMDynMapPopAutoSubset flushes the subset. */
    3099     STAMCOUNTER aStatR0DynMapSetSize[11];           /**< R0: Set size distribution. */
    31003172
    31013173    /* RZ only: */
     
    31483220    STAMCOUNTER StatRZGuestROMWriteHandled;         /**< RC/R0: The number of times pgmPhysRomWriteHandler() was successfully called. */
    31493221    STAMCOUNTER StatRZGuestROMWriteUnhandled;       /**< RC/R0: The number of times pgmPhysRomWriteHandler() was called and we had to fall back to the recompiler */
     3222    STAMCOUNTER StatRZDynMapMigrateInvlPg;          /**< RZ: invlpg in PGMR0DynMapMigrateAutoSet. */
     3223    STAMPROFILE StatRZDynMapGCPageInl;              /**< RZ: Calls to pgmRZDynMapGCPageInlined. */
     3224    STAMCOUNTER StatRZDynMapGCPageInlHits;          /**< RZ: Hash table lookup hits. */
     3225    STAMCOUNTER StatRZDynMapGCPageInlMisses;        /**< RZ: Misses that falls back to the code common. */
     3226    STAMCOUNTER StatRZDynMapGCPageInlRamHits;       /**< RZ: 1st ram range hits. */
     3227    STAMCOUNTER StatRZDynMapGCPageInlRamMisses;     /**< RZ: 1st ram range misses, takes slow path. */
     3228    STAMPROFILE StatRZDynMapHCPageInl;              /**< RZ: Calls to pgmRZDynMapHCPageInlined. */
     3229    STAMCOUNTER StatRZDynMapHCPageInlHits;          /**< RZ: Hash table lookup hits. */
     3230    STAMCOUNTER StatRZDynMapHCPageInlMisses;        /**< RZ: Misses that falls back to the code common. */
     3231    STAMPROFILE StatRZDynMapHCPage;                 /**< RZ: Calls to pgmRZDynMapHCPageCommon. */
     3232    STAMCOUNTER StatRZDynMapSetOptimize;            /**< RZ: Calls to pgmRZDynMapOptimizeAutoSet. */
     3233    STAMCOUNTER StatRZDynMapSetSearchFlushes;       /**< RZ: Set search restorting to subset flushes. */
     3234    STAMCOUNTER StatRZDynMapSetSearchHits;          /**< RZ: Set search hits. */
     3235    STAMCOUNTER StatRZDynMapSetSearchMisses;        /**< RZ: Set search misses. */
     3236    STAMCOUNTER StatRZDynMapPage;                   /**< RZ: Calls to pgmR0DynMapPage. */
     3237    STAMCOUNTER StatRZDynMapPageHits0;              /**< RZ: Hits at iPage+0. */
     3238    STAMCOUNTER StatRZDynMapPageHits1;              /**< RZ: Hits at iPage+1. */
     3239    STAMCOUNTER StatRZDynMapPageHits2;              /**< RZ: Hits at iPage+2. */
     3240    STAMCOUNTER StatRZDynMapPageInvlPg;             /**< RZ: invlpg. */
     3241    STAMCOUNTER StatRZDynMapPageSlow;               /**< RZ: Calls to pgmR0DynMapPageSlow. */
     3242    STAMCOUNTER StatRZDynMapPageSlowLoopHits;       /**< RZ: Hits in the pgmR0DynMapPageSlow search loop. */
     3243    STAMCOUNTER StatRZDynMapPageSlowLoopMisses;     /**< RZ: Misses in the pgmR0DynMapPageSlow search loop. */
     3244    //STAMCOUNTER StatRZDynMapPageSlowLostHits;       /**< RZ: Lost hits. */
     3245    STAMCOUNTER StatRZDynMapSubsets;                /**< RZ: Times PGMDynMapPushAutoSubset was called. */
     3246    STAMCOUNTER StatRZDynMapPopFlushes;             /**< RZ: Times PGMDynMapPopAutoSubset flushes the subset. */
     3247    STAMCOUNTER aStatRZDynMapSetFilledPct[11];      /**< RZ: Set fill distribution, percent. */
    31503248
    31513249    /* HC - R3 and (maybe) R0: */
     
    32703368{
    32713369    /** Offset to the VM structure. */
    3272     RTINT                           offVM;
     3370    int32_t                         offVM;
    32733371    /** Offset to the VMCPU structure. */
    3274     RTINT                           offVCpu;
     3372    int32_t                         offVCpu;
    32753373    /** Offset of the PGM structure relative to VMCPU. */
    3276     RTINT                           offPGM;
    3277     RTINT                           uPadding0;      /**< structure size alignment. */
    3278 
    3279 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
     3374    int32_t                         offPGM;
     3375    uint32_t                        uPadding0;      /**< structure size alignment. */
     3376
     3377#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE) || defined(VBOX_WITH_RAW_MODE)
    32803378    /** Automatically tracked physical memory mapping set.
    32813379     * Ring-0 and strict raw-mode builds. */
     
    35933691
    35943692#endif /* IN_RING3 */
    3595 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    3596 int             pgmR0DynMapHCPageCommon(PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv);
      3693#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
     3694int             pgmRZDynMapHCPageCommon(PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL);
     3695int             pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL);
     3696# ifdef LOG_ENABLED
     3697void            pgmRZDynMapUnusedHint(PVMCPU pVCpu, void *pvHint, RT_SRC_POS_DECL);
     3698# else
     3699void            pgmRZDynMapUnusedHint(PVMCPU pVCpu, void *pvHint);
     3700# endif
    35973701#endif
    35983702int             pgmPoolAllocEx(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, PGMPOOLACCESS enmAccess, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage, bool fLockPage = false);
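
The reworked internal prototypes thread the caller's source position through the RTLOG_COMMA_SRC_POS* macros so the dynamic mapper can log which call site requested a mapping or hinted it away. A minimal caller sketch, assuming only the declarations above (the wrapper name is hypothetical, and the macro behavior - call-site file/line/function in logging builds, nothing otherwise - is stated as an assumption since the macro body is not part of this excerpt):

    /* Hypothetical wrapper, for illustration only.  RTLOG_COMMA_SRC_POS at
     * the call site pairs with RTLOG_COMMA_SRC_POS_DECL in the prototype. */
    static int pgmExampleMapByHCPhys(PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv)
    {
        return pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS);
    }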
  • trunk/src/VBox/VMM/VMMAll/MMAllPagePool.cpp

    r28800 r31402  
    3939
    4040
    41 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
     41#if !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) && !defined(IN_RC)
    4242
    4343/**
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r31170 r31402  
    936936        PGMPOOLKIND enmKind;
    937937
    938 # if defined(IN_RC)
    939         /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
    940         PGMDynLockHCPage(pVM, (uint8_t *)pPdpe);
    941 # endif
    942 
    943938        if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
    944939        {
     
    990985         */
    991986        ASMReloadCR3();
    992         PGMDynUnlockHCPage(pVM, (uint8_t *)pPdpe);
    993987# endif
     988        PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
    994989    }
    995990    else
     
    15241519#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
    15251520#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    1526 
    15271521/**
    15281522 * Performs the lazy mapping of the 32-bit guest PD.
     
    15631557    return rc;
    15641558}
    1565 
    15661559#endif
    15671560
     
    22722265#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    22732266
    2274 /** Common worker for PGMDynMapGCPage and PGMDynMapGCPageOff. */
    2275 DECLINLINE(int) pgmDynMapGCPageInternal(PVM pVM, RTGCPHYS GCPhys, void **ppv)
     2267/**
     2268 * Common worker for pgmRZDynMapGCPageOffInlined and pgmRZDynMapGCPageV2Inlined.
     2269 *
     2270 * @returns VBox status code.
     2271 * @param   pVM         The VM handle.
     2272 * @param   pVCpu       The current CPU.
     2273 * @param   GCPhys      The guest physical address of the page to map.  The
     2274 *                      offset bits are not ignored.
     2275 * @param   ppv         Where to return the address corresponding to @a GCPhys.
     2276 */
     2277int pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
    22762278{
    22772279    pgmLock(pVM);
    22782280
    22792281    /*
    2280      * Convert it to a writable page and it on to PGMDynMapHCPage.
      2282     * Convert it to a writable page and pass it on to the dynamic mapper.
    22812283     */
    22822284    int rc;
     
    22872289        if (RT_SUCCESS(rc))
    22882290        {
    2289             //Log(("PGMDynMapGCPage: GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
    2290 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    2291             rc = pgmR0DynMapHCPageInlined(VMMGetCpu(pVM), PGM_PAGE_GET_HCPHYS(pPage), ppv);
    2292 #else
    2293             rc = PGMDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), ppv);
    2294 #endif
     2291            void *pv;
     2292            rc = pgmRZDynMapHCPageInlined(pVCpu, PGM_PAGE_GET_HCPHYS(pPage), &pv RTLOG_COMMA_SRC_POS_ARGS);
     2293            if (RT_SUCCESS(rc))
     2294                *ppv = (void *)((uintptr_t)pv | ((uintptr_t)GCPhys & PAGE_OFFSET_MASK));
    22952295        }
    22962296        else
     
    23072307}
    23082308
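
As a worked illustration of the offset handling above: the page itself is mapped by host physical address, then the low PAGE_OFFSET_MASK bits of GCPhys are OR'ed back into the returned pointer. A hedged usage sketch (the address value is arbitrary, error handling elided):

    /* Sketch only: map the page containing an unaligned guest physical
     * address.  With GCPhys = 0x12345678 the page at 0x12345000 is mapped
     * and the returned pointer carries the 0x678 byte offset. */
    void    *pv;
    RTGCPHYS GCPhys = UINT64_C(0x12345678);
    int rc = pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, &pv RTLOG_COMMA_SRC_POS);
    if (RT_SUCCESS(rc))
    {
        uint8_t bByte = *(uint8_t *)pv;   /* reads the guest byte at 0x12345678 */
        NOREF(bByte);
    }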
    2309 /**
    2310  * Temporarily maps one guest page specified by GC physical address.
    2311  * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
    2312  *
    2313  * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
    2314  * reused after 8 mappings (or perhaps a few more if you score with the cache).
    2315  *
    2316  * @returns VBox status.
    2317  * @param   pVM         VM handle.
    2318  * @param   GCPhys      GC Physical address of the page.
    2319  * @param   ppv         Where to store the address of the mapping.
    2320  */
    2321 VMMDECL(int) PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv)
    2322 {
    2323     AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp\n", GCPhys));
    2324     return pgmDynMapGCPageInternal(pVM, GCPhys, ppv);
    2325 }
    2326 
    2327 
    2328 /**
    2329  * Temporarily maps one guest page specified by unaligned GC physical address.
    2330  * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
    2331  *
    2332  * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
    2333  * reused after 8 mappings (or perhaps a few more if you score with the cache).
    2334  *
     2335  * The caller is aware that only the specified page is mapped and that really bad things
    2336  * will happen if writing beyond the page!
    2337  *
    2338  * @returns VBox status.
    2339  * @param   pVM         VM handle.
    2340  * @param   GCPhys      GC Physical address within the page to be mapped.
    2341  * @param   ppv         Where to store the address of the mapping address corresponding to GCPhys.
    2342  */
    2343 VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
    2344 {
    2345     void *pv;
    2346     int rc = pgmDynMapGCPageInternal(pVM, GCPhys, &pv);
    2347     if (RT_SUCCESS(rc))
    2348     {
    2349         *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
    2350         return VINF_SUCCESS;
    2351     }
    2352     return rc;
    2353 }
    2354 
    2355 # ifdef IN_RC
    2356 
    2357 /**
    2358  * Temporarily maps one host page specified by HC physical address.
    2359  *
    2360  * Be WARNED that the dynamic page mapping area is small, 16 pages, thus the space is
    2361  * reused after 16 mappings (or perhaps a few more if you score with the cache).
    2362  *
    2363  * @returns VINF_SUCCESS, will bail out to ring-3 on failure.
    2364  * @param   pVM         VM handle.
    2365  * @param   HCPhys      HC Physical address of the page.
    2366  * @param   ppv         Where to store the address of the mapping. This is the
    2367  *                      address of the PAGE not the exact address corresponding
    2368  *                      to HCPhys. Use PGMDynMapHCPageOff if you care for the
    2369  *                      page offset.
    2370  */
    2371 VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
    2372 {
    2373     AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
    2374 
    2375     /*
    2376      * Check the cache.
    2377      */
    2378     register unsigned iCache;
    2379     for (iCache = 0;iCache < RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache);iCache++)
    2380     {
    2381         static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =
    2382         {
    2383             { 0,  9, 10, 11, 12, 13, 14, 15},
    2384             { 0,  1, 10, 11, 12, 13, 14, 15},
    2385             { 0,  1,  2, 11, 12, 13, 14, 15},
    2386             { 0,  1,  2,  3, 12, 13, 14, 15},
    2387             { 0,  1,  2,  3,  4, 13, 14, 15},
    2388             { 0,  1,  2,  3,  4,  5, 14, 15},
    2389             { 0,  1,  2,  3,  4,  5,  6, 15},
    2390             { 0,  1,  2,  3,  4,  5,  6,  7},
    2391             { 8,  1,  2,  3,  4,  5,  6,  7},
    2392             { 8,  9,  2,  3,  4,  5,  6,  7},
    2393             { 8,  9, 10,  3,  4,  5,  6,  7},
    2394             { 8,  9, 10, 11,  4,  5,  6,  7},
    2395             { 8,  9, 10, 11, 12,  5,  6,  7},
    2396             { 8,  9, 10, 11, 12, 13,  6,  7},
    2397             { 8,  9, 10, 11, 12, 13, 14,  7},
    2398             { 8,  9, 10, 11, 12, 13, 14, 15},
    2399         };
    2400         AssertCompile(RT_ELEMENTS(au8Trans) == 16);
    2401         AssertCompile(RT_ELEMENTS(au8Trans[0]) == 8);
    2402 
    2403         if (pVM->pgm.s.aHCPhysDynPageMapCache[iCache] == HCPhys)
    2404         {
    2405             int iPage = au8Trans[pVM->pgm.s.iDynPageMapLast][iCache];
    2406 
    2407             /* The cache can get out of sync with locked entries. (10 locked, 2 overwrites its cache position, last = 11, lookup 2 -> page 10 instead of 2) */
    2408             if ((pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u & X86_PTE_PG_MASK) == HCPhys)
    2409             {
    2410                 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
    2411                 *ppv = pv;
    2412                 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCDynMapCacheHits);
    2413                 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
    2414                 return VINF_SUCCESS;
    2415             }
    2416             LogFlow(("Out of sync entry %d\n", iPage));
    2417         }
    2418     }
    2419     AssertCompile(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == 8);
    2420     AssertCompile((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) == 16);
    2421     STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCDynMapCacheMisses);
    2422 
    2423     /*
    2424      * Update the page tables.
    2425      */
    2426     unsigned iPage = pVM->pgm.s.iDynPageMapLast;
    2427     unsigned i;
    2428     for (i = 0; i < (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT); i++)
    2429     {
    2430         pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
    2431         if (!pVM->pgm.s.aLockedDynPageMapCache[iPage])
    2432             break;
    2433         iPage++;
    2434     }
    2435     AssertRelease(i != (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT));
    2436 
    2437     pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
    2438     pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u = (uint32_t)HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
    2439     pVM->pgm.s.paDynPageMapPaePTEsGC[iPage].u   =           HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
    2440     pVM->pgm.s.aLockedDynPageMapCache[iPage]    = 0;
    2441 
    2442     void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
    2443     *ppv = pv;
    2444     ASMInvalidatePage(pv);
    2445     Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d\n", HCPhys, pv, iPage));
    2446     return VINF_SUCCESS;
    2447 }
    2448 
    2449 
    2450 /**
    2451  * Temporarily lock a dynamic page to prevent it from being reused.
    2452  *
    2453  * @param   pVM         VM handle.
    2454  * @param   GCPage      GC address of page
    2455  */
    2456 VMMDECL(void) PGMDynLockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
    2457 {
    2458     unsigned iPage;
    2459 
    2460     Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
    2461     iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
    2462     ASMAtomicIncU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
    2463     Log4(("PGMDynLockHCPage %RRv iPage=%d\n", GCPage, iPage));
    2464 }
    2465 
    2466 
    2467 /**
    2468  * Unlock a dynamic page
    2469  *
    2470  * @param   pVM         VM handle.
    2471  * @param   GCPage      GC address of page
    2472  */
    2473 VMMDECL(void) PGMDynUnlockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
    2474 {
    2475     unsigned iPage;
    2476 
    2477     AssertCompile(RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache) == 2* RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache));
    2478     AssertCompileMemberSize(VM, pgm.s.aLockedDynPageMapCache, sizeof(uint32_t) * (MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT)));
    2479 
    2480     Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
    2481     iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
    2482     Assert(pVM->pgm.s.aLockedDynPageMapCache[iPage]);
    2483     ASMAtomicDecU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
    2484     Log4(("PGMDynUnlockHCPage %RRv iPage=%d\n", GCPage, iPage));
    2485 }
    2486 
    2487 
    2488 #  ifdef VBOX_STRICT
    2489 /**
    2490  * Check for lock leaks.
    2491  *
    2492  * @param   pVM         VM handle.
    2493  */
    2494 VMMDECL(void) PGMDynCheckLocks(PVM pVM)
    2495 {
    2496     for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache); i++)
    2497         Assert(!pVM->pgm.s.aLockedDynPageMapCache[i]);
    2498 }
    2499 #  endif /* VBOX_STRICT */
    2500 
    2501 # endif /* IN_RC */
    25022309#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
    2503 
    25042310#if !defined(IN_R0) || defined(LOG_ENABLED)
    25052311
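
The removed ring-buffer code above handed out 16 hypervisor pages round-robin and relied on explicit PGMDynLockHCPage/PGMDynUnlockHCPage pairs to keep live mappings from being recycled; a forgotten unlock leaked an entry, a forgotten lock risked silent reuse. The auto-set model replacing it keeps every mapping made inside a start/release bracket valid. A minimal sketch, assuming only the entry points introduced by this changeset:

    PGMRZDynMapStartAutoSet(pVCpu);             /* open the tracking set */

    void *pv = NULL;
    int rc = pgmRZDynMapHCPageInlined(pVCpu, HCPhys, &pv RTLOG_COMMA_SRC_POS);
    AssertRC(rc);                               /* error handling elided */
    /* ... pv stays valid here without any explicit locking ... */
    PGM_DYNMAP_UNUSED_HINT(pVCpu, pv);          /* optional early recycle hint */

    PGMRZDynMapReleaseAutoSet(pVCpu);           /* everything may go away now */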
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r31207 r31402  
    383383    *pfLockTaken = false;
    384384
    385 # if defined(IN_RC) && defined(VBOX_STRICT)
    386     PGMDynCheckLocks(pVM);
    387 # endif
    388 
    389385# if  (   PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT \
    390386       || PGM_GST_TYPE == PGM_TYPE_PAE   || PGM_GST_TYPE == PGM_TYPE_AMD64) \
     
    433429    if (uErr & X86_TRAP_PF_RSVD)
    434430    {
      431/** @todo This is not complete code; take the locks. */
    435432        Assert(uErr & X86_TRAP_PF_P);
    436433        PPGMPAGE pPage;
     
    563560            return VINF_SUCCESS;
    564561        }
    565 #ifndef IN_RC
    566562        AssertMsg(GstWalk.Pde.u == GstWalk.pPde->u || GstWalk.pPte->u == GstWalk.pPde->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pde.u, (uint64_t)GstWalk.pPde->u));
    567563        AssertMsg(GstWalk.Core.fBigPage || GstWalk.Pte.u == GstWalk.pPte->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pte.u, (uint64_t)GstWalk.pPte->u));
    568 #else
     569         /* Ugly hack, proper fix is coming up later. */
    570         if (   !(GstWalk.Pde.u == GstWalk.pPde->u || GstWalk.pPte->u == GstWalk.pPde->u)
    571             || !(GstWalk.Core.fBigPage || GstWalk.Pte.u == GstWalk.pPte->u) )
    572         {
    573             rc = PGM_GST_NAME(Walk)(pVCpu, pvFault, &GstWalk);
    574             if (RT_FAILURE_NP(rc))
    575                 return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerGuestFault)(pVCpu, &GstWalk, uErr));
    576         }
    577 #endif
    578564    }
    579565
     
    11481134    }
    11491135
    1150 # if defined(IN_RC)
    1151     /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
    1152     PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);
    1153 # endif
    1154 
    11551136    /*
    11561137     * Get the guest PD entry and calc big page.
     
    12951276                    LogFlow(("Skipping flush for big page containing %RGv (PD=%X .u=%RX64)-> nothing has changed!\n", GCPtrPage, iPDSrc, PdeSrc.u));
    12961277                    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage4MBPagesSkip));
    1297 # if defined(IN_RC)
    1298                     /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
    1299                     PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
    1300 # endif
     1278                    PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
    13011279                    return VINF_SUCCESS;
    13021280                }
     
    13351313        }
    13361314    }
    1337 # if defined(IN_RC)
    1338     /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
    1339     PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
    1340 # endif
     1315    PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
    13411316    return rc;
    13421317
     
    17851760    PPGMPOOLPAGE    pShwPde  = pgmPoolGetPage(pPool, pPdptDst->a[iPdpt].u & X86_PDPE_PG_MASK);
    17861761    Assert(pShwPde);
    1787 # endif
    1788 
    1789 # if defined(IN_RC)
    1790     /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
    1791     PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);
    17921762# endif
    17931763
     
    20211991                }
    20221992            }
    2023 # if defined(IN_RC)
    2024             /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
    2025             PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
    2026 # endif
     1993            PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
    20271994            return VINF_SUCCESS;
    20281995        }
     
    20502017    ASMAtomicWriteSize(pPdeDst, 0);
    20512018
    2052 # if defined(IN_RC)
    2053     /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
    2054     PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
    2055 # endif
     2019    PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
    20562020    PGM_INVL_VCPU_TLBS(pVCpu);
    20572021    return VINF_PGM_SYNCPAGE_MODIFIED_PDE;
     
    25642528    Assert(!PdeDst.n.u1Present); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/
    25652529
    2566 # if defined(IN_RC)
    2567     /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
    2568     PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);
    2569 # endif
    2570 
    25712530    /*
    25722531     * Sync page directory entry.
     
    26462605            }
    26472606            ASMAtomicWriteSize(pPdeDst, PdeDst.u);
    2648 # if defined(IN_RC)
    2649             PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
    2650 # endif
     2607            PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
    26512608            return VINF_SUCCESS;
    26522609        }
     
    26542611        {
    26552612            VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    2656 # if defined(IN_RC)
    2657             PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
    2658 # endif
     2613            PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
    26592614            return VINF_PGM_SYNC_CR3;
    26602615        }
     
    26872642                         | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
    26882643                ASMAtomicWriteSize(pPdeDst, PdeDst.u);
    2689 # if defined(IN_RC)
    2690                 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
    2691 # endif
     2644                PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
    26922645
    26932646                /*
     
    27682721
    27692722            /**
    2770              * @todo It might be more efficient to sync only a part of the 4MB page (similar to what we do for 4kb PDs).
     2723             * @todo It might be more efficient to sync only a part of the 4MB
     2724             *       page (similar to what we do for 4KB PDs).
    27712725             */
    27722726
     
    27952749            }
    27962750            ASMAtomicWriteSize(pPdeDst, PdeDst.u);
    2797 # if defined(IN_RC)
    2798             PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
    2799 # endif
     2751            PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
    28002752
    28012753            /*
     
    33913343# endif
    33923344
    3393 # if defined(IN_RC)
    3394     /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
    3395     PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);
    3396 # endif
    3397 
    33983345    if (!pPdeDst->n.u1Present)
    33993346    {
     
    34013348        if (rc != VINF_SUCCESS)
    34023349        {
    3403 # if defined(IN_RC)
    3404             /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
    3405             PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
    3406 # endif
     3350            PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
    34073351            pgmUnlock(pVM);
    34083352            AssertRC(rc);
     
    34493393        }
    34503394    }
    3451 # if defined(IN_RC)
    3452     /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
    3453     PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
    3454 # endif
     3395    PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
    34553396    pgmUnlock(pVM);
    34563397    return rc;
     
    43594300    AssertReturn(pPageCR3, VERR_INTERNAL_ERROR_2);
    43604301    HCPhysGuestCR3 = PGM_PAGE_GET_HCPHYS(pPageCR3);
    4361     /** @todo this needs some reworking wrt. locking.  */
     4302    /** @todo this needs some reworking wrt. locking?  */
    43624303# if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    43634304    HCPtrGuestCR3 = NIL_RTHCPTR;
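
Throughout this template the lock/unlock pairs around pPdeDst give way to a hint on every exit path. The shape of the recurring pattern, sketched under the assumption that pPdeDst points into a dynamically mapped shadow structure (not a literal excerpt):

    /* Condensed exit pattern: hint the mapping away on every return. */
    if (rc != VINF_SUCCESS)
    {
        PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);     /* done with this mapping */
        return rc;
    }
    ASMAtomicWriteSize(pPdeDst, PdeDst.u);          /* commit the shadow PDE  */
    PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
    return VINF_SUCCESS;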
  • trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp

    r31174 r31402  
    247247                PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(pVCpu);
    248248                AssertFatal(pShw32BitPd);
    249 #ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmPoolFree. */
    250                 PGMDynLockHCPage(pVM, (uint8_t *)pShw32BitPd);
    251 #endif
     249
    252250                /* Free any previous user, unless it's us. */
    253251                Assert(   (pShw32BitPd->a[iNewPDE].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
     
    260258                pShw32BitPd->a[iNewPDE].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
    261259                                          | (uint32_t)pMap->aPTs[i].HCPhysPT;
    262 #ifdef IN_RC
    263                 /* Unlock dynamic mappings again. */
    264                 PGMDynUnlockHCPage(pVM, (uint8_t *)pShw32BitPd);
    265 #endif
     260                PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShw32BitPd);
    266261                break;
    267262            }
     
    274269                PX86PDPT        pShwPdpt  = pgmShwGetPaePDPTPtr(pVCpu);
    275270                Assert(pShwPdpt);
    276 #ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmShwSyncPaePDPtr. */
    277                 PGMDynLockHCPage(pVM, (uint8_t *)pShwPdpt);
    278 #endif
    279271
    280272                /*
     
    302294                }
    303295                Assert(pShwPaePd);
    304 #ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmPoolFree. */
    305                 PGMDynLockHCPage(pVM, (uint8_t *)pShwPaePd);
    306 #endif
    307296
    308297                /*
     
    357346                pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING;
    358347
    359 #ifdef IN_RC
    360                 /* Unlock dynamic mappings again. */
    361                 PGMDynUnlockHCPage(pVM, (uint8_t *)pShwPaePd);
    362                 PGMDynUnlockHCPage(pVM, (uint8_t *)pShwPdpt);
    363 #endif
     348                PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShwPaePd);
     349                PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShwPdpt);
    364350                break;
    365351            }
     
    406392    if (    PGMGetGuestMode(pVCpu) >= PGMMODE_PAE
    407393        &&  pShwPageCR3 != pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
    408     {
    409394        pCurrentShwPdpt = pgmShwGetPaePDPTPtr(pVCpu);
    410 #ifdef IN_RC    /* Lock mapping to prevent it from being reused (currently not possible). */
    411         if (pCurrentShwPdpt)
    412             PGMDynLockHCPage(pVM, (uint8_t *)pCurrentShwPdpt);
    413 #endif
    414     }
    415395
    416396    unsigned i = pMap->cPTs;
     
    503483        }
    504484    }
    505 #ifdef IN_RC
    506     /* Unlock dynamic mappings again. */
    507     if (pCurrentShwPdpt)
    508         PGMDynUnlockHCPage(pVM, (uint8_t *)pCurrentShwPdpt);
    509 #endif
     485
     486    PGM_DYNMAP_UNUSED_HINT_VM(pVM, pCurrentShwPdpt);
    510487}
    511488#endif /* !IN_RING0 */
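
PGMAllMap.cpp runs where only the VM handle is at hand, hence the PGM_DYNMAP_UNUSED_HINT_VM flavor. Its body is not part of this excerpt; presumably it just resolves the current VMCPU and forwards, roughly as sketched below (an assumption based on the VMMGetCpu() usage elsewhere in this changeset, not the real definition):

    /* Sketch of the assumed intent of the _VM flavor; hypothetical name. */
    #define PGM_DYNMAP_UNUSED_HINT_VM_SKETCH(pVM, pvPage) \
        PGM_DYNMAP_UNUSED_HINT(VMMGetCpu(pVM), (pvPage))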
  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

    r31208 r31402  
    738738    AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
    739739
    740 #ifdef IN_RC
     740#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    741741    /*
    742742     * Map it by HCPhys.
    743743     */
    744     return PGMDynMapHCPage(pVM, HCPhys, ppv);
    745 
    746 #elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    747     /*
    748      * Map it by HCPhys.
    749      */
    750     return pgmR0DynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv);
     744    return pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv  RTLOG_COMMA_SRC_POS);
    751745
    752746#else
     
    824818    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
    825819    Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
    826 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    827     pgmR0DynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv);
    828 # else
    829     PGMDynMapHCPage(pVM, HCPhys, ppv);
    830 # endif
     820    pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
    831821    return VINF_SUCCESS;
    832822
     
    11381128     */
    11391129#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    1140     *ppv = pgmDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK));
     1130    *ppv = pgmRZDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK) RTLOG_COMMA_SRC_POS);
    11411131#else
    11421132    PPGMPAGEMAPTLBE pTlbe;
     
    11761166     */
    11771167#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    1178     *ppv = pgmDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
     1168    *ppv = pgmRZDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK) RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
    11791169#else
    11801170    PPGMPAGEMAPTLBE pTlbe;
     
    12341224        if (RT_SUCCESS(rc))
    12351225        {
    1236             *ppv = pgmDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
     1226            *ppv = pgmRZDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK) RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
    12371227# if 0
    12381228            pLock->pvMap = 0;
     
    13451335        else
    13461336        {
    1347             *ppv = pgmDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
     1337            *ppv = pgmRZDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK) RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
    13481338# if 0
    13491339            pLock->pvMap = 0;
     
    14931483    pLock->u32Dummy = 0;
    14941484
    1495 #else   /* IN_RING3 */
     1485#else
    14961486    PPGMPAGEMAP pMap       = (PPGMPAGEMAP)pLock->pvMap;
    14971487    PPGMPAGE    pPage      = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
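
The pgmRZDynMapHCPageOff calls above fuse the page-aligned host physical address and the guest page offset into a single argument. A worked example with illustrative values:

    /* Illustrative values only:
     *   PGM_PAGE_GET_HCPHYS(pPage)   = 0x000000007fff1000 (page aligned)
     *   GCPhys & PAGE_OFFSET_MASK    = 0x0000000000000234
     *   fused argument               = 0x000000007fff1234
     * The returned pointer carries the same 0x234 offset. */
    void *pv = pgmRZDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK) RTLOG_COMMA_SRC_POS);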
  • trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp

    r31170 r31402  
    8888}
    8989
    90 /** @def PGMPOOL_PAGE_2_LOCKED_PTR
     91  * Maps a pool page into the current context and locks it (RC only).
    92  *
    93  * @returns VBox status code.
    94  * @param   pVM     The VM handle.
    95  * @param   pPage   The pool page.
    96  *
     97  * @remark  In RC this uses PGMGCDynMapHCPage(), so it will consume part of the
     98  *          small page window employed by that function. Be careful.
    99  * @remark  There is no need to assert on the result.
    100  */
    101 #if defined(IN_RC)
    102 DECLINLINE(void *) PGMPOOL_PAGE_2_LOCKED_PTR(PVM pVM, PPGMPOOLPAGE pPage)
    103 {
    104     void *pv = pgmPoolMapPageInlined(pVM, pPage);
    105 
    106     /* Make sure the dynamic mapping will not be reused. */
    107     if (pv)
    108         PGMDynLockHCPage(pVM, (uint8_t *)pv);
    109 
    110     return pv;
    111 }
    112 #else
    113 # define PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage)  PGMPOOL_PAGE_2_PTR(pVM, pPage)
    114 #endif
    115 
    116 /** @def PGMPOOL_UNLOCK_PTR
    117  * Unlock a previously locked dynamic caching (RC only).
    118  *
    119  * @returns VBox status code.
    120  * @param   pVM     The VM handle.
    121  * @param   pPage   The pool page.
    122  *
     123  * @remark  In RC this uses PGMGCDynMapHCPage(), so it will consume part of the
     124  *          small page window employed by that function. Be careful.
    125  * @remark  There is no need to assert on the result.
    126  */
    127 #if defined(IN_RC)
    128 DECLINLINE(void) PGMPOOL_UNLOCK_PTR(PVM pVM, void *pvPage)
    129 {
    130     if (pvPage)
    131         PGMDynUnlockHCPage(pVM, (uint8_t *)pvPage);
    132 }
    133 #else
    134 # define PGMPOOL_UNLOCK_PTR(pVM, pPage)  do {} while (0)
    135 #endif
    136 
    13790
    13891/**
     
    247200            {
    248201                STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT));
    249                 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
     202                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
    250203                const unsigned iShw = off / sizeof(X86PTE);
    251204                LogFlow(("PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT iShw=%x\n", iShw));
     
    270223            {
    271224                STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT));
    272                 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
     225                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
    273226                if (!((off ^ pPage->GCPhys) & (PAGE_SIZE / 2)))
    274227                {
     
    300253                unsigned iShwPdpt = iGst / 256;
    301254                unsigned iShw     = (iGst % 256) * 2;
    302                 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
     255                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
    303256
    304257                LogFlow(("pgmPoolMonitorChainChanging PAE for 32 bits: iGst=%x iShw=%x idx = %d page idx=%d\n", iGst, iShw, iShwPdpt, pPage->enmKind - PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD));
     
    363316            case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
    364317            {
    365                 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
     318                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
    366319                const unsigned iShw = off / sizeof(X86PTEPAE);
    367320                STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT));
     
    409362            case PGMPOOLKIND_32BIT_PD:
    410363            {
    411                 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
     364                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
    412365                const unsigned iShw = off / sizeof(X86PTE);         // ASSUMING 32-bit guest paging!
    413366
     
    489442            case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
    490443            {
    491                 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
     444                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
    492445                const unsigned iShw = off / sizeof(X86PDEPAE);
    493446                STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPD));
     
    566519                const unsigned offPdpt = GCPhysFault - pPage->GCPhys;
    567520
    568                 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
     521                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
    569522                const unsigned iShw = offPdpt / sizeof(X86PDPE);
    570523                if (iShw < X86_PG_PAE_PDPE_ENTRIES)          /* don't use RT_ELEMENTS(uShw.pPDPT->a), because that's for long mode only */
     
    633586            {
    634587                STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPD));
    635                 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
     588                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
    636589                const unsigned iShw = off / sizeof(X86PDEPAE);
    637590                Assert(!(uShw.pPDPae->a[iShw].u & PGM_PDFLAGS_MAPPING));
     
    673626                 * - messing with the bits of pd pointers without changing the physical address
    674627                 */
    675                 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
     628                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
    676629                const unsigned iShw = off / sizeof(X86PDPE);
    677630                if (uShw.pPDPT->a[iShw].n.u1Present)
     
    703656                 * - messing with the bits of pd pointers without changing the physical address
    704657                 */
    705                 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
     658                uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
    706659                const unsigned iShw = off / sizeof(X86PDPE);
    707660                if (uShw.pPML4->a[iShw].n.u1Present)
     
    730683                AssertFatalMsgFailed(("enmKind=%d\n", pPage->enmKind));
    731684        }
    732         PGMPOOL_UNLOCK_PTR(pVM, uShw.pv);
     685        PGM_DYNMAP_UNUSED_HINT_VM(pVM, uShw.pv);
    733686
    734687        /* next */
     
    960913    while (pRegFrame->rcx)
    961914    {
    962 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    963         uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
     915#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
     916        uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
    964917        pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, (RTGCPTR)pu32, uIncrement);
    965         PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
     918        PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
    966919#else
    967920        pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, (RTGCPTR)pu32, uIncrement);
     
    1012965     * Clear all the pages. ASSUMES that pvFault is readable.
    1013966     */
    1014 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    1015     uint32_t    iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
     967#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
     968    uint32_t    iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
    1016969    pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, DISGetParamSize(pDis, &pDis->param1));
    1017     PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
     970    PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
    1018971#else
    1019972    pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, DISGetParamSize(pDis, &pDis->param1));
     
    11131066    if (pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
    11141067    {
    1115         void *pvShw = PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);
     1068        void *pvShw = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
    11161069        void *pvGst;
    11171070        int rc = PGM_GCPHYS_2_PTR(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
     
    14211374                    if (pTempPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
    14221375                    {
    1423                         PX86PTPAE pShwPT2 = (PX86PTPAE)PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pTempPage);
     1376                        PX86PTPAE pShwPT2 = (PX86PTPAE)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pTempPage);
    14241377
    14251378                        for (unsigned j = 0; j < RT_ELEMENTS(pShwPT->a); j++)
     
    15391492    pPage->fDirty = false;
    15401493
    1541 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    1542     uint32_t iPrevSubset = PGMDynMapPushAutoSubset(VMMGetCpu(pVM));
     1494#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
     1495    PVMCPU   pVCpu = VMMGetCpu(pVM);
     1496    uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
    15431497#endif
    15441498
     
    15571511    /* Flush those PTEs that have changed. */
    15581512    STAM_PROFILE_START(&pPool->StatTrackDeref,a);
    1559     void *pvShw = PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);
     1513    void *pvShw = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
    15601514    void *pvGst;
    15611515    bool  fFlush;
     
    15891543        Log(("Removed dirty page %RGp cMods=%d cChanges=%d\n", pPage->GCPhys, pPage->cModifications, cChanges));
    15901544
    1591 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    1592     PGMDynMapPopAutoSubset(VMMGetCpu(pVM), iPrevSubset);
      1545#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
     1546    PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
    15931547#endif
    15941548}
     
    16271581     * references to physical pages. (the HCPhys linear lookup is *extremely* expensive!)
    16281582     */
    1629     void *pvShw = PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);
     1583    void *pvShw = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
    16301584    void *pvGst;
    16311585    int rc = PGM_GCPHYS_2_PTR(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
     
    33523306        else
    33533307        {
    3354 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
     3308# if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
    33553309            /* Start a subset here because pgmPoolTrackFlushGCPhysPTsSlow and
    33563310               pgmPoolTrackFlushGCPhysPTs will/may kill the pool otherwise. */
    3357             uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
     3311            uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
    33583312# endif
    33593313
     
    33703324            *pfFlushTLBs = true;
    33713325
    3372 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    3373             PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
      3326# if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
     3327            PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
    33743328# endif
    33753329        }
     
    36633617            AssertFatalMsgFailed(("enmKind=%d iUser=%#x iUserTable=%#x\n", pUserPage->enmKind, pUser->iUser, pUser->iUserTable));
    36643618    }
     3619    PGM_DYNMAP_UNUSED_HINT_VM(pPool->CTX_SUFF(pVM), u.pau64);
    36653620}
    36663621
     
    44354390     * Map the shadow page and take action according to the page kind.
    44364391     */
    4437     void *pvShw = PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);
     4392    void *pvShw = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
    44384393    switch (pPage->enmKind)
    44394394    {
     
    45394494    STAM_PROFILE_STOP(&pPool->StatZeroPage, z);
    45404495    pPage->fZeroed = true;
    4541     PGMPOOL_UNLOCK_PTR(pPool->CTX_SUFF(pVM), pvShw);
     4496    PGM_DYNMAP_UNUSED_HINT_VM(pPool->CTX_SUFF(pVM), pvShw);
    45424497    Assert(!pPage->cPresent);
    45434498}
     
    45964551    }
    45974552
    4598 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
     4553#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
    45994554    /* Start a subset so we won't run out of mapping space. */
    46004555    PVMCPU pVCpu = VMMGetCpu(pVM);
    4601     uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
     4556    uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
    46024557#endif
    46034558
     
    46294584    pgmPoolCacheFlushPage(pPool, pPage);
    46304585
    4631 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
      4586#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
    46324587    /* Heavy stuff done. */
    4633     PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
     4588    PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
    46344589#endif
    46354590
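
The pool code pushes a subset before operations that map many pages (monitor chain changes, dirty-page write-back, pool flushing) because the per-CPU auto set has finite room. Pushing records the current fill level; popping evicts only what was added since, so the caller's own mappings survive the heavy lifting. A minimal sketch of the pattern the hunks above repeat:

    uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);   /* mark fill level  */
    /* ... map and modify many pool pages; heavy on the mapping set ... */
    PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);              /* evict only those */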
  • trunk/src/VBox/VMM/VMMAll/TRPMAll.cpp

    r30493 r31402  
    693693                        STAM_PROFILE_ADV_STOP(&pVM->trpm.s.aStatGCTraps[iOrgTrap], o);
    694694
    695                     CPUMGCCallGuestTrapHandler(pRegFrame, GuestIdte.Gen.u16SegSel | 1, pVM->trpm.s.aGuestTrapHandler[iGate], eflags.u32, ss_r0, (RTRCPTR)esp_r0);
     695                    PGMRZDynMapReleaseAutoSet(pVCpu);
     696                    CPUMGCCallGuestTrapHandler(pRegFrame, GuestIdte.Gen.u16SegSel | 1, pVM->trpm.s.aGuestTrapHandler[iGate],
     697                                               eflags.u32, ss_r0, (RTRCPTR)esp_r0);
    696698                    /* does not return */
    697699#else
  • trunk/src/VBox/VMM/VMMGC/PGMGC.cpp

    r28800 r31402  
    55
    66/*
    7  * Copyright (C) 2006-2007 Oracle Corporation
     7 * Copyright (C) 2006-2010 Oracle Corporation
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
  • trunk/src/VBox/VMM/VMMGC/TRPMGCHandlers.cpp

    r31100 r31402  
    4646#include <iprt/assert.h>
    4747
     48
    4849/*******************************************************************************
    4950*   Defined Constants And Macros                                               *
     
    130131 * @param   rc          The VBox status code to return.
    131132 * @param   pRegFrame   Pointer to the register frame for the trap.
     133 *
     134 * @remarks This must not be used for hypervisor traps, only guest traps.
    132135 */
    133136static int trpmGCExitTrap(PVM pVM, PVMCPU pVCpu, int rc, PCPUMCTXCORE pRegFrame)
     
    231234         */
    232235        else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
     236        {
    233237#if 1
     238            PGMRZDynMapReleaseAutoSet(pVCpu);
     239            PGMRZDynMapStartAutoSet(pVCpu);
    234240            rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
    235241#else
    236242            rc = VINF_PGM_SYNC_CR3;
    237243#endif
     244        }
    238245        /* Pending request packets might contain actions that need immediate attention, such as pending hardware interrupts. */
    239246        else if (   VM_FF_ISPENDING(pVM, VM_FF_REQUEST)
     
    246253                    && ( pRegFrame->eflags.Bits.u2IOPL < (unsigned)(pRegFrame->ss & X86_SEL_RPL) || pRegFrame->eflags.Bits.u1VM))
    247254              , ("rc=%Rrc\neflags=%RX32 ss=%RTsel IOPL=%d\n", rc, pRegFrame->eflags.u32, pRegFrame->ss, pRegFrame->eflags.Bits.u2IOPL));
     255    PGMRZDynMapReleaseAutoSet(pVCpu);
    248256    return rc;
    249257}
     
    270278
    271279    /*
    272      * We currently don't make sure of the X86_DR7_GD bit, but
     280     * We currently don't make use of the X86_DR7_GD bit, but
    273281     * there might come a time when we do.
    274282     */
    275     if ((uDr6 & X86_DR6_BD) == X86_DR6_BD)
    276     {
    277         AssertReleaseMsgFailed(("X86_DR6_BD isn't used, but it's set! dr7=%RTreg(%RTreg) dr6=%RTreg\n",
    278                                 ASMGetDR7(), CPUMGetHyperDR7(pVCpu), uDr6));
    279         return VERR_NOT_IMPLEMENTED;
    280     }
    281 
     283    AssertReleaseMsgReturn((uDr6 & X86_DR6_BD) != X86_DR6_BD,
     284                           ("X86_DR6_BD isn't used, but it's set! dr7=%RTreg(%RTreg) dr6=%RTreg\n",
     285                            ASMGetDR7(), CPUMGetHyperDR7(pVCpu), uDr6),
     286                           VERR_NOT_IMPLEMENTED);
    282287    AssertReleaseMsg(!(uDr6 & X86_DR6_BT), ("X86_DR6_BT is impossible!\n"));
    283288
     
    285290     * Now leave the rest to the DBGF.
    286291     */
     292    PGMRZDynMapStartAutoSet(pVCpu);
    287293    int rc = DBGFRZTrap01Handler(pVM, pVCpu, pRegFrame, uDr6);
    288294    if (rc == VINF_EM_RAW_GUEST_TRAP)
     
    296302
    297303/**
     304 * \#DB (Debug event) handler for the hypervisor code.
     305 *
     306 * This is mostly the same as TRPMGCTrap01Handler, but we skip the PGM auto
     307 * mapping set as well as the default trap exit path since they are both really
     308 * bad ideas in this context.
     309 *
     310 * @returns VBox status code.
     311 *          VINF_SUCCESS means we completely handled this trap,
     312 *          other codes are passed execution to host context.
     313 *
     314 * @param   pTrpmCpu    Pointer to TRPMCPU data (within VM).
     315 * @param   pRegFrame   Pointer to the register frame for the trap.
     316 * @internal
     317 */
     318DECLASM(int) TRPMGCHyperTrap01Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
     319{
     320    RTGCUINTREG uDr6  = ASMGetAndClearDR6();
     321    PVM         pVM   = TRPMCPU_2_VM(pTrpmCpu);
     322    PVMCPU      pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);
     323
     324    LogFlow(("TRPMGCHyper01: cs:eip=%04x:%08x uDr6=%RTreg\n", pRegFrame->cs, pRegFrame->eip, uDr6));
     325
     326    /*
     327     * We currently don't make use of the X86_DR7_GD bit, but
     328     * there might come a time when we do.
     329     */
     330    AssertReleaseMsgReturn((uDr6 & X86_DR6_BD) != X86_DR6_BD,
     331                           ("X86_DR6_BD isn't used, but it's set! dr7=%RTreg(%RTreg) dr6=%RTreg\n",
     332                            ASMGetDR7(), CPUMGetHyperDR7(pVCpu), uDr6),
     333                           VERR_NOT_IMPLEMENTED);
     334    AssertReleaseMsg(!(uDr6 & X86_DR6_BT), ("X86_DR6_BT is impossible!\n"));
     335
     336    /*
     337     * Now leave the rest to the DBGF.
     338     */
     339    int rc = DBGFRZTrap01Handler(pVM, pVCpu, pRegFrame, uDr6);
     340    AssertStmt(rc != VINF_EM_RAW_GUEST_TRAP, rc = VERR_INTERNAL_ERROR_3);
     341
     342    Log6(("TRPMGCHyper01: %Rrc (%04x:%08x %RTreg)\n", rc, pRegFrame->cs, pRegFrame->eip, uDr6));
     343    return rc;
     344}
     345
     346
     347/**
    298348 * NMI handler, for when we are using NMIs to debug things.
    299349 *
     
    311361    LogFlow(("TRPMGCTrap02Handler: cs:eip=%04x:%08x\n", pRegFrame->cs, pRegFrame->eip));
    312362    RTLogComPrintf("TRPMGCTrap02Handler: cs:eip=%04x:%08x\n", pRegFrame->cs, pRegFrame->eip);
     363    return VERR_TRPM_DONT_PANIC;
     364}
     365
     366
     367/**
     368 * NMI handler, for when we are using NMIs to debug things.
     369 *
     370 * This is the handler we're most likely to hit when the NMI fires (it is
     371 * unlikely that we'll be stuck in guest code).
     372 *
     373 * @returns VBox status code.
     374 *          VINF_SUCCESS means we completely handled this trap,
     375 *          other codes are passed execution to host context.
     376 *
     377 * @param   pTrpmCpu    Pointer to TRPMCPU data (within VM).
     378 * @param   pRegFrame   Pointer to the register frame for the trap.
     379 * @internal
     380 * @remark  This is not hooked up unless you're building with VBOX_WITH_NMI defined.
     381 */
     382DECLASM(int) TRPMGCHyperTrap02Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
     383{
     384    LogFlow(("TRPMGCHyperTrap02Handler: cs:eip=%04x:%08x\n", pRegFrame->cs, pRegFrame->eip));
     385    RTLogComPrintf("TRPMGCHyperTrap02Handler: cs:eip=%04x:%08x\n", pRegFrame->cs, pRegFrame->eip);
    313386    return VERR_TRPM_DONT_PANIC;
    314387}
     
    332405    PVMCPU  pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);
    333406    int     rc;
    334 
    335     /*
    336      * Both PATM are using INT3s, let them have a go first.
     407    PGMRZDynMapStartAutoSet(pVCpu);
     408
     409    /*
      410     * PATM is using INT3s; let it have a go first.
    337411     */
    338412    if (    (pRegFrame->ss & X86_SEL_RPL) == 1
     
    357431
    358432/**
     433 * \#BP (Breakpoint) handler.
     434 *
      435 * This is similar to TRPMGCTrap03Handler, but we skip the bits which are potentially
     436 * harmful to us (common trap exit and the auto mapping set).
     437 *
     438 * @returns VBox status code.
     439 *          VINF_SUCCESS means we completely handled this trap,
     440 *          other codes are passed execution to host context.
     441 *
     442 * @param   pTrpmCpu    Pointer to TRPMCPU data (within VM).
     443 * @param   pRegFrame   Pointer to the register frame for the trap.
     444 * @internal
     445 */
     446DECLASM(int) TRPMGCHyperTrap03Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
     447{
     448    LogFlow(("TRPMGCHyper03: %04x:%08x\n", pRegFrame->cs, pRegFrame->eip));
     449    PVM     pVM   = TRPMCPU_2_VM(pTrpmCpu);
     450    PVMCPU  pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);
     451
     452    /*
     453     * Hand it over to DBGF.
     454     */
     455    int rc = DBGFRZTrap03Handler(pVM, pVCpu, pRegFrame);
     456    AssertStmt(rc != VINF_EM_RAW_GUEST_TRAP, rc = VERR_INTERNAL_ERROR_3);
     457
     458    Log6(("TRPMGCHyper03: %Rrc (%04x:%08x)\n", rc, pRegFrame->cs, pRegFrame->eip));
     459    return rc;
     460}
     461
     462
     463/**
    359464 * Trap handler for illegal opcode fault (\#UD).
    360465 *
     
    373478    PVMCPU  pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);
    374479    int     rc;
     480    PGMRZDynMapStartAutoSet(pVCpu);
    375481
    376482    if (CPUMGetGuestCPL(pVCpu, pRegFrame) == 0)
     
    402508        /*
    403509         * UD2 in a patch?
     510         * Note! PATMGCHandleIllegalInstrTrap doesn't always return.
    404511         */
    405512        if (    Cpu.pCurInstr->opcode == OP_ILLUD2
    406513            &&  PATMIsPatchGCAddr(pVM, pRegFrame->eip))
    407514        {
     515            LogFlow(("TRPMGCTrap06Handler: -> PATMGCHandleIllegalInstrTrap\n"));
    408516            rc = PATMGCHandleIllegalInstrTrap(pVM, pRegFrame);
    409517            /** @todo  These tests are completely unnecessary, should just follow the
     
    439547        else if (Cpu.pCurInstr->opcode == OP_MONITOR)
    440548        {
     549            LogFlow(("TRPMGCTrap06Handler: -> EMInterpretInstructionCPU\n"));
    441550            uint32_t cbIgnored;
    442551            rc = EMInterpretInstructionCPU(pVM, pVCpu, &Cpu, pRegFrame, PC, &cbIgnored);
     
    446555        /* Never generate a raw trap here; it might be an instruction, that requires emulation. */
    447556        else
     557        {
     558            LogFlow(("TRPMGCTrap06Handler: -> VINF_EM_RAW_EMULATE_INSTR\n"));
    448559            rc = VINF_EM_RAW_EMULATE_INSTR;
     560        }
    449561    }
    450562    else
    451563    {
     564        LogFlow(("TRPMGCTrap06Handler: -> TRPMForwardTrap\n"));
    452565        rc = TRPMForwardTrap(pVCpu, pRegFrame, 0x6, 0, TRPM_TRAP_NO_ERRORCODE, TRPM_TRAP, 0x6);
    453566        Assert(rc == VINF_EM_RAW_GUEST_TRAP);
     
    478591    PVM     pVM   = TRPMCPU_2_VM(pTrpmCpu);
    479592    PVMCPU  pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);
     593    PGMRZDynMapStartAutoSet(pVCpu);
    480594
    481595    int rc = CPUMHandleLazyFPU(pVCpu);
     
    500614{
    501615    LogFlow(("TRPMGC0b: %04x:%08x\n", pRegFrame->cs, pRegFrame->eip));
    502     PVM pVM = TRPMCPU_2_VM(pTrpmCpu);
     616    PVM     pVM   = TRPMCPU_2_VM(pTrpmCpu);
     617    PVMCPU  pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);
     618    PGMRZDynMapStartAutoSet(pVCpu);
    503619
    504620    /*
     
    574690            pTrpmCpu->uActiveVector = ~0;
    575691            Log6(("TRPMGC0b: %Rrc (%04x:%08x) (CG)\n", VINF_EM_RAW_RING_SWITCH, pRegFrame->cs, pRegFrame->eip));
     692            PGMRZDynMapReleaseAutoSet(pVCpu);
    576693            return VINF_EM_RAW_RING_SWITCH;
    577694        }
     
    582699     */
    583700    Log6(("TRPMGC0b: %Rrc (%04x:%08x)\n", VINF_EM_RAW_GUEST_TRAP, pRegFrame->cs, pRegFrame->eip));
     701    PGMRZDynMapReleaseAutoSet(pVCpu);
    584702    return VINF_EM_RAW_GUEST_TRAP;
    585703}
     
    9331051    LogFlow(("TRPMGC0d: %04x:%08x err=%x\n", pRegFrame->cs, pRegFrame->eip, (uint32_t)pVCpu->trpm.s.uActiveErrorCode));
    9341052
     1053    PGMRZDynMapStartAutoSet(pVCpu);
    9351054    int rc = trpmGCTrap0dHandler(pVM, pTrpmCpu, pRegFrame);
    9361055    switch (rc)
     
    9941113     * This is all PGM stuff.
    9951114     */
     1115    PGMRZDynMapStartAutoSet(pVCpu);
    9961116    int rc = PGMTrap0eHandler(pVCpu, pVCpu->trpm.s.uActiveErrorCode, pRegFrame, (RTGCPTR)pVCpu->trpm.s.uActiveCR2);
    9971117    switch (rc)
     
    10091129        case VINF_EM_RAW_GUEST_TRAP:
    10101130            if (PATMIsPatchGCAddr(pVM, pRegFrame->eip))
     1131            {
     1132                PGMRZDynMapReleaseAutoSet(pVCpu);
    10111133                return VINF_PATM_PATCH_TRAP_PF;
     1134            }
    10121135
    10131136            rc = TRPMForwardTrap(pVCpu, pRegFrame, 0xE, 0, TRPM_TRAP_HAS_ERRORCODE, TRPM_TRAP, 0xe);
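
Every raw-mode guest trap handler in this file now follows the same bracketing rule: open the auto set on entry, close it on every exit path, and close it explicitly before any call that does not return (guest trap handler dispatch, V86 calls). A condensed sketch of that shape; the handler name is hypothetical:

    DECLASM(int) trpmGCExampleTrapHandler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
    {
        PVMCPU pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);
        PGMRZDynMapStartAutoSet(pVCpu);         /* entry: open the set    */

        int rc = VINF_SUCCESS;
        /* ... trap handling that may map guest/shadow pages ... */

        PGMRZDynMapReleaseAutoSet(pVCpu);       /* exit: close it, always */
        return rc;
    }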
  • trunk/src/VBox/VMM/VMMGC/TRPMGCHandlersA.asm

    r28800 r31402  
    44;
    55
    6 ; Copyright (C) 2006-2007 Oracle Corporation
     6; Copyright (C) 2006-2010 Oracle Corporation
    77;
    88; This file is part of VirtualBox Open Source Edition (OSE), as
     
    3434extern IMPNAME(g_TRPM)                  ; where there is a pointer to the real symbol. PE imports
    3535extern IMPNAME(g_TRPMCPU)               ; are a bit confusing at first... :-)
    36 extern IMPNAME(g_VM)                   
     36extern IMPNAME(g_VM)
    3737extern NAME(CPUMGCRestoreInt)
    3838extern NAME(cpumHandleLazyFPUAsm)
    3939extern NAME(CPUMHyperSetCtxCore)
    4040extern NAME(trpmGCTrapInGeneric)
    41 extern NAME(TRPMGCHyperTrap0bHandler)
    42 extern NAME(TRPMGCHyperTrap0dHandler)
    43 extern NAME(TRPMGCHyperTrap0eHandler)
    4441extern NAME(TRPMGCTrap01Handler)
     42extern NAME(TRPMGCHyperTrap01Handler)
    4543%ifdef VBOX_WITH_NMI
    4644extern NAME(TRPMGCTrap02Handler)
     45extern NAME(TRPMGCHyperTrap02Handler)
    4746%endif
    4847extern NAME(TRPMGCTrap03Handler)
     48extern NAME(TRPMGCHyperTrap03Handler)
    4949extern NAME(TRPMGCTrap06Handler)
     50extern NAME(TRPMGCTrap07Handler)
    5051extern NAME(TRPMGCTrap0bHandler)
     52extern NAME(TRPMGCHyperTrap0bHandler)
    5153extern NAME(TRPMGCTrap0dHandler)
     54extern NAME(TRPMGCHyperTrap0dHandler)
    5255extern NAME(TRPMGCTrap0eHandler)
    53 extern NAME(TRPMGCTrap07Handler)
     56extern NAME(TRPMGCHyperTrap0eHandler)
    5457
    5558;; IMPORTANT all COM_ functions trashes esi, some edi and the LOOP_SHORT_WHILE kills ecx.
     
    7174                                        ; =============================================================
    7275    dd 0                                ;  0 - #DE - F   - N - Divide error
    73     dd NAME(TRPMGCTrap01Handler)        ;  1 - #DB - F/T - N - Single step, INT 1 instruction
     76    dd NAME(TRPMGCHyperTrap01Handler)   ;  1 - #DB - F/T - N - Single step, INT 1 instruction
    7477%ifdef VBOX_WITH_NMI
    75     dd NAME(TRPMGCTrap02Handler)        ;  2 -     - I   - N - Non-Maskable Interrupt (NMI)
     78    dd NAME(TRPMGCHyperTrap02Handler)   ;  2 -     - I   - N - Non-Maskable Interrupt (NMI)
    7679%else
    7780    dd 0                                ;  2 -     - I   - N - Non-Maskable Interrupt (NMI)
    7881%endif
    79     dd NAME(TRPMGCTrap03Handler)        ;  3 - #BP - T   - N - Breakpoint, INT 3 instruction.
     82    dd NAME(TRPMGCHyperTrap03Handler)   ;  3 - #BP - T   - N - Breakpoint, INT 3 instruction.
    8083    dd 0                                ;  4 - #OF - T   - N - Overflow, INTO instruction.
    8184    dd 0                                ;  5 - #BR - F   - N - BOUND Range Exceeded, BOUND instruction.
     
    271274    mov     [esp + CPUMCTXCORE.eflags], eax
    272275
    273 %if GC_ARCH_BITS == 64   
     276%if GC_ARCH_BITS == 64
    274277    ; zero out the high dwords
    275278    mov     dword [esp + CPUMCTXCORE.eax + 4], 0
     
    775778    mov     [esp + CPUMCTXCORE.ss], eax
    776779
    777 %if GC_ARCH_BITS == 64   
     780%if GC_ARCH_BITS == 64
    778781    ; zero out the high dwords
    779782    mov     dword [esp + CPUMCTXCORE.eax + 4], 0
  • trunk/src/VBox/VMM/VMMGC/VMMGC.cpp

    r29250 r31402  
    55
    66/*
    7  * Copyright (C) 2006-2007 Oracle Corporation
     7 * Copyright (C) 2006-2010 Oracle Corporation
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    9191            AssertRCReturn(rc, rc);
    9292
     93            rc = PGMRCDynMapInit(pVM);
     94            AssertRCReturn(rc, rc);
    9395            return VINF_SUCCESS;
    9496        }
  • trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp

    r30241 r31402  
    10941094
    10951095#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    1096     bool fStartedSet = PGMDynMapStartOrMigrateAutoSet(pVCpu);
     1096    bool fStartedSet = PGMR0DynMapStartOrMigrateAutoSet(pVCpu);
    10971097#endif
    10981098
     
    11071107#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    11081108    if (fStartedSet)
    1109         PGMDynMapReleaseAutoSet(pVCpu);
     1109        PGMRZDynMapReleaseAutoSet(pVCpu);
    11101110#endif
    11111111
     
    12091209
    12101210#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    1211     PGMDynMapStartAutoSet(pVCpu);
     1211    PGMRZDynMapStartAutoSet(pVCpu);
    12121212#endif
    12131213
     
    12171217
    12181218#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    1219     PGMDynMapReleaseAutoSet(pVCpu);
     1219    PGMRZDynMapReleaseAutoSet(pVCpu);
    12201220#endif
    12211221    return rc;
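The renames above preserve a simple bracketing discipline: the code that opens the auto set is the code that closes it. A minimal ring-0 sketch of the pattern, assuming VBOX_WITH_2X_4GB_ADDR_SPACE is defined (hmR0DemoWorker is a hypothetical caller, not part of this changeset):

    static int hmR0DemoWorker(PVMCPU pVCpu)
    {
    #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
        /* Opens the set if it was closed; otherwise migrates it to this CPU. */
        bool fStartedSet = PGMR0DynMapStartOrMigrateAutoSet(pVCpu);
    #endif

        int rc = VINF_SUCCESS;            /* ... work that takes dynamic mappings ... */

    #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
        if (fStartedSet)                  /* only release a set we opened ourselves */
            PGMRZDynMapReleaseAutoSet(pVCpu);
    #endif
        return rc;
    }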
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r31394 r31402  
    25652565#endif
    25662566#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    2567     PGMDynMapFlushAutoSet(pVCpu);
     2567    PGMRZDynMapFlushAutoSet(pVCpu);
    25682568#endif
    25692569
  • trunk/src/VBox/VMM/VMMRZ/PGMRZDynMap.cpp

    r31270 r31402  
    11/* $Id$ */
    22/** @file
    3  * PGM - Page Manager and Monitor, ring-0 dynamic mapping cache.
     3 * PGM - Page Manager and Monitor, dynamic mapping cache.
    44 */
    55
    66/*
    7  * Copyright (C) 2008 Oracle Corporation
     7 * Copyright (C) 2008-2010 Oracle Corporation
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    1616 */
    1717
     18
    1819/*******************************************************************************
    1920*   Internal Functions                                                         *
    2021*******************************************************************************/
    21 #define LOG_GROUP LOG_GROUP_PGM
     22#define LOG_GROUP LOG_GROUP_PGM_DYNMAP
    2223#include <VBox/pgm.h>
    2324#include "../PGMInternal.h"
    2425#include <VBox/vm.h>
    2526#include "../PGMInline.h"
     27#include <VBox/err.h>
     28#include <VBox/param.h>
    2629#include <VBox/sup.h>
    27 #include <VBox/err.h>
    2830#include <iprt/asm.h>
    2931#include <iprt/asm-amd64-x86.h>
    30 #include <iprt/alloc.h>
    3132#include <iprt/assert.h>
    32 #include <iprt/cpuset.h>
    33 #include <iprt/memobj.h>
    34 #include <iprt/mp.h>
    35 #include <iprt/semaphore.h>
    36 #include <iprt/spinlock.h>
     33#ifndef IN_RC
     34# include <iprt/cpuset.h>
     35# include <iprt/mem.h>
     36# include <iprt/memobj.h>
     37# include <iprt/mp.h>
     38# include <iprt/semaphore.h>
     39# include <iprt/spinlock.h>
     40#endif
    3741#include <iprt/string.h>
    3842
     
    4145*   Defined Constants And Macros                                               *
    4246*******************************************************************************/
     47#ifdef IN_RING0
    4348/** The max size of the mapping cache (in pages). */
    44 #define PGMR0DYNMAP_MAX_PAGES               ((16*_1M) >> PAGE_SHIFT)
     49# define PGMR0DYNMAP_MAX_PAGES              ((16*_1M) >> PAGE_SHIFT)
    4550/** The small segment size that is adopted on out-of-memory conditions with a
    4651 * single big segment. */
    47 #define PGMR0DYNMAP_SMALL_SEG_PAGES         128
     52# define PGMR0DYNMAP_SMALL_SEG_PAGES        128
    4853/** The number of pages we reserve per CPU. */
    49 #define PGMR0DYNMAP_PAGES_PER_CPU           256
     54# define PGMR0DYNMAP_PAGES_PER_CPU          256
    5055/** The minimum number of pages we reserve per CPU.
    5156 * This must be equal or larger than the autoset size.  */
    52 #define PGMR0DYNMAP_PAGES_PER_CPU_MIN       64
     57# define PGMR0DYNMAP_PAGES_PER_CPU_MIN      64
      58/** Calcs the overload threshold (safety margin).  Currently set at 50%. */
     59# define PGMR0DYNMAP_CALC_OVERLOAD(cPages)  ((cPages) / 2)
    5360/** The number of guard pages.
    5461 * @remarks Never do tuning of the hashing or whatnot with a strict build!  */
    55 #if defined(VBOX_STRICT)
    56 # define PGMR0DYNMAP_GUARD_PAGES            1
    57 #else
    58 # define PGMR0DYNMAP_GUARD_PAGES            0
    59 #endif
     62# if defined(VBOX_STRICT)
     63#  define PGMR0DYNMAP_GUARD_PAGES           1
     64# else
     65#  define PGMR0DYNMAP_GUARD_PAGES           0
     66# endif
     67#endif /* IN_RING0 */
    6068/** The dummy physical address of guard pages. */
    6169#define PGMR0DYNMAP_GUARD_PAGE_HCPHYS       UINT32_C(0x7777feed)
     
     6674 * The alternative is to replace the entire PTE with a bad not-present
    6775 * PTE. Either way, XNU will screw us. :-/   */
    68 #define PGMR0DYNMAP_GUARD_NP
     76# define PGMR0DYNMAP_GUARD_NP
    6977#endif
    7078/** The dummy PTE value for a page. */
     
    7280/** The dummy PTE value for a page. */
    7381#define PGMR0DYNMAP_GUARD_PAGE_PAE_PTE      UINT64_MAX /*X86_PTE_PAE_PG_MASK*/
    74 /** Calcs the overload threshold. Current set at 50%. */
    75 #define PGMR0DYNMAP_CALC_OVERLOAD(cPages)   ((cPages) / 2)
    76 
    77 #if 0
    78 /* Assertions causes panics if preemption is disabled, this can be used to work around that. */
    79 //#define RTSpinlockAcquire(a,b) do {} while (0)
    80 //#define RTSpinlockRelease(a,b) do {} while (0)
    81 #endif
     82
      83#ifdef IN_RING0 /* Note! Assertions cause panics if preemption is disabled,
      84                 *       disable these macros to work around that. */
     85/**
      86 * Acquires the spinlock.
      87 * This declares a temporary variable and expands to two statements!
     88 */
     89# define PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis) \
     90    RTSPINLOCKTMP   MySpinlockTmp = RTSPINLOCKTMP_INITIALIZER; \
     91    RTSpinlockAcquire((pThis)->hSpinlock, &MySpinlockTmp)
     92/**
     93 * Releases the spinlock.
     94 */
     95# define PGMRZDYNMAP_SPINLOCK_RELEASE(pThis) \
     96    RTSpinlockRelease((pThis)->hSpinlock, &MySpinlockTmp)
     97
     98/**
     99 * Re-acquires the spinlock.
     100 */
     101# define PGMRZDYNMAP_SPINLOCK_REACQUIRE(pThis) \
     102    RTSpinlockAcquire((pThis)->hSpinlock, &MySpinlockTmp)
     103#else
     104# define PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis)   do { } while (0)
     105# define PGMRZDYNMAP_SPINLOCK_RELEASE(pThis)   do { } while (0)
     106# define PGMRZDYNMAP_SPINLOCK_REACQUIRE(pThis) do { } while (0)
     107#endif
     108
    82109
    83110/** Converts a PGMCPUM::AutoSet pointer into a PVMCPU. */
    84 #define PGMR0DYNMAP_2_VMCPU(pSet)           (RT_FROM_MEMBER(pSet, VMCPU, pgm.s.AutoSet))
     111#define PGMRZDYNMAP_SET_2_VMCPU(pSet)       (RT_FROM_MEMBER(pSet, VMCPU, pgm.s.AutoSet))
    85112
    86113/** Converts a PGMCPUM::AutoSet pointer into a PVM. */
    87 #define PGMR0DYNMAP_2_VM(pSet)              (PGMR0DYNMAP_2_VMCPU(pSet)->CTX_SUFF(pVM))
     114#define PGMRZDYNMAP_SET_2_VM(pSet)          (PGMRZDYNMAP_SET_2_VMCPU(pSet)->CTX_SUFF(pVM))
     115
      116/** Converts a PGMCPUM::AutoSet pointer into the mapping cache instance for the current context. */
     117#ifdef IN_RC
     118# define PGMRZDYNMAP_SET_2_DYNMAP(pSet)     (PGMRZDYNMAP_SET_2_VM(pSet)->pgm.s.pRCDynMap)
     119#else
     120# define PGMRZDYNMAP_SET_2_DYNMAP(pSet)     (g_pPGMR0DynMap)
     121#endif
     122
     123/**
     124 * Gets the set index of the current CPU.
     125 *
     126 * This always returns 0 when in raw-mode context because there is only ever
     127 * one EMT in that context (at least presently).
     128 */
     129#ifdef IN_RC
     130# define PGMRZDYNMAP_CUR_CPU()              (0)
     131#else
     132# define PGMRZDYNMAP_CUR_CPU()              RTMpCpuIdToSetIndex(RTMpCpuId())
     133#endif
     134
     135/** PGMRZDYNMAP::u32Magic. (Jens Christian Bugge Wesseltoft) */
     136#define PGMRZDYNMAP_MAGIC                   UINT32_C(0x19640201)
     137
     138
      139/** Zaps a set entry. */
     140#define PGMRZDYNMAP_ZAP_ENTRY(pEntry) \
     141    do \
     142    { \
     143        (pEntry)->iPage        = UINT16_MAX; \
     144        (pEntry)->cRefs        = 0; \
     145        (pEntry)->cInlinedRefs = 0; \
     146        (pEntry)->cUnrefs      = 0; \
     147    } while (0)
    88148
    89149
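Since PGMRZDYNMAP_SPINLOCK_ACQUIRE declares MySpinlockTmp in the enclosing scope, it must appear as a statement at the top of a block and cannot serve as the body of an unbraced if. A minimal usage sketch (pgmRZDynMapDemoLocked is hypothetical):

    static void pgmRZDynMapDemoLocked(PPGMRZDYNMAP pThis)
    {
        PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);   /* declares MySpinlockTmp and locks */
        /* ... touch pThis->paPages[] and friends under the lock ... */
        PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);   /* reuses MySpinlockTmp from above */
    }

In raw-mode context all three macros expand to nothing, as there is only ever one EMT there (see PGMRZDYNMAP_CUR_CPU above) and thus nothing to serialize against.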
     
    91151*   Structures and Typedefs                                                    *
    92152*******************************************************************************/
     153#ifdef IN_RING0
    93154/**
    94155 * Ring-0 dynamic mapping cache segment.
     
    125186 * Ring-0 dynamic mapping cache entry.
    126187 *
    127  * This structure tracks
     188 * @sa PGMRZDYNMAPENTRY, PGMRCDYNMAPENTRY.
    128189 */
    129190typedef struct PGMR0DYNMAPENTRY
     
    147208        void                   *pv;
    148209    } uPte;
     210# ifndef IN_RC
    149211    /** CPUs that haven't invalidated this entry after it's last update. */
    150212    RTCPUSET                    PendingSet;
     213# endif
    151214} PGMR0DYNMAPENTRY;
    152 /** Pointer to a ring-0 dynamic mapping cache entry. */
      215/** Pointer to a mapping cache entry for ring-0.
      216 * @sa PPGMRZDYNMAPENTRY, PPGMRCDYNMAPENTRY. */
    153217typedef PGMR0DYNMAPENTRY *PPGMR0DYNMAPENTRY;
    154218
    155219
    156220/**
    157  * Ring-0 dynamic mapping cache.
    158  *
    159  * This is initialized during VMMR0 module init but no segments are allocated at
    160  * that time.  Segments will be added when the first VM is started and removed
    161  * again when the last VM shuts down, thus avoid consuming memory while dormant.
    162  * At module termination, the remaining bits will be freed up.
     221 * Dynamic mapping cache for ring-0.
     222 *
     223 * This is initialized during VMMR0 module init but no segments are allocated
     224 * at that time.  Segments will be added when the first VM is started and
      225 * removed again when the last VM shuts down, thus avoiding memory consumption
      226 * while dormant.  At module termination, the remaining bits will be freed up.
     227 *
     228 * @sa PPGMRZDYNMAP, PGMRCDYNMAP.
    163229 */
    164230typedef struct PGMR0DYNMAP
    165231{
    166     /** The usual magic number / eye catcher (PGMR0DYNMAP_MAGIC). */
     232    /** The usual magic number / eye catcher (PGMRZDYNMAP_MAGIC). */
    167233    uint32_t                    u32Magic;
     234# ifndef IN_RC
    168235    /** Spinlock serializing the normal operation of the cache. */
    169236    RTSPINLOCK                  hSpinlock;
     237# endif
    170238    /** Array for tracking and managing the pages.  */
    171239    PPGMR0DYNMAPENTRY           paPages;
     
     180248     * This is maintained to trigger adding of more mapping space. */
    181249    uint32_t                    cMaxLoad;
     250# ifndef IN_RC
    182251    /** Initialization / termination lock. */
    183252    RTSEMFASTMUTEX              hInitLock;
     253# endif
    184254    /** The number of guard pages. */
    185255    uint32_t                    cGuardPages;
    186256    /** The number of users (protected by hInitLock). */
    187257    uint32_t                    cUsers;
     258# ifndef IN_RC
    188259    /** Array containing a copy of the original page tables.
    189260     * The entries are either X86PTE or X86PTEPAE according to fLegacyMode. */
     
    193264    /** The paging mode. */
    194265    SUPPAGINGMODE               enmPgMode;
     266# endif
    195267} PGMR0DYNMAP;
    196 /** Pointer to the ring-0 dynamic mapping cache */
    197 typedef PGMR0DYNMAP *PPGMR0DYNMAP;
    198 
    199 /** PGMR0DYNMAP::u32Magic. (Jens Christian Bugge Wesseltoft) */
    200 #define PGMR0DYNMAP_MAGIC       0x19640201
    201268
    202269
     
    228295/** Pointer to paging level data. */
    229296typedef PGMR0DYNMAPPGLVL *PPGMR0DYNMAPPGLVL;
     297#endif
     298
     299/** Mapping cache entry for the current context.
     300 * @sa PGMR0DYNMAPENTRY, PGMRCDYNMAPENTRY  */
     301typedef CTX_MID(PGM,DYNMAPENTRY) PGMRZDYNMAPENTRY;
      302/** Pointer to a mapping cache entry for the current context.
     303 * @sa PGMR0DYNMAPENTRY, PGMRCDYNMAPENTRY  */
     304typedef PGMRZDYNMAPENTRY *PPGMRZDYNMAPENTRY;
     305
      306/** Pointer to the mapping cache instance for the current context.
     307 * @sa PGMR0DYNMAP, PGMRCDYNMAP  */
     308typedef CTX_MID(PGM,DYNMAP) *PPGMRZDYNMAP;
     309
    230310
    231311
     
    233313*   Global Variables                                                           *
    234314*******************************************************************************/
     315#ifdef IN_RING0
    235316/** Pointer to the ring-0 dynamic mapping cache. */
    236 static PPGMR0DYNMAP g_pPGMR0DynMap;
     317static PGMR0DYNMAP *g_pPGMR0DynMap;
     318#endif
    237319/** For overflow testing. */
    238320static bool         g_fPGMR0DynMapTestRunning = false;
     
    242324*   Internal Functions                                                         *
    243325*******************************************************************************/
    244 static void pgmR0DynMapReleasePage(PPGMR0DYNMAP pThis, uint32_t iPage, uint32_t cRefs);
    245 static int  pgmR0DynMapSetup(PPGMR0DYNMAP pThis);
    246 static int  pgmR0DynMapExpand(PPGMR0DYNMAP pThis);
    247 static void pgmR0DynMapTearDown(PPGMR0DYNMAP pThis);
     326static void pgmRZDynMapReleasePage(PPGMRZDYNMAP pThis, uint32_t iPage, uint32_t cRefs);
     327#ifdef IN_RING0
     328static int  pgmR0DynMapSetup(PPGMRZDYNMAP pThis);
     329static int  pgmR0DynMapExpand(PPGMRZDYNMAP pThis);
     330static void pgmR0DynMapTearDown(PPGMRZDYNMAP pThis);
     331#endif
    248332#if 0 /*def DEBUG*/
    249333static int  pgmR0DynMapTest(PVM pVM);
     
    252336
    253337/**
     338 * Initializes the auto mapping sets for a VM.
     339 *
     340 * @returns VINF_SUCCESS on success, VERR_INTERNAL_ERROR on failure.
     341 * @param   pVM         The VM in question.
     342 */
     343static int pgmRZDynMapInitAutoSetsForVM(PVM pVM)
     344{
     345    VMCPUID idCpu = pVM->cCpus;
     346    AssertReturn(idCpu > 0 && idCpu <= VMM_MAX_CPU_COUNT, VERR_INTERNAL_ERROR);
     347    while (idCpu-- > 0)
     348    {
     349        PPGMMAPSET pSet = &pVM->aCpus[idCpu].pgm.s.AutoSet;
     350        uint32_t j = RT_ELEMENTS(pSet->aEntries);
     351        while (j-- > 0)
     352        {
     353            pSet->aEntries[j].pvPage        = NULL;
     354            pSet->aEntries[j].HCPhys        = NIL_RTHCPHYS;
     355            PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[j]);
     356        }
     357        pSet->cEntries = PGMMAPSET_CLOSED;
     358        pSet->iSubset = UINT32_MAX;
     359        pSet->iCpu = -1;
     360        memset(&pSet->aiHashTable[0], 0xff, sizeof(pSet->aiHashTable));
     361    }
     362
     363    return VINF_SUCCESS;
     364}
     365
     366
     367#ifdef IN_RING0
     368
     369/**
    254370 * Initializes the ring-0 dynamic mapping cache.
    255371 *
     
    263379     * Create and initialize the cache instance.
    264380     */
    265     PPGMR0DYNMAP pThis = (PPGMR0DYNMAP)RTMemAllocZ(sizeof(*pThis));
     381    PPGMRZDYNMAP pThis = (PPGMRZDYNMAP)RTMemAllocZ(sizeof(*pThis));
    266382    AssertLogRelReturn(pThis, VERR_NO_MEMORY);
    267383    int             rc = VINF_SUCCESS;
     
    295411            if (RT_SUCCESS(rc))
    296412            {
    297                 pThis->u32Magic = PGMR0DYNMAP_MAGIC;
     413                pThis->u32Magic = PGMRZDYNMAP_MAGIC;
    298414                g_pPGMR0DynMap = pThis;
    299415                return VINF_SUCCESS;
     
    322438     * is just a mirror image of PGMR0DynMapInit.
    323439     */
    324     PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
     440    PPGMRZDYNMAP pThis = g_pPGMR0DynMap;
    325441    if (pThis)
    326442    {
     
    359475     * Initialize the auto sets.
    360476     */
    361     VMCPUID idCpu = pVM->cCpus;
    362     AssertReturn(idCpu > 0 && idCpu <= VMM_MAX_CPU_COUNT, VERR_INTERNAL_ERROR);
    363     while (idCpu-- > 0)
    364     {
    365         PPGMMAPSET pSet = &pVM->aCpus[idCpu].pgm.s.AutoSet;
    366         uint32_t j = RT_ELEMENTS(pSet->aEntries);
    367         while (j-- > 0)
    368         {
    369             pSet->aEntries[j].iPage  = UINT16_MAX;
    370             pSet->aEntries[j].cRefs  = 0;
    371             pSet->aEntries[j].pvPage = NULL;
    372             pSet->aEntries[j].HCPhys = NIL_RTHCPHYS;
    373         }
    374         pSet->cEntries = PGMMAPSET_CLOSED;
    375         pSet->iSubset = UINT32_MAX;
    376         pSet->iCpu = -1;
    377         memset(&pSet->aiHashTable[0], 0xff, sizeof(pSet->aiHashTable));
    378     }
     477    int rc = pgmRZDynMapInitAutoSetsForVM(pVM);
     478    if (RT_FAILURE(rc))
     479        return rc;
    379480
    380481    /*
     
    387488     * Reference and if necessary setup or expand the cache.
    388489     */
    389     PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
     490    PPGMRZDYNMAP pThis = g_pPGMR0DynMap;
    390491    AssertPtrReturn(pThis, VERR_INTERNAL_ERROR);
    391     int rc = RTSemFastMutexRequest(pThis->hInitLock);
     492    rc = RTSemFastMutexRequest(pThis->hInitLock);
    392493    AssertLogRelRCReturn(rc, rc);
    393494
     
    430531        return;
    431532
    432     PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
     533    PPGMRZDYNMAP pThis = g_pPGMR0DynMap;
    433534    AssertPtrReturnVoid(pThis);
    434535
     
    463564                    LogRel(("PGMR0DynMapTermVM: %d dangling refs to %#x\n", cRefs, iPage));
    464565                    if (iPage < pThis->cPages && cRefs > 0)
    465                         pgmR0DynMapReleasePage(pThis, iPage, cRefs);
     566                        pgmRZDynMapReleasePage(pThis, iPage, cRefs);
    466567                    else
    467568                        AssertLogRelMsgFailed(("cRefs=%d iPage=%#x cPages=%u\n", cRefs, iPage, pThis->cPages));
    468569
    469                     pSet->aEntries[j].iPage  = UINT16_MAX;
    470                     pSet->aEntries[j].cRefs  = 0;
    471                     pSet->aEntries[j].pvPage = NULL;
    472                     pSet->aEntries[j].HCPhys = NIL_RTHCPHYS;
     570                    PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[j]);
    473571                }
    474572                pSet->cEntries = PGMMAPSET_CLOSED;
     
    512610{
    513611    Assert(!pvUser2);
    514     PPGMR0DYNMAP        pThis   = (PPGMR0DYNMAP)pvUser1;
     612    PPGMRZDYNMAP        pThis   = (PPGMRZDYNMAP)pvUser1;
    515613    Assert(pThis == g_pPGMR0DynMap);
    516     PPGMR0DYNMAPENTRY   paPages = pThis->paPages;
     614    PPGMRZDYNMAPENTRY   paPages = pThis->paPages;
    517615    uint32_t            iPage   = pThis->cPages;
    518616    while (iPage-- > 0)
     
    527625 * @param   pThis       The dynamic mapping cache instance.
    528626 */
    529 static int pgmR0DynMapTlbShootDown(PPGMR0DYNMAP pThis)
     627static int pgmR0DynMapTlbShootDown(PPGMRZDYNMAP pThis)
    530628{
    531629    int rc = RTMpOnAll(pgmR0DynMapShootDownTlbs, pThis, NULL);
     
    548646 * @param   pcMinPages  The minimal size in pages.
    549647 */
    550 static uint32_t pgmR0DynMapCalcNewSize(PPGMR0DYNMAP pThis, uint32_t *pcMinPages)
     648static uint32_t pgmR0DynMapCalcNewSize(PPGMRZDYNMAP pThis, uint32_t *pcMinPages)
    551649{
    552650    Assert(pThis->cPages <= PGMR0DYNMAP_MAX_PAGES);
     
    594692 * @param   pPgLvl      The paging level data.
    595693 */
    596 void pgmR0DynMapPagingArrayInit(PPGMR0DYNMAP pThis, PPGMR0DYNMAPPGLVL pPgLvl)
     694void pgmR0DynMapPagingArrayInit(PPGMRZDYNMAP pThis, PPGMR0DYNMAPPGLVL pPgLvl)
    597695{
    598696    RTCCUINTREG     cr4 = ASMGetCR4();
     
    704802 * @param   ppvPTE      Where to store the PTE address.
    705803 */
    706 static int pgmR0DynMapPagingArrayMapPte(PPGMR0DYNMAP pThis, PPGMR0DYNMAPPGLVL pPgLvl, void *pvPage,
     804static int pgmR0DynMapPagingArrayMapPte(PPGMRZDYNMAP pThis, PPGMR0DYNMAPPGLVL pPgLvl, void *pvPage,
    707805                                        PPGMR0DYNMAPSEG pSeg, uint32_t cMaxPTs, void **ppvPTE)
    708806{
     
    791889 * @param   pPage       The page.
    792890 */
    793 DECLINLINE(void) pgmR0DynMapSetupGuardPage(PPGMR0DYNMAP pThis, PPGMR0DYNMAPENTRY pPage)
     891DECLINLINE(void) pgmR0DynMapSetupGuardPage(PPGMRZDYNMAP pThis, PPGMRZDYNMAPENTRY pPage)
    794892{
    795893    memset(pPage->pvPage, 0xfd, PAGE_SIZE);
     
     815913 * @param   cPages      The size of the new segment, given as a page count.
    816914 */
    817 static int pgmR0DynMapAddSeg(PPGMR0DYNMAP pThis, uint32_t cPages)
     915static int pgmR0DynMapAddSeg(PPGMRZDYNMAP pThis, uint32_t cPages)
    818916{
    819917    int rc2;
     
    838936    }
    839937
    840     RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    841     RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
     938    PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);
    842939
    843940    memcpy(pvPages, pThis->paPages, sizeof(pThis->paPages[0]) * pThis->cPages);
    844941    void *pvToFree = pThis->paPages;
    845     pThis->paPages = (PPGMR0DYNMAPENTRY)pvPages;
    846 
    847     RTSpinlockRelease(pThis->hSpinlock, &Tmp);
     942    pThis->paPages = (PPGMRZDYNMAPENTRY)pvPages;
     943
     944    PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
    848945    RTMemFree(pvToFree);
    849946
     
    882979            pThis->paPages[iPage].cRefs  = 0;
    883980            pThis->paPages[iPage].uPte.pPae = 0;
     981#ifndef IN_RC
    884982            RTCpuSetFill(&pThis->paPages[iPage].PendingSet);
     983#endif
    885984
    886985            /* Map its page table, retry until we've got a clean run (paranoia). */
     
    9831082 * @param   pThis       The dynamic mapping cache instance.
    9841083 */
    985 static int pgmR0DynMapSetup(PPGMR0DYNMAP pThis)
     1084static int pgmR0DynMapSetup(PPGMRZDYNMAP pThis)
    9861085{
    9871086    /*
     
    10261125 * @param   pThis       The dynamic mapping cache instance.
    10271126 */
    1028 static int pgmR0DynMapExpand(PPGMR0DYNMAP pThis)
     1127static int pgmR0DynMapExpand(PPGMRZDYNMAP pThis)
    10291128{
    10301129    /*
     
    10691168 * @param   pThis       The dynamic mapping cache instance.
    10701169 */
    1071 static void pgmR0DynMapTearDown(PPGMR0DYNMAP pThis)
     1170static void pgmR0DynMapTearDown(PPGMRZDYNMAP pThis)
    10721171{
    10731172    /*
    10741173     * Restore the original page table entries
    10751174     */
    1076     PPGMR0DYNMAPENTRY   paPages = pThis->paPages;
     1175    PPGMRZDYNMAPENTRY   paPages = pThis->paPages;
    10771176    uint32_t            iPage   = pThis->cPages;
    10781177    if (pThis->fLegacyMode)
     
    11451244}
    11461245
     1246#endif /* IN_RING0 */
     1247#ifdef IN_RC
     1248
     1249/**
     1250 * Initializes the dynamic mapping cache in raw-mode context.
     1251 *
     1252 * @returns VBox status code.
     1253 * @param   pVM                 The VM handle.
     1254 */
     1255VMMRCDECL(int) PGMRCDynMapInit(PVM pVM)
     1256{
     1257    /*
     1258     * Allocate and initialize the instance data and page array.
     1259     */
     1260    PPGMRZDYNMAP    pThis;
     1261    size_t const    cPages = MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE;
     1262    size_t const    cb     = RT_ALIGN_Z(sizeof(*pThis), 32)
     1263                           + sizeof(PGMRZDYNMAPENTRY) * cPages;
     1264    int rc = MMHyperAlloc(pVM, cb, 32, MM_TAG_PGM, (void **)&pThis);
     1265    if (RT_FAILURE(rc))
     1266        return rc;
     1267
     1268    pThis->u32Magic     = PGMRZDYNMAP_MAGIC;
     1269    pThis->paPages      = RT_ALIGN_PT(pThis + 1, 32, PPGMRZDYNMAPENTRY);
     1270    pThis->cPages       = cPages;
     1271    pThis->fLegacyMode  = PGMGetHostMode(pVM) == PGMMODE_32_BIT;
     1272    pThis->cLoad        = 0;
     1273    pThis->cMaxLoad     = 0;
     1274    pThis->cGuardPages  = 0;
     1275    pThis->cUsers       = 1;
     1276
     1277    for (size_t iPage = 0; iPage < cPages; iPage++)
     1278    {
     1279        pThis->paPages[iPage].HCPhys = NIL_RTHCPHYS;
     1280        pThis->paPages[iPage].pvPage = pVM->pgm.s.pbDynPageMapBaseGC + iPage * PAGE_SIZE;
     1281        pThis->paPages[iPage].cRefs  = 0;
     1282        if (pThis->fLegacyMode)
     1283            pThis->paPages[iPage].uPte.pLegacy = &pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage];
     1284        else
     1285            pThis->paPages[iPage].uPte.pPae    = &pVM->pgm.s.paDynPageMapPaePTEsGC[iPage];
     1286    }
     1287
     1288    pVM->pgm.s.pRCDynMap = pThis;
     1289
     1290    /*
      1291     * Initialize the auto sets for the VM.
     1292     */
     1293    rc = pgmRZDynMapInitAutoSetsForVM(pVM);
     1294    if (RT_FAILURE(rc))
     1295        return rc;
     1296
     1297    return VINF_SUCCESS;
     1298}
     1299
     1300#endif /* IN_RC */
    11471301
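The raw-mode cache reuses the MM_HYPER_DYNAMIC_SIZE mapping area wholesale, so its capacity is fixed at MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE entries. For illustration only: were the dynamic area 2 MB with 4 KB pages, PGMRCDynMapInit would set cPages to 512 and carve the entry array out of the same MMHyperAlloc block as the instance data.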
    11481302/**
     
    11531307 * @param   cRefs       The number of references to release.
    11541308 */
    1155 DECLINLINE(void) pgmR0DynMapReleasePageLocked(PPGMR0DYNMAP pThis, uint32_t iPage, int32_t cRefs)
     1309DECLINLINE(void) pgmRZDynMapReleasePageLocked(PPGMRZDYNMAP pThis, uint32_t iPage, int32_t cRefs)
    11561310{
    11571311    cRefs = ASMAtomicSubS32(&pThis->paPages[iPage].cRefs, cRefs) - cRefs;
     
    11691323 * @param   cRefs       The number of references to release.
    11701324 */
    1171 static void pgmR0DynMapReleasePage(PPGMR0DYNMAP pThis, uint32_t iPage, uint32_t cRefs)
    1172 {
    1173     RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    1174     RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    1175     pgmR0DynMapReleasePageLocked(pThis, iPage, cRefs);
    1176     RTSpinlockRelease(pThis->hSpinlock, &Tmp);
     1325static void pgmRZDynMapReleasePage(PPGMRZDYNMAP pThis, uint32_t iPage, uint32_t cRefs)
     1326{
     1327    PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);
     1328    pgmRZDynMapReleasePageLocked(pThis, iPage, cRefs);
     1329    PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
    11771330}
    11781331
     
    11861339 * @param   iPage       The page index pgmR0DynMapPage hashed HCPhys to.
    11871340 * @param   pVCpu       The current CPU, for statistics.
    1188  */
    1189 static uint32_t pgmR0DynMapPageSlow(PPGMR0DYNMAP pThis, RTHCPHYS HCPhys, uint32_t iPage, PVMCPU pVCpu)
    1190 {
    1191     STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageSlow);
     1341 * @param   pfNew       Set to @c true if a new entry was made and @c false if
     1342 *                      an old entry was found and reused.
     1343 */
     1344static uint32_t pgmR0DynMapPageSlow(PPGMRZDYNMAP pThis, RTHCPHYS HCPhys, uint32_t iPage, PVMCPU pVCpu, bool *pfNew)
     1345{
     1346    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageSlow);
    11921347
    11931348    /*
     
    11991354#endif
    12001355    uint32_t const      cPages  = pThis->cPages;
    1201     PPGMR0DYNMAPENTRY   paPages = pThis->paPages;
     1356    PPGMRZDYNMAPENTRY   paPages = pThis->paPages;
    12021357    uint32_t            iFreePage;
    12031358    if (!paPages[iPage].cRefs)
     
    12171372            if (paPages[iFreePage].HCPhys == HCPhys)
    12181373            {
    1219                 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageSlowLoopHits);
     1374                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageSlowLoopHits);
     1375                *pfNew = false;
    12201376                return iFreePage;
    12211377            }
     
    12281384                return UINT32_MAX;
    12291385        }
    1230         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageSlowLoopMisses);
     1386        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageSlowLoopMisses);
    12311387#ifdef VBOX_WITH_STATISTICS
    12321388        fLooped = true;
     
    12401396        for (uint32_t iPage2 = (iPage + 3) % cPages; iPage2 != iPage; iPage2 = (iPage2 + 1) % cPages)
    12411397            if (paPages[iPage2].HCPhys == HCPhys)
    1242                 STAM_COUNTER_INC(&pVCpu->pgm.s.StatR0DynMapPageSlowLostHits);
     1398                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZDynMapPageSlowLostHits);
    12431399#endif
    12441400
     
    12461402     * Setup the new entry.
    12471403     */
     1404    *pfNew = true;
    12481405    /*Log6(("pgmR0DynMapPageSlow: old - %RHp %#x %#llx\n", paPages[iFreePage].HCPhys, paPages[iFreePage].cRefs, paPages[iFreePage].uPte.pPae->u));*/
    12491406    paPages[iFreePage].HCPhys = HCPhys;
     1407#ifndef IN_RC
    12501408    RTCpuSetFill(&paPages[iFreePage].PendingSet);
     1409#endif
    12511410    if (pThis->fLegacyMode)
    12521411    {
     
     12861445 * @param   ppvPage     Where to store the page address.
    12871446 */
    1288 DECLINLINE(uint32_t) pgmR0DynMapPage(PPGMR0DYNMAP pThis, RTHCPHYS HCPhys, int32_t iRealCpu, PVMCPU pVCpu, void **ppvPage)
    1289 {
    1290     RTSPINLOCKTMP       Tmp     = RTSPINLOCKTMP_INITIALIZER;
    1291     RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
     1447DECLINLINE(uint32_t) pgmR0DynMapPage(PPGMRZDYNMAP pThis, RTHCPHYS HCPhys, int32_t iRealCpu, PVMCPU pVCpu, void **ppvPage)
     1448{
     1449    PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);
    12921450    AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
    1293     STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPage);
     1451    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPage);
    12941452
    12951453    /*
     
    13011459     * to pgmR0DynMapPageSlow().
    13021460     */
     1461    bool                fNew    = false;
    13031462    uint32_t const      cPages  = pThis->cPages;
    13041463    uint32_t            iPage   = (HCPhys >> PAGE_SHIFT) % cPages;
    1305     PPGMR0DYNMAPENTRY   paPages = pThis->paPages;
     1464    PPGMRZDYNMAPENTRY   paPages = pThis->paPages;
    13061465    if (RT_LIKELY(paPages[iPage].HCPhys == HCPhys))
    1307         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageHits0);
     1466        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageHits0);
    13081467    else
    13091468    {
     
    13121471        {
    13131472            iPage = iPage2;
    1314             STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageHits1);
     1473            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageHits1);
    13151474        }
    13161475        else
     
    13201479            {
    13211480                iPage = iPage2;
    1322                 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageHits2);
     1481                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageHits2);
    13231482            }
    13241483            else
    13251484            {
    1326                 iPage = pgmR0DynMapPageSlow(pThis, HCPhys, iPage, pVCpu);
     1485                iPage = pgmR0DynMapPageSlow(pThis, HCPhys, iPage, pVCpu, &fNew);
    13271486                if (RT_UNLIKELY(iPage == UINT32_MAX))
    13281487                {
    1329                     RTSpinlockRelease(pThis->hSpinlock, &Tmp);
     1488                    PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
    13301489                    *ppvPage = NULL;
    13311490                    return iPage;
     
    13491508    {
    13501509        ASMAtomicDecS32(&paPages[iPage].cRefs);
    1351         RTSpinlockRelease(pThis->hSpinlock, &Tmp);
     1510        PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
    13521511        *ppvPage = NULL;
    13531512        AssertLogRelMsgFailedReturn(("cRefs=%d iPage=%p HCPhys=%RHp\n", cRefs, iPage, HCPhys), UINT32_MAX);
     
    13551514    void *pvPage = paPages[iPage].pvPage;
    13561515
     1516#ifndef IN_RC
    13571517    /*
    13581518     * Invalidate the entry?
     
    13611521    if (RT_UNLIKELY(fInvalidateIt))
    13621522        RTCpuSetDelByIndex(&paPages[iPage].PendingSet, iRealCpu);
    1363 
    1364     RTSpinlockRelease(pThis->hSpinlock, &Tmp);
     1523#endif
     1524
     1525    PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
    13651526
    13661527    /*
    13671528     * Do the actual invalidation outside the spinlock.
    13681529     */
     1530#ifdef IN_RC
     1531    if (RT_UNLIKELY(fNew))
     1532#else
    13691533    if (RT_UNLIKELY(fInvalidateIt))
    1370     {
    1371         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageInvlPg);
     1534#endif
     1535    {
     1536        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageInvlPg);
    13721537        ASMInvalidatePage(pvPage);
    13731538    }
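To make the lookup concrete, take an illustrative cache of cPages = 512 and HCPhys = 0x12345000 with 4 KB pages (PAGE_SHIFT = 12): the hash is (0x12345000 >> PAGE_SHIFT) % 512 = 0x12345 % 512 = 325, so the code probes entry 325 and its two neighbours (325 + 1 and 325 + 2, modulo cPages) before giving up and calling pgmR0DynMapPageSlow.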
     
    13831548 * @returns VBox status code.
    13841549 */
    1385 VMMR0DECL(int) PGMR0DynMapAssertIntegrity(void)
     1550static int pgmRZDynMapAssertIntegrity(PPGMRZDYNMAP pThis)
    13861551{
    13871552    /*
    13881553     * Basic pool stuff that doesn't require any lock, just assumes we're a user.
    13891554     */
    1390     PPGMR0DYNMAP        pThis       = g_pPGMR0DynMap;
    13911555    if (!pThis)
    13921556        return VINF_SUCCESS;
    13931557    AssertPtrReturn(pThis, VERR_INVALID_POINTER);
    1394     AssertReturn(pThis->u32Magic == PGMR0DYNMAP_MAGIC, VERR_INVALID_MAGIC);
     1558    AssertReturn(pThis->u32Magic == PGMRZDYNMAP_MAGIC, VERR_INVALID_MAGIC);
    13951559    if (!pThis->cUsers)
    13961560        return VERR_INVALID_PARAMETER;
     
    13981562
    13991563    int                 rc          = VINF_SUCCESS;
    1400     RTSPINLOCKTMP       Tmp         = RTSPINLOCKTMP_INITIALIZER;
    1401     RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
     1564    PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);
    14021565
    14031566#define CHECK_RET(expr, a) \
     
    14051568        if (RT_UNLIKELY(!(expr))) \
    14061569        { \
    1407             RTSpinlockRelease(pThis->hSpinlock, &Tmp); \
     1570            PGMRZDYNMAP_SPINLOCK_RELEASE(pThis); \
    14081571            RTAssertMsg1Weak(#expr, __LINE__, __FILE__, __PRETTY_FUNCTION__); \
    14091572            RTAssertMsg2Weak a; \
     
    14171580    uint32_t            cGuard      = 0;
    14181581    uint32_t            cLoad       = 0;
    1419     PPGMR0DYNMAPENTRY   paPages     = pThis->paPages;
     1582    PPGMRZDYNMAPENTRY   paPages     = pThis->paPages;
    14201583    uint32_t            iPage       = pThis->cPages;
    14211584    if (pThis->fLegacyMode)
    14221585    {
     1586#ifdef IN_RING0
    14231587        PCX86PGUINT     paSavedPTEs = (PCX86PGUINT)pThis->pvSavedPTEs; NOREF(paSavedPTEs);
     1588#endif
    14241589        while (iPage-- > 0)
    14251590        {
     
    14401605            {
    14411606                CHECK_RET(!(paPages[iPage].HCPhys & PAGE_OFFSET_MASK), ("#%u: %RHp\n", iPage, paPages[iPage].HCPhys));
    1442                 X86PGUINT uPte = (paSavedPTEs[iPage] & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
    1443                                | X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D
     1607                X86PGUINT uPte = X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D
     1608#ifdef IN_RING0
     1609                               | (paSavedPTEs[iPage] & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
     1610#endif
    14441611                               | (paPages[iPage].HCPhys & X86_PTE_PAE_PG_MASK);
    14451612                CHECK_RET(paPages[iPage].uPte.pLegacy->u == uPte,
     
    14481615                    cLoad++;
    14491616            }
     1617#ifdef IN_RING0
    14501618            else
    14511619                CHECK_RET(paPages[iPage].uPte.pLegacy->u == paSavedPTEs[iPage],
    14521620                          ("#%u: %#x %#x", iPage, paPages[iPage].uPte.pLegacy->u, paSavedPTEs[iPage]));
     1621#endif
    14531622        }
    14541623    }
    14551624    else
    14561625    {
     1626#ifdef IN_RING0
    14571627        PCX86PGPAEUINT  paSavedPTEs = (PCX86PGPAEUINT)pThis->pvSavedPTEs; NOREF(paSavedPTEs);
     1628#endif
    14581629        while (iPage-- > 0)
    14591630        {
     
    14741645            {
    14751646                CHECK_RET(!(paPages[iPage].HCPhys & PAGE_OFFSET_MASK), ("#%u: %RHp\n", iPage, paPages[iPage].HCPhys));
    1476                 X86PGPAEUINT uPte = (paSavedPTEs[iPage] & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
    1477                                   | X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D
     1647                X86PGPAEUINT uPte = X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D
     1648#ifdef IN_RING0
     1649                                  | (paSavedPTEs[iPage] & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
     1650#endif
    14781651                                  | (paPages[iPage].HCPhys & X86_PTE_PAE_PG_MASK);
    14791652                CHECK_RET(paPages[iPage].uPte.pPae->u == uPte,
     
    14821655                    cLoad++;
    14831656            }
     1657#ifdef IN_RING0
    14841658            else
    14851659                CHECK_RET(paPages[iPage].uPte.pPae->u == paSavedPTEs[iPage],
    14861660                          ("#%u: %#llx %#llx", iPage, paPages[iPage].uPte.pPae->u, paSavedPTEs[iPage]));
     1661#endif
    14871662        }
    14881663    }
     
    14921667
    14931668#undef CHECK_RET
    1494     RTSpinlockRelease(pThis->hSpinlock, &Tmp);
     1669    PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
    14951670    return VINF_SUCCESS;
    14961671}
     1672
     1673#ifdef IN_RING0
     1674/**
      1675 * Asserts the integrity of the pool.
     1676 *
     1677 * @returns VBox status code.
     1678 */
     1679VMMR0DECL(int) PGMR0DynMapAssertIntegrity(void)
     1680{
     1681    return pgmRZDynMapAssertIntegrity(g_pPGMR0DynMap);
     1682}
     1683#endif /* IN_RING0 */
     1684
     1685#ifdef IN_RC
     1686/**
      1687 * Asserts the integrity of the pool.
     1688 *
     1689 * @returns VBox status code.
     1690 */
     1691VMMRCDECL(int) PGMRCDynMapAssertIntegrity(PVM pVM)
     1692{
     1693    return pgmRZDynMapAssertIntegrity((PPGMRZDYNMAP)pVM->pgm.s.pRCDynMap);
     1694}
     1695#endif /* IN_RC */
     1696
     1697
     1698/**
      1699 * As a final resort for a (somewhat) full auto set or full cache, try to merge
     1700 * duplicate entries and flush the ones we can.
     1701 *
     1702 * @param   pSet        The set.
     1703 */
     1704static void pgmDynMapOptimizeAutoSet(PPGMMAPSET pSet)
     1705{
     1706    LogFlow(("pgmDynMapOptimizeAutoSet\n"));
     1707
     1708    for (uint32_t i = 0 ; i < pSet->cEntries; i++)
     1709    {
     1710        /*
     1711         * Try merge entries.
     1712         */
     1713        uint16_t const  iPage = pSet->aEntries[i].iPage;
     1714        uint32_t        j     = i + 1;
     1715        while (   j < pSet->cEntries
     1716               && (   pSet->iSubset == UINT32_MAX
     1717                   || pSet->iSubset < pSet->cEntries) )
     1718        {
     1719            if (pSet->aEntries[j].iPage != iPage)
     1720                j++;
     1721            else
     1722            {
     1723                uint32_t const  cHardRefs    = (uint32_t)pSet->aEntries[i].cRefs
     1724                                             + (uint32_t)pSet->aEntries[j].cRefs;
     1725                uint32_t        cInlinedRefs = (uint32_t)pSet->aEntries[i].cInlinedRefs
     1726                                             + (uint32_t)pSet->aEntries[j].cInlinedRefs;
     1727                uint32_t        cUnrefs      = (uint32_t)pSet->aEntries[i].cUnrefs
     1728                                             + (uint32_t)pSet->aEntries[j].cUnrefs;
     1729                uint32_t        cSub         = RT_MIN(cUnrefs, cInlinedRefs);
     1730                cInlinedRefs -= cSub;
     1731                cUnrefs      -= cSub;
     1732
     1733                if (    cHardRefs    < UINT16_MAX
     1734                    &&  cInlinedRefs < UINT16_MAX
     1735                    &&  cUnrefs      < UINT16_MAX)
     1736                {
     1737                    /* merge j into i removing j. */
     1738                    Log2(("pgmDynMapOptimizeAutoSet: Merging #%u into #%u\n", j, i));
     1739                    pSet->aEntries[i].cRefs        = cHardRefs;
     1740                    pSet->aEntries[i].cInlinedRefs = cInlinedRefs;
     1741                    pSet->aEntries[i].cUnrefs      = cUnrefs;
     1742                    pSet->cEntries--;
     1743                    if (j < pSet->cEntries)
     1744                    {
     1745                        pSet->aEntries[j] = pSet->aEntries[pSet->cEntries];
     1746                        PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[pSet->cEntries]);
     1747                    }
     1748                    else
     1749                        PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[j]);
     1750                }
     1751#if 0 /* too complicated, skip it. */
     1752                else
     1753                {
     1754                    /* migrate the max number of refs from j into i and quit the inner loop. */
     1755                    uint32_t cMigrate = UINT16_MAX - 1 - pSet->aEntries[i].cRefs;
     1756                    Assert(pSet->aEntries[j].cRefs > cMigrate);
     1757                    pSet->aEntries[j].cRefs -= cMigrate;
     1758                    pSet->aEntries[i].cRefs = UINT16_MAX - 1;
     1759                    break;
     1760                }
     1761#endif
     1762            }
     1763        }
     1764
     1765        /*
      1766         * Try to make use of the unused hinting (cUnrefs) to evict entries
      1767         * from both the set and the mapping cache.
     1768         */
     1769
     1770        uint32_t const cTotalRefs = (uint32_t)pSet->aEntries[i].cRefs + pSet->aEntries[i].cInlinedRefs;
     1771        Log2(("pgmDynMapOptimizeAutoSet: #%u/%u/%u pvPage=%p iPage=%u cRefs=%u cInlinedRefs=%u cUnrefs=%u cTotalRefs=%u\n",
     1772              i,
     1773              pSet->iSubset,
     1774              pSet->cEntries,
     1775              pSet->aEntries[i].pvPage,
     1776              pSet->aEntries[i].iPage,
     1777              pSet->aEntries[i].cRefs,
     1778              pSet->aEntries[i].cInlinedRefs,
     1779              pSet->aEntries[i].cUnrefs,
     1780              cTotalRefs));
     1781        Assert(cTotalRefs >= pSet->aEntries[i].cUnrefs);
     1782
     1783        if (    cTotalRefs == pSet->aEntries[i].cUnrefs
     1784            &&  (   pSet->iSubset == UINT32_MAX
     1785                 || pSet->iSubset < pSet->cEntries)
     1786           )
     1787        {
     1788            Log2(("pgmDynMapOptimizeAutoSet: Releasing iPage=%d/%p\n", pSet->aEntries[i].iPage, pSet->aEntries[i].pvPage));
     1789            //LogFlow(("pgmDynMapOptimizeAutoSet: Releasing iPage=%d/%p\n", pSet->aEntries[i].iPage, pSet->aEntries[i].pvPage));
     1790            pgmRZDynMapReleasePage(PGMRZDYNMAP_SET_2_DYNMAP(pSet),
     1791                                   pSet->aEntries[i].iPage,
     1792                                   pSet->aEntries[i].cRefs);
     1793            pSet->cEntries--;
     1794            if (i < pSet->cEntries)
     1795            {
     1796                pSet->aEntries[i] = pSet->aEntries[pSet->cEntries];
     1797                PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[pSet->cEntries]);
     1798            }
     1799
     1800            i--;
     1801        }
     1802    }
     1803}
     1804
     1805
    14971806
    14981807
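A worked example of the merge bookkeeping, with invented numbers: let entry i carry cRefs=1, cInlinedRefs=4, cUnrefs=2 and duplicate entry j carry cRefs=1, cInlinedRefs=1, cUnrefs=3. The sums are cHardRefs=2, cInlinedRefs=5 and cUnrefs=5; cSub = RT_MIN(5, 5) = 5 cancels the inlined references against the unreference hints, so the merged entry i ends up with cRefs=2, cInlinedRefs=0, cUnrefs=0. The eviction test that follows then sees cTotalRefs = 2 != cUnrefs = 0 and keeps the entry in the set.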
     
    15051814 * @param   pVCpu       The shared data for the current virtual CPU.
    15061815 */
    1507 VMMDECL(void) PGMDynMapStartAutoSet(PVMCPU pVCpu)
    1508 {
     1816VMMDECL(void) PGMRZDynMapStartAutoSet(PVMCPU pVCpu)
     1817{
     1818    LogFlow(("PGMRZDynMapStartAutoSet:\n"));
    15091819    Assert(pVCpu->pgm.s.AutoSet.cEntries == PGMMAPSET_CLOSED);
    15101820    Assert(pVCpu->pgm.s.AutoSet.iSubset == UINT32_MAX);
    15111821    pVCpu->pgm.s.AutoSet.cEntries = 0;
    1512     pVCpu->pgm.s.AutoSet.iCpu = RTMpCpuIdToSetIndex(RTMpCpuId());
    1513 }
    1514 
    1515 
     1822    pVCpu->pgm.s.AutoSet.iCpu = PGMRZDYNMAP_CUR_CPU();
     1823}
     1824
     1825
     1826#ifdef IN_RING0
    15161827/**
    15171828 * Starts or migrates the autoset of a virtual CPU.
     
    15261837 * @thread  EMT
    15271838 */
    1528 VMMDECL(bool) PGMDynMapStartOrMigrateAutoSet(PVMCPU pVCpu)
     1839VMMR0DECL(bool) PGMR0DynMapStartOrMigrateAutoSet(PVMCPU pVCpu)
    15291840{
    15301841    bool fStartIt = pVCpu->pgm.s.AutoSet.cEntries == PGMMAPSET_CLOSED;
    15311842    if (fStartIt)
    1532         PGMDynMapStartAutoSet(pVCpu);
     1843        PGMRZDynMapStartAutoSet(pVCpu);
    15331844    else
    1534         PGMDynMapMigrateAutoSet(pVCpu);
     1845        PGMR0DynMapMigrateAutoSet(pVCpu);
    15351846    return fStartIt;
    15361847}
     1848#endif /* IN_RING0 */
    15371849
    15381850
     
    15511863        &&  RT_LIKELY(cEntries <= RT_ELEMENTS(pSet->aEntries)))
    15521864    {
    1553         PPGMR0DYNMAP    pThis   = g_pPGMR0DynMap;
    1554         RTSPINLOCKTMP   Tmp     = RTSPINLOCKTMP_INITIALIZER;
    1555         RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
     1865        PPGMRZDYNMAP    pThis   = PGMRZDYNMAP_SET_2_DYNMAP(pSet);
     1866        PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);
    15561867
    15571868        uint32_t i = cEntries;
     
    15621873            int32_t  cRefs = pSet->aEntries[i].cRefs;
    15631874            Assert(cRefs > 0);
    1564             pgmR0DynMapReleasePageLocked(pThis, iPage, cRefs);
    1565 
    1566             pSet->aEntries[i].iPage = UINT16_MAX;
    1567             pSet->aEntries[i].cRefs = 0;
     1875            pgmRZDynMapReleasePageLocked(pThis, iPage, cRefs);
     1876
     1877            PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[i]);
    15681878        }
    15691879
    15701880        Assert(pThis->cLoad <= pThis->cPages - pThis->cGuardPages);
    1571         RTSpinlockRelease(pThis->hSpinlock, &Tmp);
     1881        PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
    15721882    }
    15731883}
     
    15801890 * @param   pVCpu       The shared data for the current virtual CPU.
    15811891 */
    1582 VMMDECL(void) PGMDynMapReleaseAutoSet(PVMCPU pVCpu)
     1892VMMDECL(void) PGMRZDynMapReleaseAutoSet(PVMCPU pVCpu)
    15831893{
    15841894    PPGMMAPSET  pSet = &pVCpu->pgm.s.AutoSet;
     
    15931903    pSet->iCpu = -1;
    15941904
    1595     STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatR0DynMapSetSize[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
     1905#ifdef IN_RC
     1906    if (RT_ELEMENTS(pSet->aEntries) > MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE)
     1907        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / (MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE)) % 11]);
     1908    else
     1909#endif
     1910        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
    15961911    AssertMsg(cEntries < PGMMAPSET_MAX_FILL, ("%u\n", cEntries));
    15971912    if (cEntries > RT_ELEMENTS(pSet->aEntries) * 50 / 100)
    1598         Log(("PGMDynMapReleaseAutoSet: cEntries=%d\n", pSet->cEntries));
     1913        Log(("PGMRZDynMapReleaseAutoSet: cEntries=%d\n", cEntries));
     1914    else
     1915        LogFlow(("PGMRZDynMapReleaseAutoSet: cEntries=%d\n", cEntries));
    15991916
    16001917    pgmDynMapFlushAutoSetWorker(pSet, cEntries);
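The aStatRZDynMapSetFilledPct indexing above buckets the fill level by deciles. With an illustrative capacity of RT_ELEMENTS(pSet->aEntries) = 64 and cEntries = 32, the index is 32 * 10 / 64 = 5, i.e. the 50-59% bucket; a completely full set yields 10, and the % 11 merely keeps the index inside the eleven-element array. In raw-mode context the divisor becomes the MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE page count instead when the set array is larger than the hypervisor area, since no more than that can ever be mapped.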
     
    16071924 * @param   pVCpu       The shared data for the current virtual CPU.
    16081925 */
    1609 VMMDECL(void) PGMDynMapFlushAutoSet(PVMCPU pVCpu)
     1926VMMDECL(void) PGMRZDynMapFlushAutoSet(PVMCPU pVCpu)
    16101927{
    16111928    PPGMMAPSET  pSet = &pVCpu->pgm.s.AutoSet;
    1612     AssertMsg(pSet->iCpu == RTMpCpuIdToSetIndex(RTMpCpuId()), ("%d %d(%d) efl=%#x\n", pSet->iCpu, RTMpCpuIdToSetIndex(RTMpCpuId()), RTMpCpuId(), ASMGetFlags()));
     1929    AssertMsg(pSet->iCpu == PGMRZDYNMAP_CUR_CPU(), ("%d %d efl=%#x\n", pSet->iCpu, PGMRZDYNMAP_CUR_CPU(), ASMGetFlags()));
    16131930
    16141931    /*
     
    16171934    uint32_t cEntries = pSet->cEntries;
    16181935    AssertReturnVoid(cEntries != PGMMAPSET_CLOSED);
    1619     STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatR0DynMapSetSize[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
     1936#ifdef IN_RC
     1937    if (RT_ELEMENTS(pSet->aEntries) > MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE)
     1938        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / (MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE)) % 11]);
     1939    else
     1940#endif
     1941        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
    16201942    if (cEntries >= RT_ELEMENTS(pSet->aEntries) * 45 / 100)
    16211943    {
     
    16261948
    16271949        pgmDynMapFlushAutoSetWorker(pSet, cEntries);
    1628         AssertMsg(pSet->iCpu == RTMpCpuIdToSetIndex(RTMpCpuId()), ("%d %d(%d) efl=%#x\n", pSet->iCpu, RTMpCpuIdToSetIndex(RTMpCpuId()), RTMpCpuId(), ASMGetFlags()));
    1629     }
    1630 }
    1631 
    1632 
     1950        AssertMsg(pSet->iCpu == PGMRZDYNMAP_CUR_CPU(), ("%d %d efl=%#x\n", pSet->iCpu, PGMRZDYNMAP_CUR_CPU(), ASMGetFlags()));
     1951    }
     1952}
     1953
     1954
     1955#ifndef IN_RC
    16331956/**
    16341957 * Migrates the automatic mapping set of the current vCPU if it's active and
     
    16441967 * @thread  EMT
    16451968 */
    1646 VMMDECL(void) PGMDynMapMigrateAutoSet(PVMCPU pVCpu)
    1647 {
     1969VMMR0DECL(void) PGMR0DynMapMigrateAutoSet(PVMCPU pVCpu)
     1970{
     1971    LogFlow(("PGMR0DynMapMigrateAutoSet\n"));
    16481972    PPGMMAPSET      pSet     = &pVCpu->pgm.s.AutoSet;
    1649     int32_t         iRealCpu = RTMpCpuIdToSetIndex(RTMpCpuId());
     1973    int32_t         iRealCpu = PGMRZDYNMAP_CUR_CPU();
    16501974    if (pSet->iCpu != iRealCpu)
    16511975    {
     
    16561980            if (i != 0 && RT_LIKELY(i <= RT_ELEMENTS(pSet->aEntries)))
    16571981            {
    1658                 PPGMR0DYNMAP    pThis  = g_pPGMR0DynMap;
    1659                 RTSPINLOCKTMP   Tmp    = RTSPINLOCKTMP_INITIALIZER;
    1660                 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
     1982                PPGMRZDYNMAP    pThis  = PGMRZDYNMAP_SET_2_DYNMAP(pSet);
     1983                PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);
    16611984
    16621985                while (i-- > 0)
     
    16681991                    {
    16691992                        RTCpuSetDelByIndex(&pThis->paPages[iPage].PendingSet, iRealCpu);
    1670                         RTSpinlockRelease(pThis->hSpinlock, &Tmp);
     1993                        PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
    16711994
    16721995                        ASMInvalidatePage(pThis->paPages[iPage].pvPage);
    1673                         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapMigrateInvlPg);
    1674 
    1675                         RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
     1996                        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapMigrateInvlPg);
     1997
     1998                        PGMRZDYNMAP_SPINLOCK_REACQUIRE(pThis);
    16761999                    }
    16772000                }
    16782001
    1679                 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
     2002                PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
    16802003            }
    16812004        }
     
    16832006    }
    16842007}
     2008#endif /* !IN_RC */
    16852009
    16862010
     
    17062030        pSet->cEntries = iSubset;
    17072031
    1708         PPGMR0DYNMAP    pThis = g_pPGMR0DynMap;
    1709         RTSPINLOCKTMP   Tmp   = RTSPINLOCKTMP_INITIALIZER;
    1710         RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
     2032        PPGMRZDYNMAP    pThis = PGMRZDYNMAP_SET_2_DYNMAP(pSet);
     2033        PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);
    17112034
    17122035        while (i-- > iSubset)
     
    17162039            int32_t  cRefs = pSet->aEntries[i].cRefs;
    17172040            Assert(cRefs > 0);
    1718             pgmR0DynMapReleasePageLocked(pThis, iPage, cRefs);
    1719 
    1720             pSet->aEntries[i].iPage = UINT16_MAX;
    1721             pSet->aEntries[i].cRefs = 0;
    1722         }
    1723 
    1724         RTSpinlockRelease(pThis->hSpinlock, &Tmp);
     2041            pgmRZDynMapReleasePageLocked(pThis, iPage, cRefs);
     2042
     2043            PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[i]);
     2044        }
     2045
     2046        PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
    17252047    }
    17262048}
     
    17382060 *
    17392061 * @returns The index of the previous subset. Pass this to
    1740  *        PGMDynMapPopAutoSubset when poping it.
      2062 *          PGMRZDynMapPopAutoSubset when popping it.
    17412063 * @param   pVCpu           Pointer to the virtual cpu data.
    17422064 */
    1743 VMMDECL(uint32_t) PGMDynMapPushAutoSubset(PVMCPU pVCpu)
     2065VMMDECL(uint32_t) PGMRZDynMapPushAutoSubset(PVMCPU pVCpu)
    17442066{
    17452067    PPGMMAPSET      pSet = &pVCpu->pgm.s.AutoSet;
    17462068    AssertReturn(pSet->cEntries != PGMMAPSET_CLOSED, UINT32_MAX);
    17472069    uint32_t        iPrevSubset = pSet->iSubset;
    1748     LogFlow(("PGMDynMapPushAutoSubset: pVCpu=%p iPrevSubset=%u\n", pVCpu, iPrevSubset));
     2070    LogFlow(("PGMRZDynMapPushAutoSubset: pVCpu=%p iPrevSubset=%u\n", pVCpu, iPrevSubset));
     2071
     2072#ifdef IN_RC
     2073    /* kludge */
     2074    if (pSet->cEntries > MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE / 2)
     2075    {
     2076        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetOptimize);
     2077        pgmDynMapOptimizeAutoSet(pSet);
     2078    }
     2079#endif
    17492080
    17502081    pSet->iSubset = pSet->cEntries;
    1751     STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapSubsets);
     2082    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSubsets);
     2083
    17522084    return iPrevSubset;
    17532085}
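The push/pop pair is meant to nest, and the value returned by the push must travel to the matching pop. A minimal sketch of a hypothetical call site:

    uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
    /* ... nested code that adds short-lived mappings to the set ... */
    PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);  /* flushes only the nested entries */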
     
     17602092 * @param   iPrevSubset     What PGMRZDynMapPushAutoSubset returned.
    17612093 */
    1762 VMMDECL(void) PGMDynMapPopAutoSubset(PVMCPU pVCpu, uint32_t iPrevSubset)
     2094VMMDECL(void) PGMRZDynMapPopAutoSubset(PVMCPU pVCpu, uint32_t iPrevSubset)
    17632095{
    17642096    PPGMMAPSET      pSet = &pVCpu->pgm.s.AutoSet;
    17652097    uint32_t        cEntries = pSet->cEntries;
    1766     LogFlow(("PGMDynMapPopAutoSubset: pVCpu=%p iPrevSubset=%u iSubset=%u cEntries=%u\n", pVCpu, iPrevSubset, pSet->iSubset, cEntries));
     2098    LogFlow(("PGMRZDynMapPopAutoSubset: pVCpu=%p iPrevSubset=%u iSubset=%u cEntries=%u\n", pVCpu, iPrevSubset, pSet->iSubset, cEntries));
    17672099    AssertReturnVoid(cEntries != PGMMAPSET_CLOSED);
    17682100    AssertReturnVoid(pSet->iSubset >= iPrevSubset || iPrevSubset == UINT32_MAX);
    1769     STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatR0DynMapSetSize[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
     2101#ifdef IN_RC
     2102    if (RT_ELEMENTS(pSet->aEntries) > MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE)
     2103        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / (MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE)) % 11]);
     2104    else
     2105#endif
     2106        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
    17702107    if (    cEntries >= RT_ELEMENTS(pSet->aEntries) * 40 / 100
    17712108        &&  cEntries != pSet->iSubset)
     
    17792116
    17802117/**
    1781  * As a final resort for a full auto set, try merge duplicate entries.
    1782  *
    1783  * @param   pSet        The set.
    1784  */
    1785 static void pgmDynMapOptimizeAutoSet(PPGMMAPSET pSet)
    1786 {
    1787     for (uint32_t i = 0 ; i < pSet->cEntries; i++)
    1788     {
    1789         uint16_t const  iPage = pSet->aEntries[i].iPage;
    1790         uint32_t        j     = i + 1;
    1791         while (j < pSet->cEntries)
    1792         {
    1793             if (pSet->aEntries[j].iPage != iPage)
    1794                 j++;
    1795             else if ((uint32_t)pSet->aEntries[i].cRefs + (uint32_t)pSet->aEntries[j].cRefs < UINT16_MAX)
    1796             {
    1797                 /* merge j into i removing j. */
    1798                 pSet->aEntries[i].cRefs += pSet->aEntries[j].cRefs;
    1799                 pSet->cEntries--;
    1800                 if (j < pSet->cEntries)
     2118 * Indicates that the given page is unused and its mapping can be re-used.
     2119 *
     2120 * @param   pVCpu           The current CPU.
     2121 * @param   pvHint          The page that is now unused.  This does not have to
     2122 *                          point at the start of the page.  NULL is ignored.
     2123 */
     2124#ifdef LOG_ENABLED
     2125void pgmRZDynMapUnusedHint(PVMCPU pVCpu, void *pvHint, RT_SRC_POS_DECL)
     2126#else
     2127void pgmRZDynMapUnusedHint(PVMCPU pVCpu, void *pvHint)
     2128#endif
     2129{
     2130    /*
     2131     * Ignore NULL pointers and mask off the page offset bits.
     2132     */
     2133    if (pvHint == NULL)
     2134        return;
     2135    pvHint = (void *)((uintptr_t)pvHint & ~(uintptr_t)PAGE_OFFSET_MASK);
     2136
     2137    PPGMMAPSET  pSet    = &pVCpu->pgm.s.AutoSet;
     2138    uint32_t    iEntry  = pSet->cEntries;
     2139    AssertReturnVoid(iEntry > 0);
     2140
     2141    /*
     2142     * Find the entry in the usual unrolled fashion.
     2143     */
     2144#define IS_MATCHING_ENTRY(pSet, iEntry, pvHint) \
     2145        (   (pSet)->aEntries[(iEntry)].pvPage == (pvHint) \
     2146         &&   (uint32_t)(pSet)->aEntries[(iEntry)].cRefs + (pSet)->aEntries[(iEntry)].cInlinedRefs \
     2147            > (pSet)->aEntries[(iEntry)].cUnrefs )
     2148    if (     iEntry >= 1 && IS_MATCHING_ENTRY(pSet, iEntry - 1, pvHint))
     2149        iEntry = iEntry - 1;
     2150    else if (iEntry >= 2 && IS_MATCHING_ENTRY(pSet, iEntry - 2, pvHint))
     2151        iEntry = iEntry - 2;
     2152    else if (iEntry >= 3 && IS_MATCHING_ENTRY(pSet, iEntry - 3, pvHint))
     2153        iEntry = iEntry - 3;
     2154    else if (iEntry >= 4 && IS_MATCHING_ENTRY(pSet, iEntry - 4, pvHint))
     2155        iEntry = iEntry - 4;
     2156    else if (iEntry >= 5 && IS_MATCHING_ENTRY(pSet, iEntry - 5, pvHint))
     2157        iEntry = iEntry - 5;
     2158    else if (iEntry >= 6 && IS_MATCHING_ENTRY(pSet, iEntry - 6, pvHint))
     2159        iEntry = iEntry - 6;
     2160    else if (iEntry >= 7 && IS_MATCHING_ENTRY(pSet, iEntry - 7, pvHint))
     2161        iEntry = iEntry - 7;
     2162    else
     2163    {
     2164        /*
     2165         * Loop till we find it.
     2166         */
     2167        bool fFound = false;
     2168        if (iEntry > 7)
     2169        {
     2170            iEntry -= 7;
     2171            while (iEntry-- > 0)
     2172                if (IS_MATCHING_ENTRY(pSet, iEntry, pvHint))
    18012173                {
    1802                     pSet->aEntries[j] = pSet->aEntries[pSet->cEntries];
    1803                     pSet->aEntries[pSet->cEntries].iPage = UINT16_MAX;
    1804                     pSet->aEntries[pSet->cEntries].cRefs = 0;
     2174                    fFound = true;
     2175                    break;
    18052176                }
    1806                 else
    1807                 {
    1808                     pSet->aEntries[j].iPage = UINT16_MAX;
    1809                     pSet->aEntries[j].cRefs = 0;
    1810                 }
    1811             }
    1812             else
    1813             {
    1814                 /* migrate the max number of refs from j into i and quit the inner loop. */
    1815                 uint32_t cMigrate = UINT16_MAX - 1 - pSet->aEntries[i].cRefs;
    1816                 Assert(pSet->aEntries[j].cRefs > cMigrate);
    1817                 pSet->aEntries[j].cRefs -= cMigrate;
    1818                 pSet->aEntries[i].cRefs = UINT16_MAX - 1;
    1819                 break;
    1820             }
    1821         }
    1822     }
    1823 }
    1824 
    1825 
    1826 /**
    1827  * Common worker code for PGMDynMapHCPhys, pgmR0DynMapHCPageInlined and
    1828  * pgmR0DynMapGCPageInlined.
     2177        }
     2178        AssertMsgReturnVoid(fFound,
     2179                            ("pvHint=%p cEntries=%#x iSubset=%#x\n"
     2180                             "aEntries[0] = {%#x, %#x, %#x, %#x, %p}\n"
     2181                             "aEntries[1] = {%#x, %#x, %#x, %#x, %p}\n"
     2182                             "aEntries[2] = {%#x, %#x, %#x, %#x, %p}\n"
     2183                             "aEntries[3] = {%#x, %#x, %#x, %#x, %p}\n"
     2184                             "aEntries[4] = {%#x, %#x, %#x, %#x, %p}\n"
     2185                             "aEntries[5] = {%#x, %#x, %#x, %#x, %p}\n"
     2186                             ,
     2187                             pvHint, pSet->cEntries, pSet->iSubset,
     2188                             pSet->aEntries[0].iPage, pSet->aEntries[0].cRefs, pSet->aEntries[0].cInlinedRefs, pSet->aEntries[0].cUnrefs, pSet->aEntries[0].pvPage,
     2189                             pSet->aEntries[1].iPage, pSet->aEntries[1].cRefs, pSet->aEntries[1].cInlinedRefs, pSet->aEntries[1].cUnrefs, pSet->aEntries[1].pvPage,
     2190                             pSet->aEntries[2].iPage, pSet->aEntries[2].cRefs, pSet->aEntries[2].cInlinedRefs, pSet->aEntries[2].cUnrefs, pSet->aEntries[2].pvPage,
     2191                             pSet->aEntries[3].iPage, pSet->aEntries[3].cRefs, pSet->aEntries[3].cInlinedRefs, pSet->aEntries[3].cUnrefs, pSet->aEntries[3].pvPage,
     2192                             pSet->aEntries[4].iPage, pSet->aEntries[4].cRefs, pSet->aEntries[4].cInlinedRefs, pSet->aEntries[4].cUnrefs, pSet->aEntries[4].pvPage,
     2193                             pSet->aEntries[5].iPage, pSet->aEntries[5].cRefs, pSet->aEntries[5].cInlinedRefs, pSet->aEntries[5].cUnrefs, pSet->aEntries[5].pvPage));
     2194    }
     2195#undef IS_MATCHING_ENTRY
     2196
     2197    /*
     2198     * Update it.
     2199     */
     2200    uint32_t const  cTotalRefs = (uint32_t)pSet->aEntries[iEntry].cRefs + pSet->aEntries[iEntry].cInlinedRefs;
     2201    uint32_t const  cUnrefs    = pSet->aEntries[iEntry].cUnrefs;
     2202    LogFlow(("pgmRZDynMapUnusedHint: pvHint=%p #%u cRefs=%d cInlinedRefs=%d cUnrefs=%d (+1) cTotalRefs=%d %s(%d) %s\n",
     2203             pvHint, iEntry, pSet->aEntries[iEntry].cRefs, pSet->aEntries[iEntry].cInlinedRefs, cUnrefs, cTotalRefs, pszFile, iLine, pszFunction));
     2204    AssertReturnVoid(cTotalRefs > cUnrefs);
     2205
     2206    if (RT_LIKELY(cUnrefs < UINT16_MAX - 1))
     2207        pSet->aEntries[iEntry].cUnrefs++;
     2208    else if (pSet->aEntries[iEntry].cInlinedRefs)
     2209    {
     2210        uint32_t cSub = RT_MIN(pSet->aEntries[iEntry].cInlinedRefs, pSet->aEntries[iEntry].cUnrefs);
     2211        pSet->aEntries[iEntry].cInlinedRefs -= cSub;
     2212        pSet->aEntries[iEntry].cUnrefs      -= cSub;
     2213        pSet->aEntries[iEntry].cUnrefs++;
     2214    }
     2215    else
     2216        Log(("pgmRZDynMapUnusedHint: pvHint=%p ignored because of overflow! %s(%d) %s\n", pvHint, pszFile, iLine, pszFunction));
     2217}
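To make the bookkeeping concrete: an entry counts as live while cRefs + cInlinedRefs exceeds cUnrefs, and the hint above bumps cUnrefs so the slot can eventually be recycled. A hedged usage sketch, assuming pVCpu and HCPhys are in scope (when LOG_ENABLED is defined the hint additionally takes RT_SRC_POS arguments):

    /* Illustrative sketch only, not shipped code. */
    void *pv;
    int rc = pgmRZDynMapHCPageCommon(&pVCpu->pgm.s.AutoSet, HCPhys, &pv RTLOG_COMMA_SRC_POS);
    if (RT_SUCCESS(rc))
    {
        /* ... use the mapping ... */
        pgmRZDynMapUnusedHint(pVCpu, pv); /* the mapping may now be re-used */
    }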
     2218
     2219
     2220/**
     2221 * Common worker code for pgmRZDynMapHCPageInlined, pgmRZDynMapHCPageV2Inlined
     2222 * and pgmR0DynMapGCPageOffInlined.
    18292223 *
    18302224 * @returns VINF_SUCCESS, bails out to ring-3 on failure.
     
    18352229 * @remarks This is a very hot path.
    18362230 */
    1837 int pgmR0DynMapHCPageCommon(PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv)
    1838 {
    1839     LogFlow(("pgmR0DynMapHCPageCommon: pSet=%p HCPhys=%RHp ppv=%p\n", pSet, HCPhys, ppv));
    1840     AssertMsg(pSet->iCpu == RTMpCpuIdToSetIndex(RTMpCpuId()), ("%d %d(%d) efl=%#x\n", pSet->iCpu, RTMpCpuIdToSetIndex(RTMpCpuId()), RTMpCpuId(), ASMGetFlags()));
    1841     PVMCPU pVCpu = PGMR0DYNMAP_2_VMCPU(pSet);
     2231int pgmRZDynMapHCPageCommon(PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
     2232{
     2233    AssertMsg(pSet->iCpu == PGMRZDYNMAP_CUR_CPU(), ("%d %d efl=%#x\n", pSet->iCpu, PGMRZDYNMAP_CUR_CPU(), ASMGetFlags()));
     2234    PVMCPU pVCpu = PGMRZDYNMAP_SET_2_VMCPU(pSet);
     2235    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPage, a);
    18422236
    18432237    /*
    18442238     * Map it.
    18452239     */
    1846     void *pvPage;
    1847     uint32_t const  iPage = pgmR0DynMapPage(g_pPGMR0DynMap, HCPhys, pSet->iCpu, pVCpu, &pvPage);
     2240    void           *pvPage;
     2241    PPGMRZDYNMAP    pThis = PGMRZDYNMAP_SET_2_DYNMAP(pSet);
     2242    uint32_t        iPage = pgmR0DynMapPage(pThis, HCPhys, pSet->iCpu, pVCpu, &pvPage);
    18482243    if (RT_UNLIKELY(iPage == UINT32_MAX))
    18492244    {
    1850         RTAssertMsg2Weak("PGMDynMapHCPage: cLoad=%u/%u cPages=%u cGuardPages=%u\n",
    1851                          g_pPGMR0DynMap->cLoad, g_pPGMR0DynMap->cMaxLoad, g_pPGMR0DynMap->cPages, g_pPGMR0DynMap->cGuardPages);
    1852         if (!g_fPGMR0DynMapTestRunning)
    1853             VMMRZCallRing3NoCpu(PGMR0DYNMAP_2_VM(pSet), VMMCALLRING3_VM_R0_ASSERTION, 0);
    1854         *ppv = NULL;
    1855         return VERR_PGM_DYNMAP_FAILED;
     2245        /*
     2246         * We're out of mapping space, so optimize our set to try to remedy
     2247         * the situation.  (Only works if there are unreferenced hints.)
     2248         */
     2249        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetOptimize);
     2250        pgmDynMapOptimizeAutoSet(pSet);
     2251
     2252        iPage = pgmR0DynMapPage(pThis, HCPhys, pSet->iCpu, pVCpu, &pvPage);
     2253        if (RT_UNLIKELY(iPage == UINT32_MAX))
     2254        {
     2255            RTAssertMsg2Weak("pgmRZDynMapHCPageCommon: cLoad=%u/%u cPages=%u cGuardPages=%u\n",
     2256                             pThis->cLoad, pThis->cMaxLoad, pThis->cPages, pThis->cGuardPages);
     2257            if (!g_fPGMR0DynMapTestRunning)
     2258                VMMRZCallRing3NoCpu(PGMRZDYNMAP_SET_2_VM(pSet), VMMCALLRING3_VM_R0_ASSERTION, 0);
     2259            *ppv = NULL;
     2260            STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPage, a);
     2261            return VERR_PGM_DYNMAP_FAILED;
     2262        }
    18562263    }
    18572264
     
    18692276    {
    18702277        unsigned iEntry = pSet->cEntries++;
    1871         pSet->aEntries[iEntry].cRefs  = 1;
    1872         pSet->aEntries[iEntry].iPage  = iPage;
    1873         pSet->aEntries[iEntry].pvPage = pvPage;
    1874         pSet->aEntries[iEntry].HCPhys = HCPhys;
     2278        pSet->aEntries[iEntry].cRefs        = 1;
     2279        pSet->aEntries[iEntry].cUnrefs      = 0;
     2280        pSet->aEntries[iEntry].cInlinedRefs = 0;
     2281        pSet->aEntries[iEntry].iPage        = iPage;
     2282        pSet->aEntries[iEntry].pvPage       = pvPage;
     2283        pSet->aEntries[iEntry].HCPhys       = HCPhys;
    18752284        pSet->aiHashTable[PGMMAPSET_HASH(HCPhys)] = iEntry;
     2285        LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/0/0 iPage=%#x  [a] %s(%d) %s\n",
     2286                 pSet, HCPhys, iEntry, iEntry + 1, pvPage, 1, iPage, pszFile, iLine, pszFunction));
    18762287    }
    18772288    /* Any of the last 5 pages? */
    18782289    else if (   pSet->aEntries[i - 0].iPage == iPage
    18792290             && pSet->aEntries[i - 0].cRefs < UINT16_MAX - 1)
     2291    {
    18802292        pSet->aEntries[i - 0].cRefs++;
     2293        LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [0] %s(%d) %s\n", pSet, HCPhys, i - 0, pSet->cEntries, pvPage, pSet->aEntries[i - 0].cRefs, pSet->aEntries[i - 0].cInlinedRefs, pSet->aEntries[i - 0].cUnrefs, iPage, pszFile, iLine, pszFunction));
     2294    }
    18812295    else if (   pSet->aEntries[i - 1].iPage == iPage
    18822296             && pSet->aEntries[i - 1].cRefs < UINT16_MAX - 1)
     2297    {
    18832298        pSet->aEntries[i - 1].cRefs++;
     2299        LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [1] %s(%d) %s\n", pSet, HCPhys, i - 1, pSet->cEntries, pvPage, pSet->aEntries[i - 1].cRefs, pSet->aEntries[i - 1].cInlinedRefs, pSet->aEntries[i - 1].cUnrefs, iPage, pszFile, iLine, pszFunction));
     2300    }
    18842301    else if (   pSet->aEntries[i - 2].iPage == iPage
    18852302             && pSet->aEntries[i - 2].cRefs < UINT16_MAX - 1)
     2303    {
    18862304        pSet->aEntries[i - 2].cRefs++;
     2305        LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [2] %s(%d) %s\n", pSet, HCPhys, i - 2, pSet->cEntries, pvPage, pSet->aEntries[i - 2].cRefs, pSet->aEntries[i - 2].cInlinedRefs, pSet->aEntries[i - 2].cUnrefs, iPage, pszFile, iLine, pszFunction));
     2306    }
    18872307    else if (   pSet->aEntries[i - 3].iPage == iPage
    18882308             && pSet->aEntries[i - 3].cRefs < UINT16_MAX - 1)
     2309    {
    18892310        pSet->aEntries[i - 3].cRefs++;
     2311        LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [3] %s(%d) %s\n", pSet, HCPhys, i - 3, pSet->cEntries, pvPage, pSet->aEntries[i - 3].cRefs, pSet->aEntries[i - 3].cInlinedRefs, pSet->aEntries[i - 3].cUnrefs, iPage, pszFile, iLine, pszFunction));
     2312    }
    18902313    else if (   pSet->aEntries[i - 4].iPage == iPage
    18912314             && pSet->aEntries[i - 4].cRefs < UINT16_MAX - 1)
     2315    {
    18922316        pSet->aEntries[i - 4].cRefs++;
     2317        LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [4] %s(%d) %s\n", pSet, HCPhys, i - 4, pSet->cEntries, pvPage, pSet->aEntries[i - 4].cRefs, pSet->aEntries[i - 4].cInlinedRefs, pSet->aEntries[i - 4].cUnrefs, iPage, pszFile, iLine, pszFunction));
     2318    }
    18932319    /* Don't bother searching unless we're above a 60% load. */
    18942320    else if (RT_LIKELY(i <= (int32_t)RT_ELEMENTS(pSet->aEntries) * 60 / 100))
    18952321    {
    18962322        unsigned iEntry = pSet->cEntries++;
    1897         pSet->aEntries[iEntry].cRefs  = 1;
    1898         pSet->aEntries[iEntry].iPage  = iPage;
    1899         pSet->aEntries[iEntry].pvPage = pvPage;
    1900         pSet->aEntries[iEntry].HCPhys = HCPhys;
     2323        pSet->aEntries[iEntry].cRefs        = 1;
     2324        pSet->aEntries[iEntry].cUnrefs      = 0;
     2325        pSet->aEntries[iEntry].cInlinedRefs = 0;
     2326        pSet->aEntries[iEntry].iPage        = iPage;
     2327        pSet->aEntries[iEntry].pvPage       = pvPage;
     2328        pSet->aEntries[iEntry].HCPhys       = HCPhys;
    19012329        pSet->aiHashTable[PGMMAPSET_HASH(HCPhys)] = iEntry;
     2330        LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=1/0/0 iPage=%#x [b] %s(%d) %s\n", pSet, HCPhys, iEntry, pSet->cEntries, pvPage, iPage, pszFile, iLine, pszFunction));
    19022331    }
    19032332    else
     
    19112340            {
    19122341                pSet->aEntries[i].cRefs++;
    1913                 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapSetSearchHits);
     2342                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetSearchHits);
     2343                LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [c] %s(%d) %s\n", pSet, HCPhys, i, pSet->cEntries, pvPage, pSet->aEntries[i].cRefs, pSet->aEntries[i].cInlinedRefs, pSet->aEntries[i].cUnrefs, iPage, pszFile, iLine, pszFunction));
    19142344                break;
    19152345            }
    19162346        if (i < 0)
    19172347        {
    1918             STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapSetSearchMisses);
     2348            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetSearchMisses);
    19192349            if (pSet->iSubset < pSet->cEntries)
    19202350            {
    1921                 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapSetSearchFlushes);
    1922                 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatR0DynMapSetSize[(pSet->cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
     2351                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetSearchFlushes);
     2352                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(pSet->cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
    19232353                AssertMsg(pSet->cEntries < PGMMAPSET_MAX_FILL, ("%u\n", pSet->cEntries));
    19242354                pgmDynMapFlushSubset(pSet);
     
    19272357            if (RT_UNLIKELY(pSet->cEntries >= RT_ELEMENTS(pSet->aEntries)))
    19282358            {
    1929                 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapSetOptimize);
     2359                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetOptimize);
    19302360                pgmDynMapOptimizeAutoSet(pSet);
    19312361            }
     
    19342364            {
    19352365                unsigned iEntry = pSet->cEntries++;
    1936                 pSet->aEntries[iEntry].cRefs  = 1;
    1937                 pSet->aEntries[iEntry].iPage  = iPage;
    1938                 pSet->aEntries[iEntry].pvPage = pvPage;
    1939                 pSet->aEntries[iEntry].HCPhys = HCPhys;
     2366                pSet->aEntries[iEntry].cRefs        = 1;
     2367                pSet->aEntries[iEntry].cUnrefs      = 0;
     2368                pSet->aEntries[iEntry].cInlinedRefs = 0;
     2369                pSet->aEntries[iEntry].iPage        = iPage;
     2370                pSet->aEntries[iEntry].pvPage       = pvPage;
     2371                pSet->aEntries[iEntry].HCPhys       = HCPhys;
    19402372                pSet->aiHashTable[PGMMAPSET_HASH(HCPhys)] = iEntry;
     2373                LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=1/0/0 iPage=%#x [d] %s(%d) %s\n", pSet, HCPhys, iEntry, pSet->cEntries, pvPage, iPage, pszFile, iLine, pszFunction));
    19412374            }
    19422375            else
    19432376            {
    19442377                /* We're screwed. */
    1945                 pgmR0DynMapReleasePage(g_pPGMR0DynMap, iPage, 1);
    1946 
    1947                 RTAssertMsg2Weak("PGMDynMapHCPage: set is full!\n");
     2378                pgmRZDynMapReleasePage(pThis, iPage, 1);
     2379
     2380                RTAssertMsg2Weak("pgmRZDynMapHCPageCommon: set is full!\n");
    19482381                if (!g_fPGMR0DynMapTestRunning)
    1949                     VMMRZCallRing3NoCpu(PGMR0DYNMAP_2_VM(pSet), VMMCALLRING3_VM_R0_ASSERTION, 0);
     2382                    VMMRZCallRing3NoCpu(PGMRZDYNMAP_SET_2_VM(pSet), VMMCALLRING3_VM_R0_ASSERTION, 0);
    19502383                *ppv = NULL;
     2384                STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPage, a);
    19512385                return VERR_PGM_DYNMAP_FULL_SET;
    19522386            }
     
    19552389
    19562390    *ppv = pvPage;
     2391    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPage, a);
    19572392    return VINF_SUCCESS;
    19582393}
    1959 
    1960 
    1961 #if 0 /* Not used in R0, should internalized the other PGMDynMapHC/GCPage too. */
    1962 /* documented elsewhere - a bit of a mess. */
    1963 VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
    1964 {
    1965 #ifdef VBOX_WITH_STATISTICS
    1966     PVMCPU pVCpu = VMMGetCpu(pVM);
    1967 #endif
    1968     /*
    1969      * Validate state.
    1970      */
    1971     STAM_PROFILE_START(&pVCpu->pgm.s.StatR0DynMapHCPage, a);
    1972     AssertPtr(ppv);
    1973     AssertMsg(pVM->pgm.s.pvR0DynMapUsed == g_pPGMR0DynMap,
    1974               ("%p != %p\n", pVM->pgm.s.pvR0DynMapUsed, g_pPGMR0DynMap));
    1975     AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
    1976     PVMCPU          pVCpu   = VMMGetCpu(pVM);
    1977     AssertPtr(pVCpu);
    1978     PPGMMAPSET      pSet    = &pVCpu->pgm.s.AutoSet;
    1979     AssertMsg(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries),
    1980               ("%#x (%u)\n", pSet->cEntries, pSet->cEntries));
    1981 
    1982     /*
    1983      * Call common code.
    1984      */
    1985     int rc = pgmR0DynMapHCPageCommon(pSet, HCPhys, ppv);
    1986 
    1987     STAM_PROFILE_STOP(&pVCpu->pgm.s.StatR0DynMapHCPage, a);
    1988     return rc;
    1989 }
    1990 #endif
    19912394
    19922395
     
    20252428{
    20262429    LogRel(("pgmR0DynMapTest: ****** START ******\n"));
    2027     PPGMR0DYNMAP    pThis = g_pPGMR0DynMap;
    20282430    PPGMMAPSET      pSet  = &pVM->aCpus[0].pgm.s.AutoSet;
     2431    PPGMRZDYNMAP    pThis = PGMRZDYNMAP_SET_2_DYNMAP(pSet);
    20292432    uint32_t        i;
    20302433
     
    20472450    LogRel(("Test #1\n"));
    20482451    ASMIntDisable();
    2049     PGMDynMapStartAutoSet(&pVM->aCpus[0]);
     2452    PGMRZDynMapStartAutoSet(&pVM->aCpus[0]);
    20502453
    20512454    uint64_t cr3 = ASMGetCR3() & ~(uint64_t)PAGE_OFFSET_MASK;
    20522455    void    *pv  = (void *)(intptr_t)-1;
    20532456    void    *pv2 = (void *)(intptr_t)-2;
    2054     rc           = PGMDynMapHCPage(pVM, cr3, &pv);
    2055     int      rc2 = PGMDynMapHCPage(pVM, cr3, &pv2);
     2457    rc           = pgmRZDynMapHCPageCommon(pSet, cr3, &pv  RTLOG_COMMA_SRC_POS);
     2458    int      rc2 = pgmRZDynMapHCPageCommon(pSet, cr3, &pv2 RTLOG_COMMA_SRC_POS);
    20562459    ASMIntEnable();
    20572460    if (    RT_SUCCESS(rc2)
     
    20682471        LogRel(("Test #2\n"));
    20692472        ASMIntDisable();
    2070         PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
     2473        PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]);
    20712474        for (i = 0 ; i < UINT16_MAX*2 - 1 && RT_SUCCESS(rc) && pv2 == pv; i++)
    20722475        {
    20732476            pv2 = (void *)(intptr_t)-4;
    2074             rc = PGMDynMapHCPage(pVM, cr3, &pv2);
     2477            rc = pgmRZDynMapHCPageCommon(pSet, cr3, &pv2 RTLOG_COMMA_SRC_POS);
    20752478        }
    20762479        ASMIntEnable();
     
    21062509            LogRel(("Test #3\n"));
    21072510            ASMIntDisable();
    2108             PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
     2511            PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]);
    21092512            pv2 = NULL;
    21102513            for (i = 0 ; i < RT_ELEMENTS(pSet->aEntries) - 5 && RT_SUCCESS(rc) && pv2 != pv; i++)
    21112514            {
    21122515                pv2 = (void *)(intptr_t)(-5 - i);
    2113                 rc = PGMDynMapHCPage(pVM, cr3 + PAGE_SIZE * (i + 5), &pv2);
     2516                rc = pgmRZDynMapHCPageCommon(pSet, cr3 + PAGE_SIZE * (i + 5), &pv2 RTLOG_COMMA_SRC_POS);
    21142517            }
    21152518            ASMIntEnable();
     
    21342537                LogRel(("Test #4\n"));
    21352538                ASMIntDisable();
    2136                 PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
     2539                PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]);
    21372540                for (i = 0 ; i < RT_ELEMENTS(pSet->aEntries) + 2; i++)
    21382541                {
    2139                     rc = PGMDynMapHCPage(pVM, cr3 - PAGE_SIZE * (i + 5), &pv2);
     2542                    rc = pgmRZDynMapHCPageCommon(pSet, cr3 - PAGE_SIZE * (i + 5), &pv2 RTLOG_COMMA_SRC_POS);
    21402543                    if (RT_SUCCESS(rc))
    21412544                        rc = PGMR0DynMapAssertIntegrity();
     
    21492552                    LogRel(("Test #5\n"));
    21502553                    ASMIntDisable();
    2151                     PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
    2152                     PGMDynMapReleaseAutoSet(&pVM->aCpus[0]);
    2153                     PGMDynMapStartAutoSet(&pVM->aCpus[0]);
     2554                    PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]);
     2555                    PGMRZDynMapReleaseAutoSet(&pVM->aCpus[0]);
     2556                    PGMRZDynMapStartAutoSet(&pVM->aCpus[0]);
    21542557                    ASMIntEnable();
    21552558
     
    21792582        LogRel(("Test #5\n"));
    21802583        ASMIntDisable();
    2181         PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
     2584        PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]);
    21822585        RTHCPHYS  HCPhysPT = RTR0MemObjGetPagePhysAddr(pThis->pSegHead->ahMemObjPTs[0], 0);
    2183         rc  = PGMDynMapHCPage(pVM, HCPhysPT, &pv);
     2586        rc  = pgmRZDynMapHCPageCommon(pSet, HCPhysPT, &pv RTLOG_COMMA_SRC_POS);
    21842587        if (RT_SUCCESS(rc))
    21852588        {
     
    22162619    LogRel(("Cleanup.\n"));
    22172620    ASMIntDisable();
    2218     PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
    2219     PGMDynMapFlushAutoSet(&pVM->aCpus[0]);
    2220     PGMDynMapReleaseAutoSet(&pVM->aCpus[0]);
     2621    PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]);
     2622    PGMRZDynMapFlushAutoSet(&pVM->aCpus[0]);
     2623    PGMRZDynMapReleaseAutoSet(&pVM->aCpus[0]);
    22212624    ASMIntEnable();
    22222625
  • trunk/src/VBox/VMM/testcase/tstVMStructRC.cpp

    r31140 r31402  
    461461    GEN_CHECK_OFF(PGMCPU, offVCpu);
    462462    GEN_CHECK_OFF(PGMCPU, offPGM);
    463 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
     463#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE) || defined(VBOX_WITH_RAW_MODE)
    464464    GEN_CHECK_OFF(PGMCPU, AutoSet);
    465465#endif
     
    568568    GEN_CHECK_OFF(PGM, HCPhysInterPaePML4);
    569569    GEN_CHECK_OFF(PGM, pbDynPageMapBaseGC);
    570     GEN_CHECK_OFF(PGM, iDynPageMapLast);
    571     GEN_CHECK_OFF(PGM, aHCPhysDynPageMapCache);
     570    GEN_CHECK_OFF(PGM, pRCDynMap);
    572571    GEN_CHECK_OFF(PGM, pvR0DynMapUsed);
    573572    GEN_CHECK_OFF(PGM, GCPhys4MBPSEMask);
     
    575574    GEN_CHECK_OFF(PGMCPU, fA20Enabled);
    576575    GEN_CHECK_OFF(PGMCPU, fSyncFlags);
    577     GEN_CHECK_OFF(PGM, aHCPhysDynPageMapCache);
    578     GEN_CHECK_OFF(PGM, aLockedDynPageMapCache);
    579576    GEN_CHECK_OFF(PGM, CritSect);
    580577    GEN_CHECK_OFF(PGM, pPoolR3);
     
    788785    GEN_CHECK_OFF(PGMPOOL, aPages[1]);
    789786    GEN_CHECK_OFF(PGMPOOL, aPages[PGMPOOL_IDX_FIRST - 1]);
     787    GEN_CHECK_SIZE(PGMRCDYNMAP);
     788    GEN_CHECK_OFF(PGMRCDYNMAP, u32Magic);
     789    GEN_CHECK_OFF(PGMRCDYNMAP, paPages);
     790    GEN_CHECK_OFF(PGMRCDYNMAP, cPages);
     791    GEN_CHECK_OFF(PGMRCDYNMAP, fLegacyMode);
     792    GEN_CHECK_OFF(PGMRCDYNMAP, cLoad);
     793    GEN_CHECK_OFF(PGMRCDYNMAP, cMaxLoad);
     794    GEN_CHECK_OFF(PGMRCDYNMAP, cGuardPages);
     795    GEN_CHECK_OFF(PGMRCDYNMAP, cUsers);
     796    GEN_CHECK_SIZE(PGMRCDYNMAPENTRY);
     797    GEN_CHECK_OFF(PGMRCDYNMAPENTRY, HCPhys);
     798    GEN_CHECK_OFF(PGMRCDYNMAPENTRY, pvPage);
     799    GEN_CHECK_OFF(PGMRCDYNMAPENTRY, cRefs);
     800    GEN_CHECK_OFF(PGMRCDYNMAPENTRY, uPte.pLegacy);
     801    GEN_CHECK_OFF(PGMRCDYNMAPENTRY, uPte.pPae);
     802    GEN_CHECK_OFF(PGMRCDYNMAPENTRY, uPte.pv);
     803    GEN_CHECK_OFF(PGMMAPSETENTRY, pvPage);
     804    GEN_CHECK_OFF(PGMMAPSETENTRY, iPage);
     805    GEN_CHECK_OFF(PGMMAPSETENTRY, cRefs);
     806    GEN_CHECK_OFF(PGMMAPSETENTRY, cInlinedRefs);
     807    GEN_CHECK_OFF(PGMMAPSETENTRY, cUnrefs);
     808    GEN_CHECK_OFF(PGMMAPSETENTRY, HCPhys);
    790809
    791810    GEN_CHECK_SIZE(REM);
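The GEN_CHECK_SIZE/GEN_CHECK_OFF entries above record sizeof() and offsetof() values for the raw-mode compiler so they can be cross-checked against the host build. Roughly, such a check boils down to something like the following (MY_CHECK_OFF is a hypothetical stand-in; the real generator emits data for comparison rather than printing):

    #include <stddef.h>
    #include <stdio.h>

    /* Report a member's offset so two compilers/ABIs can be compared. */
    #define MY_CHECK_OFF(type, member) \
        printf(#type "::" #member " at %u\n", (unsigned)offsetof(type, member))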
  • trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp

    r31123 r31402  
    332332
    333333    /* pgm */
    334 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
     334#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE)  || defined(VBOX_WITH_RAW_MODE)
    335335    CHECK_MEMBER_ALIGNMENT(PGMCPU, AutoSet, 8);
    336336#endif
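CHECK_MEMBER_ALIGNMENT verifies that AutoSet sits on an 8-byte boundary in both builds that now use it. A hedged C11 equivalent of such a check (MY_CHECK_MEMBER_ALIGNMENT is hypothetical; the actual testcase reports mismatches at runtime instead of failing the build):

    #include <assert.h>
    #include <stddef.h>

    /* Fail the build if a member is not aligned as required. */
    #define MY_CHECK_MEMBER_ALIGNMENT(type, member, align) \
        static_assert(offsetof(type, member) % (align) == 0, \
                      "misaligned: " #type "::" #member)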