VirtualBox

Changeset 16321 in vbox


Ignore:
Timestamp:
Jan 28, 2009 4:36:24 PM
Author:
vboxsync
Message:

More paging updates

Location:
trunk
Files:
8 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/pgm.h

    r16182 r16321  
    329329VMMDECL(int)        PGMMapSetPage(PVM pVM, RTGCPTR GCPtr, uint64_t cb, uint64_t fFlags);
    330330VMMDECL(int)        PGMMapModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
     331VMMDECL(int)        PGMMapActivateAll(PVM pVM);
     332VMMDECL(int)        PGMMapDeactivateAll(PVM pVM);
     333
    331334VMMDECL(int)        PGMShwGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys);
    332335VMMDECL(int)        PGMShwSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags);
     
    548551VMMR3DECL(bool)     PGMR3MapHasConflicts(PVM pVM, uint64_t cr3, bool fRawR0);
    549552VMMR3DECL(int)      PGMR3MapRead(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb);
    550 VMMR3DECL(int)      PGMR3MapActivate(PVM pVM);
    551 VMMR3DECL(int)      PGMR3MapDeactivate(PVM pVM);
    552553
    553554VMMR3DECL(int)      PGMR3HandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
  • trunk/src/VBox/VMM/PGMInternal.h

    r16317 r16321  
    29722972#endif
    29732973
     2974#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
     2975void            pgmMapClearShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE);
     2976void            pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE);
     2977#endif
     2978
    29742979__END_DECLS
    29752980
  • trunk/src/VBox/VMM/PGMMap.cpp

    r16317 r16321  
    4444static int  pgmR3MapIntermediateCheckOne(PVM pVM, uintptr_t uAddress, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault);
    4545static void pgmR3MapIntermediateDoOne(PVM pVM, uintptr_t uAddress, RTHCPHYS HCPhys, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault);
    46 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    47 static void pgmR3MapClearShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE);
    48 static void pgmR3MapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE);
    49 #endif
    5046
    5147
     
    906902
    907903#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    908     pgmR3MapClearShadowPDEs(pVM, pMap, iOldPDE);
     904    pgmMapClearShadowPDEs(pVM, pMap, iOldPDE);
    909905#endif
    910906
     
    942938}
    943939
    944 
    945 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    946 /**
    947  * Clears all PDEs involved with the mapping in the shadow page table.
    948  *
    949  * @param   pVM         The VM handle.
    950  * @param   pMap        Pointer to the mapping in question.
    951  * @param   iOldPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
    952  */
    953 static void pgmR3MapClearShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE)
    954 {
    955     unsigned i = pMap->cPTs;
    956     PGMMODE  enmShadowMode = PGMGetShadowMode(pVM);
    957 
    958     if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
    959         return;
    960 
    961     iOldPDE += i;
    962     while (i-- > 0)
    963     {
    964         iOldPDE--;
    965 
    966         switch(enmShadowMode)
    967         {
    968         case PGMMODE_32_BIT:
    969         {
    970             PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
    971             AssertFatal(pShw32BitPd);
    972 
    973             pShw32BitPd->a[iOldPDE].u   = 0;
    974             break;
    975         }
    976 
    977         case PGMMODE_PAE:
    978         case PGMMODE_PAE_NX:
    979         {
    980             PX86PDPT  pPdpt = NULL;
    981             PX86PDPAE pShwPaePd = NULL;
    982 
    983             const unsigned iPD = iOldPDE / 256;         /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
    984             unsigned iPDE = iOldPDE * 2 % 512;
    985             pPdpt     = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
    986             pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPD << X86_PDPT_SHIFT));
    987             AssertFatal(pShwPaePd);
    988 
    989             pShwPaePd->a[iPDE].u = 0;
    990 
    991             iPDE++;
    992             AssertFatal(iPDE < 512);
    993 
    994             pShwPaePd->a[iPDE].u = 0;
    995             /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode) */
    996             pPdpt->a[iPD].u &= ~PGM_PLXFLAGS_MAPPING;
    997             break;
    998         }
    999         }
    1000     }
    1001 }
    1002 #endif
    1003 
    1004940/**
    1005941 * Sets all PDEs involved with the mapping in the shadow and intermediate page tables.
     
    1016952
    1017953#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    1018     pgmR3MapSetShadowPDEs(pVM, pMap, iNewPDE);
     954    pgmMapSetShadowPDEs(pVM, pMap, iNewPDE);
    1019955#endif
    1020956
     
    10911027    }
    10921028}
    1093 
    1094 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    1095 /**
    1096  * Sets all PDEs involved with the mapping in the shadow page table.
    1097  *
    1098  * @param   pVM         The VM handle.
    1099  * @param   pMap        Pointer to the mapping in question.
    1100  * @param   iNewPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
    1101  */
    1102 static void pgmR3MapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
    1103 {
    1104     PPGM    pPGM = &pVM->pgm.s;
    1105     PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
    1106 
    1107     if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
    1108         return;
    1109 
    1110     Assert(enmShadowMode <= PGMMODE_PAE_NX);
    1111 
    1112     /*
    1113      * Init the page tables and insert them into the page directories.
    1114      */
    1115     unsigned i = pMap->cPTs;
    1116     iNewPDE += i;
    1117     while (i-- > 0)
    1118     {
    1119         iNewPDE--;
    1120 
    1121         switch(enmShadowMode)
    1122         {
    1123         case PGMMODE_32_BIT:
    1124         {
    1125             PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
    1126             AssertFatal(pShw32BitPd);
    1127 
    1128             if (pShw32BitPd->a[iNewPDE].n.u1Present)
    1129             {
    1130                 Assert(!(pShw32BitPd->a[iNewPDE].u & PGM_PDFLAGS_MAPPING));
    1131                 pgmPoolFree(pVM, pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK, pVM->pgm.s.pShwPageCR3R3->idx, iNewPDE);
    1132             }
    1133 
    1134             X86PDE Pde;
    1135             /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags */
    1136             Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;
    1137             pShw32BitPd->a[iNewPDE]   = Pde;
    1138             break;
    1139         }
    1140 
    1141         case PGMMODE_PAE:
    1142         case PGMMODE_PAE_NX:
    1143         {
    1144             PX86PDPT  pShwPdpt;
    1145             PX86PDPAE pShwPaePd;
    1146             const unsigned iPdPt = iNewPDE / 256;
    1147             unsigned iPDE = iNewPDE * 2 % 512;
    1148 
    1149             pShwPdpt  = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
    1150             Assert(pShwPdpt);
    1151             pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
    1152             AssertFatal(pShwPaePd);
    1153 
    1154             PPGMPOOLPAGE pPoolPagePde = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
    1155             AssertFatal(pPoolPagePde);
    1156 
    1157             if (pShwPaePd->a[iPDE].n.u1Present)
    1158             {
    1159                 Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
    1160                 pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePde->idx, iNewPDE);
    1161             }
    1162 
    1163             X86PDEPAE PdePae0;
    1164             PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;
    1165             pShwPaePd->a[iPDE] = PdePae0;
    1166 
    1167             /* 2nd 2 MB PDE of the 4 MB region */
    1168             iPDE++;
    1169             AssertFatal(iPDE < 512);
    1170 
    1171             if (pShwPaePd->a[iPDE].n.u1Present)
    1172             {
    1173                 Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
    1174                 pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePde->idx, iNewPDE);
    1175             }
    1176 
    1177             X86PDEPAE PdePae1;
    1178             PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;
    1179             pShwPaePd->a[iPDE] = PdePae1;
    1180 
    1181             /* Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode) */
    1182             pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING;
    1183         }
    1184         }
    1185     }
    1186 }
    1187 #endif
    11881029
    11891030/**
     
    15171358}
    15181359
    1519 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    1520 /**
    1521  * Apply the hypervisor mappings to the active CR3.
    1522  *
    1523  * @returns VBox status.
    1524  * @param   pVM         The virtual machine.
    1525  */
    1526 VMMR3DECL(int) PGMR3MapActivate(PVM pVM)
    1527 {
    1528     /*
    1529      * Can skip this if mappings are safely fixed.
    1530      */
    1531     if (pVM->pgm.s.fMappingsFixed)
    1532         return VINF_SUCCESS;
    1533 
    1534     /*
    1535      * Iterate mappings.
    1536      */
    1537     for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
    1538     {
    1539         unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
    1540 
    1541         pgmR3MapSetShadowPDEs(pVM, pCur, iPDE);
    1542     }
    1543 
    1544     return VINF_SUCCESS;
    1545 }
    1546 
    1547 /**
    1548  * Remove the hypervisor mappings from the active CR3
    1549  *
    1550  * @returns VBox status.
    1551  * @param   pVM         The virtual machine.
    1552  */
    1553 VMMR3DECL(int) PGMR3MapDeactivate(PVM pVM)
    1554 {
    1555     /*
    1556      * Can skip this if mappings are safely fixed.
    1557      */
    1558     if (pVM->pgm.s.fMappingsFixed)
    1559         return VINF_SUCCESS;
    1560 
    1561     /*
    1562      * Iterate mappings.
    1563      */
    1564     for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
    1565     {
    1566         unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
    1567 
    1568         pgmR3MapClearShadowPDEs(pVM, pCur, iPDE);
    1569     }
    1570     return VINF_SUCCESS;
    1571 }
    1572 #endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
    1573 
    15741360/**
    15751361 * Read memory from the guest mappings.
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r16317 r16321  
    211211# include "PGMAllShw.h"
    212212
    213 /* Guest - protected mode */
     213/* Guest - protected mode (only used for AMD-V nested paging in 64 bits mode) */
    214214# define PGM_GST_TYPE               PGM_TYPE_PROT
    215215# define PGM_GST_NAME(name)         PGM_GST_NAME_PROT(name)
     
    14181418VMMDECL(RTHCPHYS) PGMGetHyperAmd64CR3(PVM pVM)
    14191419{
    1420 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    14211420    return pVM->pgm.s.HCPhysShwCR3;
    1422 #else
    1423     return pVM->pgm.s.HCPhysShwCR3;
    1424 #endif
    14251421}
    14261422
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r16317 r16321  
    44184418PGM_BTH_DECL(int, MapCR3)(PVM pVM, RTGCPHYS GCPhysCR3)
    44194419{
     4420    /* Update guest paging info. */
    44204421#if PGM_GST_TYPE == PGM_TYPE_32BIT \
    44214422 || PGM_GST_TYPE == PGM_TYPE_PAE \
     
    45534554    int rc = VINF_SUCCESS;
    45544555#endif
     4556
     4557#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
     4558    /* Update shadow paging info. */
     4559# if    PGM_SHW_TYPE == PGM_TYPE_32BITS \
     4560     || PGM_SHW_TYPE == PGM_TYPE_PAE    \
     4561     || PGM_SHW_TYPE == PGM_TYPE_AMD64
     4562
     4563    if (!HWACCMIsNestedPagingActive(pVM))
     4564    {
     4565        /* Apply all hypervisor mappings to the new CR3. */
     4566        PGMMapActivateAll(pVM);
     4567
     4568        /*
     4569         * Update the shadow root page as well since that's not fixed.
     4570         */
     4571        PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
     4572        if (pVM->pgm.s.CTX_SUFF(pShwPageCR3))
     4573        {
     4574            /* It might have been freed already by a pool flush (see e.g. PGMR3MappingsUnfix). */
     4575            /** @todo Coordinate this better with the pool. */
     4576            if (pVM->pgm.s.CTX_SUFF(pShwPageCR3)->enmKind != PGMPOOLKIND_FREE)
     4577                pgmPoolFreeByPage(pPool, pVM->pgm.s.CTX_SUFF(pShwPageCR3), SHW_POOL_ROOT_IDX, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->GCPhys >> PAGE_SHIFT);
     4578            pVM->pgm.s.pShwPageCR3R3 = 0;
     4579            pVM->pgm.s.pShwPageCR3R0 = 0;
     4580            pVM->pgm.s.pShwRootR3    = 0;
     4581#  ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
     4582            pVM->pgm.s.pShwRootR0    = 0;
     4583#  endif
     4584            pVM->pgm.s.HCPhysShwCR3  = 0;
     4585        }
     4586
     4587        Assert(!(GCPhysCR3 >> (PAGE_SHIFT + 32)));
     4588        rc = pgmPoolAlloc(pVM, GCPhysCR3, BTH_PGMPOOLKIND_ROOT, SHW_POOL_ROOT_IDX, GCPhysCR3 >> PAGE_SHIFT, &pVM->pgm.s.CTX_SUFF(pShwPageCR3));
     4589        if (rc == VERR_PGM_POOL_FLUSHED)
     4590        {
     4591            Log(("MapCR3: PGM pool flushed -> signal sync cr3\n"));
     4592            Assert(VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
     4593            return VINF_PGM_SYNC_CR3;
     4594        }
     4595        AssertRCReturn(rc, rc);
     4596#  ifdef IN_RING0
     4597        pVM->pgm.s.pShwPageCR3R3 = MMHyperCCToR3(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
     4598#  else
     4599        pVM->pgm.s.pShwPageCR3R0 = MMHyperCCToR0(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
     4600#  endif
     4601        pVM->pgm.s.pShwRootR3    = (R3PTRTYPE(void *))pVM->pgm.s.CTX_SUFF(pShwPageCR3)->pvPageR3;
     4602        Assert(pVM->pgm.s.pShwRootR3);
     4603#  ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
     4604        pVM->pgm.s.pShwRootR0    = (R0PTRTYPE(void *))PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pVM->pgm.s.CTX_SUFF(pShwPageCR3));
     4605#  endif
     4606        pVM->pgm.s.HCPhysShwCR3  = pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
     4607        rc = VINF_SUCCESS; /* clear it - pgmPoolAlloc returns hints. */
     4608    }
     4609# endif
     4610#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
     4611
    45554612    return rc;
    45564613}
     
    45684625    int rc = VINF_SUCCESS;
    45694626
     4627    /* Update guest paging info. */
    45704628#if PGM_GST_TYPE == PGM_TYPE_32BIT
    45714629    pVM->pgm.s.pGst32BitPdR3 = 0;
     
    45964654    pVM->pgm.s.pGstAmd64Pml4R0 = 0;
    45974655# endif
     4656# ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    45984657    if (!HWACCMIsNestedPagingActive(pVM))
    45994658    {
    46004659        pVM->pgm.s.pShwRootR3 = 0;
    4601 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
     4660#  ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    46024661        pVM->pgm.s.pShwRootR0 = 0;
    4603 # endif
     4662#  endif
    46044663        pVM->pgm.s.HCPhysShwCR3 = 0;
    4605 # ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    46064664        if (pVM->pgm.s.CTX_SUFF(pShwPageCR3))
    46074665        {
     
    46114669            pVM->pgm.s.pShwPageCR3R0 = 0;
    46124670        }
     4671    }
    46134672# endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */
    4614     }
    46154673
    46164674#else /* prot/real mode stub */
    46174675    /* nothing to do */
    46184676#endif
     4677
     4678#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
     4679    /* Update shadow paging info. */
     4680# if    PGM_SHW_TYPE == PGM_TYPE_32BITS \
     4681     || PGM_SHW_TYPE == PGM_TYPE_PAE    \
     4682     || PGM_SHW_TYPE == PGM_TYPE_AMD64
     4683
     4684    if (!HWACCMIsNestedPagingActive(pVM))
     4685    {
     4686        /* @todo: dangerous as it's the current CR3! */
     4687        /* Remove the hypervisor mappings from the shadow page table. */
     4688        PGMMapDeactivateAll(pVM);
     4689
     4690        pVM->pgm.s.pShwRootR3 = 0;
     4691#  ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
     4692        pVM->pgm.s.pShwRootR0 = 0;
     4693#  endif
     4694        pVM->pgm.s.HCPhysShwCR3 = 0;
     4695        if (pVM->pgm.s.CTX_SUFF(pShwPageCR3))
     4696        {
     4697            PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
     4698            pgmPoolFreeByPage(pPool, pVM->pgm.s.CTX_SUFF(pShwPageCR3), SHW_POOL_ROOT_IDX, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->GCPhys >> PAGE_SHIFT);
     4699            pVM->pgm.s.pShwPageCR3R3 = 0;
     4700            pVM->pgm.s.pShwPageCR3R0 = 0;
     4701        }
     4702    }
     4703# endif
     4704#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
     4705
    46194706    return rc;
    46204707}
  • trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp

    r13232 r16321  
    210210
    211211
     212#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
     213
     214/**
     215 * Sets all PDEs involved with the mapping in the shadow page table.
     216 *
     217 * @param   pVM         The VM handle.
     218 * @param   pMap        Pointer to the mapping in question.
     219 * @param   iNewPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
     220 */
     221void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
     222{
     223    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
     224        return;
     225
     226    PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
     227    Assert(enmShadowMode <= PGMMODE_PAE_NX);
     228
     229    /*
     230     * Init the page tables and insert them into the page directories.
     231     */
     232    unsigned i = pMap->cPTs;
     233    iNewPDE += i;
     234    while (i-- > 0)
     235    {
     236        iNewPDE--;
     237
     238        switch(enmShadowMode)
     239        {
     240        case PGMMODE_32_BIT:
     241        {
     242            PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
     243            AssertFatal(pShw32BitPd);
     244
     245            if (pShw32BitPd->a[iNewPDE].n.u1Present)
     246            {
     247                Assert(!(pShw32BitPd->a[iNewPDE].u & PGM_PDFLAGS_MAPPING));
     248                pgmPoolFree(pVM, pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iNewPDE);
     249            }
     250
     251            X86PDE Pde;
     252            /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags */
     253            Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;
     254            pShw32BitPd->a[iNewPDE]   = Pde;
     255            break;
     256        }
     257
     258        case PGMMODE_PAE:
     259        case PGMMODE_PAE_NX:
     260        {
     261            PX86PDPT  pShwPdpt;
     262            PX86PDPAE pShwPaePd;
     263            const unsigned iPdPt = iNewPDE / 256;
     264            unsigned iPDE = iNewPDE * 2 % 512;
     265
     266            pShwPdpt  = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
     267            Assert(pShwPdpt);
     268            pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
     269            AssertFatal(pShwPaePd);
     270
     271            PPGMPOOLPAGE pPoolPagePde = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
     272            AssertFatal(pPoolPagePde);
     273
     274            if (pShwPaePd->a[iPDE].n.u1Present)
     275            {
     276                Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
     277                pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePde->idx, iNewPDE);
     278            }
     279
     280            X86PDEPAE PdePae0;
     281            PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;
     282            pShwPaePd->a[iPDE] = PdePae0;
     283
     284            /* 2nd 2 MB PDE of the 4 MB region */
     285            iPDE++;
     286            AssertFatal(iPDE < 512);
     287
     288            if (pShwPaePd->a[iPDE].n.u1Present)
     289            {
     290                Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
     291                pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePde->idx, iNewPDE);
     292            }
     293
     294            X86PDEPAE PdePae1;
     295            PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;
     296            pShwPaePd->a[iPDE] = PdePae1;
     297
     298            /* Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode) */
     299            pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING;
     300        }
     301        }
     302    }
     303}
     304
     305/**
     306 * Clears all PDEs involved with the mapping in the shadow page table.
     307 *
     308 * @param   pVM         The VM handle.
     309 * @param   pMap        Pointer to the mapping in question.
     310 * @param   iOldPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
     311 */
     312void pgmMapClearShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE)
     313{
     314    unsigned i = pMap->cPTs;
     315    PGMMODE  enmShadowMode = PGMGetShadowMode(pVM);
     316
     317    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
     318        return;
     319
     320    iOldPDE += i;
     321    while (i-- > 0)
     322    {
     323        iOldPDE--;
     324
     325        switch(enmShadowMode)
     326        {
     327        case PGMMODE_32_BIT:
     328        {
     329            PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
     330            AssertFatal(pShw32BitPd);
     331
     332            pShw32BitPd->a[iOldPDE].u   = 0;
     333            break;
     334        }
     335
     336        case PGMMODE_PAE:
     337        case PGMMODE_PAE_NX:
     338        {
     339            PX86PDPT  pPdpt = NULL;
     340            PX86PDPAE pShwPaePd = NULL;
     341
     342            const unsigned iPD = iOldPDE / 256;         /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
     343            unsigned iPDE = iOldPDE * 2 % 512;
     344            pPdpt     = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
     345            pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPD << X86_PDPT_SHIFT));
     346            AssertFatal(pShwPaePd);
     347
     348            pShwPaePd->a[iPDE].u = 0;
     349
     350            iPDE++;
     351            AssertFatal(iPDE < 512);
     352
     353            pShwPaePd->a[iPDE].u = 0;
     354            /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode) */
     355            pPdpt->a[iPD].u &= ~PGM_PLXFLAGS_MAPPING;
     356            break;
     357        }
     358        }
     359    }
     360}
     361
     362/**
     363 * Apply the hypervisor mappings to the active CR3.
     364 *
     365 * @returns VBox status.
     366 * @param   pVM         The virtual machine.
     367 */
     368VMMDECL(int) PGMMapActivateAll(PVM pVM)
     369{
     370    /*
     371     * Can skip this if mappings are safely fixed.
     372     */
     373    if (pVM->pgm.s.fMappingsFixed)
     374        return VINF_SUCCESS;
     375
     376    /*
     377     * Iterate mappings.
     378     */
     379    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
     380    {
     381        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
     382
     383        pgmMapSetShadowPDEs(pVM, pCur, iPDE);
     384    }
     385
     386    return VINF_SUCCESS;
     387}
     388
     389/**
     390 * Remove the hypervisor mappings from the active CR3
     391 *
     392 * @returns VBox status.
     393 * @param   pVM         The virtual machine.
     394 */
     395VMMDECL(int) PGMMapDeactivateAll(PVM pVM)
     396{
     397    /*
     398     * Can skip this if mappings are safely fixed.
     399     */
     400    if (pVM->pgm.s.fMappingsFixed)
     401        return VINF_SUCCESS;
     402
     403    /*
     404     * Iterate mappings.
     405     */
     406    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
     407    {
     408        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
     409
     410        pgmMapClearShadowPDEs(pVM, pCur, iPDE);
     411    }
     412    return VINF_SUCCESS;
     413}
     414#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
  • trunk/src/VBox/VMM/VMMAll/PGMAllShw.h

    r16317 r16321  
    110110#  define SHW_PDPE_PG_MASK      X86_PDPE_PG_MASK
    111111#  define SHW_TOTAL_PD_ENTRIES  (X86_PG_PAE_ENTRIES*X86_PG_PAE_PDPE_ENTRIES)
    112 #  define SHW_POOL_ROOT_IDX     PGMPOOL_IDX_PAE_PD
     112#  define SHW_POOL_ROOT_IDX     PGMPOOL_IDX_PDPT
    113113
    114114# endif
  • trunk/src/VBox/VMM/VMMSwitcher.cpp

    r15414 r16321  
    465465            }
    466466
     467#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
     468            /* @todo No need for three GetHyper calls; one and the same base is used */
     469#endif
    467470            /*
    468471             * Store the 32-Bit CR3 (32-bit) for the hypervisor (shadow) memory context.
     
    470473            case FIX_HYPER_32BIT_CR3:
    471474            {
    472 
    473475                *uSrc.pu32 = PGMGetHyper32BitCR3(pVM);
    474476                break;
     
    480482            case FIX_HYPER_PAE_CR3:
    481483            {
    482 
    483484                *uSrc.pu32 = PGMGetHyperPaeCR3(pVM);
    484485                break;
     
    494495                break;
    495496            }
    496 
    497497            /*
    498498             * Store Hypervisor CS (16-bit).
     
    746746                    GCPtrGDT,
    747747                    PGMGetInter32BitCR3(pVM), PGMGetInterPaeCR3(pVM), PGMGetInterAmd64CR3(pVM),
     748#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
     749                    /* @todo No need for three GetHyper calls; one and the same base is used */
     750#endif
    748751                    PGMGetHyper32BitCR3(pVM), PGMGetHyperPaeCR3(pVM), PGMGetHyperAmd64CR3(pVM),
    749752                    SelCS, SelDS, SelCS64, SelTSS);
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette