VirtualBox

Changeset 17212 in vbox


Timestamp: Feb 27, 2009 4:12:26 PM (16 years ago)
Author:    vboxsync
Message:   Split up defines and code

File:      1 copied
  • trunk/src/VBox/VMM/VMMAll/PGMAllGstDefs.h (diff against r17205)
/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
__BEGIN_DECLS
PGM_GST_DECL(int, GetPage)(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
PGM_GST_DECL(int, ModifyPage)(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
PGM_GST_DECL(int, GetPDE)(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPDE);
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
PGM_GST_DECL(int, MonitorCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
PGM_GST_DECL(int, UnmonitorCR3)(PVM pVM);
#endif
PGM_GST_DECL(bool, HandlerVirtualUpdate)(PVM pVM, uint32_t cr4);
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
# ifndef IN_RING3
PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
#  if PGM_GST_TYPE == PGM_TYPE_PAE \
  || PGM_GST_TYPE == PGM_TYPE_AMD64
PGM_GST_DECL(int, WriteHandlerPD)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
#  endif
# endif
#endif
__END_DECLS


/**
 * Gets effective Guest OS page information.
 *
 * When GCPtr is in a big page, the function will return as if it was a normal
 * 4KB page. If the need for distinguishing between big and normal pages becomes
 * necessary at a later point, a PGMGstGetPageEx() will be created for that
 * purpose.
 *
 * @returns VBox status.
 * @param   pVM         VM Handle.
 * @param   GCPtr       Guest Context virtual address of the page. Page aligned!
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*, even for big pages.
 * @param   pGCPhys     Where to store the GC physical address of the page.
 *                      This is page aligned.
 */
PGM_GST_DECL(int, GetPage)(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
{
#if PGM_GST_TYPE == PGM_TYPE_REAL \
 || PGM_GST_TYPE == PGM_TYPE_PROT
    /*
     * Fake it.
     */
    if (pfFlags)
        *pfFlags = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
    if (pGCPhys)
        *pGCPhys = GCPtr & PAGE_BASE_GC_MASK;
    return VINF_SUCCESS;

#elif PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64

    /*
     * Get the PDE.
     */
# if PGM_GST_TYPE == PGM_TYPE_32BIT
    X86PDE      Pde = pgmGstGet32bitPDE(&pVM->pgm.s, GCPtr);

# elif PGM_GST_TYPE == PGM_TYPE_PAE
    /* pgmGstGetPaePDE will return 0 if the PDPTE is marked as not present.
     * All the other bits in the PDPTE are only valid in long mode (r/w, u/s, nx). */
    X86PDEPAE   Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
    bool        fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVM) & MSR_K6_EFER_NXE);

# elif PGM_GST_TYPE == PGM_TYPE_AMD64
    PX86PML4E   pPml4e;
    X86PDPE     Pdpe;
    X86PDEPAE   Pde = pgmGstGetLongModePDEEx(&pVM->pgm.s, GCPtr, &pPml4e, &Pdpe);
    bool        fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVM) & MSR_K6_EFER_NXE);

    Assert(pPml4e);
    if (!(pPml4e->n.u1Present & Pdpe.n.u1Present))
        return VERR_PAGE_TABLE_NOT_PRESENT;

    /* Merge accessed, write, user and no-execute bits into the PDE. Note that
     * no-execute is ORed: it is effective if set at any level. */
    Pde.n.u1Accessed  &= pPml4e->n.u1Accessed & Pdpe.lm.u1Accessed;
    Pde.n.u1Write     &= pPml4e->n.u1Write & Pdpe.lm.u1Write;
    Pde.n.u1User      &= pPml4e->n.u1User & Pdpe.lm.u1User;
    Pde.n.u1NoExecute |= pPml4e->n.u1NoExecute | Pdpe.lm.u1NoExecute;
# endif

    /*
     * Lookup the page.
     */
    if (!Pde.n.u1Present)
        return VERR_PAGE_TABLE_NOT_PRESENT;

    if (    !Pde.b.u1Size
# if PGM_GST_TYPE != PGM_TYPE_AMD64
        ||  !(CPUMGetGuestCR4(pVM) & X86_CR4_PSE)
# endif
        )
    {
        PGSTPT pPT;
        int rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
        if (RT_FAILURE(rc))
            return rc;

        /*
         * Get PT entry and check presence.
         */
        const GSTPTE Pte = pPT->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];
        if (!Pte.n.u1Present)
            return VERR_PAGE_NOT_PRESENT;

        /*
         * Store the result.
         * RW and US flags depend on all levels (bitwise AND) - except for legacy PAE
         * where the PDPE is simplified.
         */
        if (pfFlags)
        {
            *pfFlags = (Pte.u & ~GST_PTE_PG_MASK)
                     & ((Pde.u & (X86_PTE_RW | X86_PTE_US)) | ~(uint64_t)(X86_PTE_RW | X86_PTE_US));
# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
            /* The effective NX bit is the bitwise OR of the PT and PD entries. */
            if (fNoExecuteBitValid)
                *pfFlags |= ((Pte.u | Pde.u) & X86_PTE_PAE_NX);
# endif
        }
        if (pGCPhys)
            *pGCPhys = Pte.u & GST_PTE_PG_MASK;
    }
    else
    {
        /*
         * Map big to 4k PTE and store the result
         */
        if (pfFlags)
        {
            *pfFlags = (Pde.u & ~(GST_PTE_PG_MASK | X86_PTE_PAT))
                     | ((Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT);
# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
            /* For a big page there is no PT; the NX bit comes from the PDE alone. */
            if (fNoExecuteBitValid)
                *pfFlags |= (Pde.u & X86_PTE_PAE_NX);
# endif
        }
        if (pGCPhys)
            *pGCPhys = GST_GET_PDE_BIG_PG_GCPHYS(Pde) | (GCPtr & (~GST_PDE_BIG_PG_MASK ^ ~GST_PTE_PG_MASK));
    }
    return VINF_SUCCESS;
#else
# error "shouldn't be here!"
    /* something else... */
    return VERR_NOT_SUPPORTED;
#endif
}
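
/*
 * Illustration (not part of this changeset): a hypothetical call site for the
 * mode-independent wrapper PGMGstGetPage(), which dispatches to the
 * PGM_GST_DECL(GetPage) implementation above.  The helper name
 * DemoQueryGuestPage is made up for this sketch.
 */
static int DemoQueryGuestPage(PVM pVM, RTGCPTR GCPtr)
{
    uint64_t fFlags;
    RTGCPHYS GCPhys;
    int rc = PGMGstGetPage(pVM, GCPtr & PAGE_BASE_GC_MASK, &fFlags, &GCPhys);
    if (RT_FAILURE(rc))
        return rc;                      /* e.g. VERR_PAGE_TABLE_NOT_PRESENT */
    if (!(fFlags & X86_PTE_RW))
        return VERR_ACCESS_DENIED;      /* mapped read-only at some level */
    Log(("GCPtr=%RGv -> GCPhys=%RGp fFlags=%#RX64\n", GCPtr, GCPhys, fFlags));
    return VINF_SUCCESS;
}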


/**
 * Modifies page flags for a range of pages in the guest's tables.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address of the first page in the range. Page aligned!
 * @param   cb          Size (in bytes) of the page range to apply the modification to. Page aligned!
 * @param   fFlags      The OR mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 */
PGM_GST_DECL(int, ModifyPage)(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64

    for (;;)
    {
        /*
         * Get the PD entry.
         */
# if PGM_GST_TYPE == PGM_TYPE_32BIT
        PX86PDE pPde = pgmGstGet32bitPDEPtr(&pVM->pgm.s, GCPtr);

# elif PGM_GST_TYPE == PGM_TYPE_PAE
        /* pgmGstGetPaePDEPtr will return 0 if the PDPTE is marked as not present.
         * All the other bits in the PDPTE are only valid in long mode (r/w, u/s, nx).
         */
        PX86PDEPAE pPde = pgmGstGetPaePDEPtr(&pVM->pgm.s, GCPtr);
        Assert(pPde);
        if (!pPde)
            return VERR_PAGE_TABLE_NOT_PRESENT;
# elif PGM_GST_TYPE == PGM_TYPE_AMD64
        /** @todo Setting the r/w, u/s & nx bits might have no effect depending on the pdpte & pml4 values */
        PX86PDEPAE pPde = pgmGstGetLongModePDEPtr(&pVM->pgm.s, GCPtr);
        Assert(pPde);
        if (!pPde)
            return VERR_PAGE_TABLE_NOT_PRESENT;
# endif
        GSTPDE Pde = *pPde;
        Assert(Pde.n.u1Present);
        if (!Pde.n.u1Present)
            return VERR_PAGE_TABLE_NOT_PRESENT;

        if (    !Pde.b.u1Size
# if PGM_GST_TYPE != PGM_TYPE_AMD64
            ||  !(CPUMGetGuestCR4(pVM) & X86_CR4_PSE)
# endif
            )
        {
            /*
             * 4KB Page table
             *
             * Walk page tables and pages till we're done.
             */
            PGSTPT pPT;
            int rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
            if (RT_FAILURE(rc))
                return rc;

            unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
            while (iPTE < RT_ELEMENTS(pPT->a))
            {
                GSTPTE Pte = pPT->a[iPTE];
                Pte.u = (Pte.u & (fMask | X86_PTE_PAE_PG_MASK))
                      | (fFlags & ~GST_PTE_PG_MASK);
                pPT->a[iPTE] = Pte;

                /* next page */
                cb -= PAGE_SIZE;
                if (!cb)
                    return VINF_SUCCESS;
                GCPtr += PAGE_SIZE;
                iPTE++;
            }
        }
        else
        {
            /*
             * 4MB Page table
             */
# if PGM_GST_TYPE == PGM_TYPE_32BIT
            Pde.u = (Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PG_HIGH_MASK | X86_PDE4M_PS))
# else
            Pde.u = (Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS))
# endif
                  | (fFlags & ~GST_PTE_PG_MASK)
                  | ((fFlags & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT);
            *pPde = Pde;

            /* advance */
            const unsigned cbDone = GST_BIG_PAGE_SIZE - (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
            if (cbDone >= cb)
                return VINF_SUCCESS;
            cb    -= cbDone;
            GCPtr += cbDone;
        }
    }

#else
    /* real / protected mode: ignore. */
    return VINF_SUCCESS;
#endif
}
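
/*
 * Illustration (not part of this changeset): the fMask/fFlags contract above
 * amounts to newPte = (oldPte & fMask) | fFlags, with the physical address
 * bits always preserved.  This sketch assumes the public PGMGstModifyPage()
 * wrapper that dispatches to the implementation above; the helper name
 * DemoWriteProtectRange is made up.
 */
static int DemoWriteProtectRange(PVM pVM, RTGCPTR GCPtr, size_t cb)
{
    /* Write-protect: AND out X86_PTE_RW, OR in nothing. */
    int rc = PGMGstModifyPage(pVM, GCPtr, cb, 0 /*fFlags*/, ~(uint64_t)X86_PTE_RW /*fMask*/);
    if (RT_SUCCESS(rc))
        /* Restore: keep everything (fMask all ones) and OR X86_PTE_RW back in. */
        rc = PGMGstModifyPage(pVM, GCPtr, cb, X86_PTE_RW /*fFlags*/, ~(uint64_t)0 /*fMask*/);
    return rc;
}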


/**
 * Retrieves guest PDE information.
 *
 * @returns VBox status code.
 * @param   pVM         The virtual machine.
 * @param   GCPtr       Guest context pointer.
 * @param   pPDE        Pointer to the guest PDE structure; always filled in PAE format.
 */
PGM_GST_DECL(int, GetPDE)(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPDE)
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE   \
 || PGM_GST_TYPE == PGM_TYPE_AMD64

# if PGM_GST_TYPE == PGM_TYPE_32BIT
    X86PDE    Pde = pgmGstGet32bitPDE(&pVM->pgm.s, GCPtr);
# elif PGM_GST_TYPE == PGM_TYPE_PAE
    X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
# elif PGM_GST_TYPE == PGM_TYPE_AMD64
    X86PDEPAE Pde = pgmGstGetLongModePDE(&pVM->pgm.s, GCPtr);
# endif

    pPDE->u = (X86PGPAEUINT)Pde.u;
    return VINF_SUCCESS;
#else
    AssertFailed();
    return VERR_NOT_IMPLEMENTED;
#endif
}
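
/*
 * Illustration (not part of this changeset): because the PDE is always
 * returned widened to PAE format, callers can test bits uniformly across
 * guest modes.  This assumes a mode-dispatching wrapper named PGMGstGetPDE;
 * if this tree has no such wrapper, the call would go through the guest-mode
 * function table instead.  DemoLogGuestPde is made up for this sketch.
 */
static void DemoLogGuestPde(PVM pVM, RTGCPTR GCPtr)
{
    X86PDEPAE Pde;
    if (RT_SUCCESS(PGMGstGetPDE(pVM, GCPtr, &Pde)) && Pde.n.u1Present)
        Log(("PDE=%#RX64 big=%d write=%d\n", Pde.u, Pde.b.u1Size, Pde.n.u1Write));
}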


#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY

#undef LOG_GROUP
#define LOG_GROUP LOG_GROUP_PGM_POOL

/**
 * Registers physical page monitors for the necessary paging
 * structures to detect conflicts with our guest mappings.
 *
 * This is always called after mapping CR3.
 * This is never called with fixed mappings.
 *
 * @returns VBox status, no specials.
 * @param   pVM             VM handle.
 * @param   GCPhysCR3       The physical address in the CR3 register.
 */
PGM_GST_DECL(int, MonitorCR3)(PVM pVM, RTGCPHYS GCPhysCR3)
{
    Assert(!pVM->pgm.s.fMappingsFixed);
    int rc = VINF_SUCCESS;

    /*
     * Register/Modify write phys handler for guest's CR3 if it changed.
     */
#if PGM_GST_TYPE == PGM_TYPE_32BIT

    if (pVM->pgm.s.GCPhysGstCR3Monitored != GCPhysCR3)
    {
# ifndef PGMPOOL_WITH_MIXED_PT_CR3
        const unsigned cbCR3Stuff = PGM_GST_TYPE == PGM_TYPE_PAE ? 32 : PAGE_SIZE;
        if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
            rc = PGMHandlerPhysicalModify(pVM, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3, GCPhysCR3 + cbCR3Stuff - 1);
        else
            rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE, GCPhysCR3, GCPhysCR3 + cbCR3Stuff - 1,
                                              pVM->pgm.s.pfnR3GstWriteHandlerCR3, 0,
                                              pVM->pgm.s.pfnR0GstWriteHandlerCR3, 0,
                                              pVM->pgm.s.pfnRCGstWriteHandlerCR3, 0,
                                              pVM->pgm.s.pszR3GstWriteHandlerCR3);
# else  /* PGMPOOL_WITH_MIXED_PT_CR3 */
        rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTX_SUFF(pPool),
                                         pVM->pgm.s.enmShadowMode == PGMMODE_PAE
                                      || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX
                                      ? PGMPOOL_IDX_PAE_PD
                                      : PGMPOOL_IDX_PD,
                                      GCPhysCR3);
# endif /* PGMPOOL_WITH_MIXED_PT_CR3 */
        if (RT_FAILURE(rc))
        {
            AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
                             rc, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3));
            return rc;
        }
        pVM->pgm.s.GCPhysGstCR3Monitored = GCPhysCR3;
    }

#elif PGM_GST_TYPE == PGM_TYPE_PAE
    /* Monitor the PDPT page */
    /*
     * Register/Modify write phys handler for guest's CR3 if it changed.
     */
# ifndef PGMPOOL_WITH_MIXED_PT_CR3
    AssertFailed();
# endif
    if (pVM->pgm.s.GCPhysGstCR3Monitored != GCPhysCR3)
    {
        rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PDPT, GCPhysCR3);
        if (RT_FAILURE(rc))
        {
            AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
                             rc, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3));
            return rc;
        }
        pVM->pgm.s.GCPhysGstCR3Monitored = GCPhysCR3;
    }

    /*
     * Do the 4 PDs.
     */
    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
    for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
    {
        if (pGuestPDPT->a[i].n.u1Present)
        {
            RTGCPHYS GCPhys = pGuestPDPT->a[i].u & X86_PDPE_PG_MASK;
            if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != GCPhys)
            {
                Assert(pVM->pgm.s.enmShadowMode == PGMMODE_PAE || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX);

                rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i, GCPhys);
            }

            if (RT_FAILURE(rc))
            {
                AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
                                 rc, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i], GCPhys));
                return rc;
            }
            pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = GCPhys;
        }
        else if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != NIL_RTGCPHYS)
        {
            rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i);
            AssertRC(rc);
            pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = NIL_RTGCPHYS;
        }
    }

#else
    /* prot/real/amd64 mode stub */

#endif
    return rc;
}

/**
 * Deregisters any physical page monitors installed by MonitorCR3.
 *
 * @returns VBox status code, no specials.
 * @param   pVM         The VM handle.
 */
PGM_GST_DECL(int, UnmonitorCR3)(PVM pVM)
{
    int rc = VINF_SUCCESS;

    /*
     * Deregister the access handlers.
     *
     * PGMSyncCR3 will reinstall them if required, and it will be executed
     * before we enter GC again.
     */
#if PGM_GST_TYPE == PGM_TYPE_32BIT
    if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
    {
# ifndef PGMPOOL_WITH_MIXED_PT_CR3
        rc = PGMHandlerPhysicalDeregister(pVM, pVM->pgm.s.GCPhysGstCR3Monitored);
        AssertRCReturn(rc, rc);
# else /* PGMPOOL_WITH_MIXED_PT_CR3 */
        rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool),
                                           pVM->pgm.s.enmShadowMode == PGMMODE_PAE
                                        || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX
                                        ? PGMPOOL_IDX_PAE_PD
                                        : PGMPOOL_IDX_PD);
        AssertRCReturn(rc, rc);
# endif /* PGMPOOL_WITH_MIXED_PT_CR3 */
        pVM->pgm.s.GCPhysGstCR3Monitored = NIL_RTGCPHYS;
    }

#elif PGM_GST_TYPE == PGM_TYPE_PAE
    /* The PDPT page */
# ifndef PGMPOOL_WITH_MIXED_PT_CR3
    AssertFailed();
# endif

    if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
    {
        rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PDPT);
        AssertRC(rc);
    }

    /* The 4 PDs. */
    for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
    {
        if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != NIL_RTGCPHYS)
        {
            Assert(pVM->pgm.s.enmShadowMode == PGMMODE_PAE || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX);
            int rc2 = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i);
            AssertRC(rc2);
            if (RT_FAILURE(rc2))
                rc = rc2;
            pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = NIL_RTGCPHYS;
        }
    }
#else
    /* prot/real/amd64 mode stub */
#endif
    return rc;
}

#undef LOG_GROUP
#define LOG_GROUP LOG_GROUP_PGM

#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */


#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64
/**
 * Updates one virtual handler range.
 *
 * @returns 0
 * @param   pNode   Pointer to a PGMVIRTHANDLER.
 * @param   pvUser  Pointer to a PGMVHUARGS structure (see PGM.cpp).
 */
static DECLCALLBACK(int) PGM_GST_NAME(VirtHandlerUpdateOne)(PAVLROGCPTRNODECORE pNode, void *pvUser)
{
    PPGMVIRTHANDLER pCur   = (PPGMVIRTHANDLER)pNode;
    PPGMHVUSTATE    pState = (PPGMHVUSTATE)pvUser;
    Assert(pCur->enmType != PGMVIRTHANDLERTYPE_HYPERVISOR);

#if PGM_GST_TYPE == PGM_TYPE_32BIT
    PX86PD          pPDSrc = pgmGstGet32bitPDPtr(&pState->pVM->pgm.s);
#endif

    RTGCPTR         GCPtr = pCur->Core.Key;
#if PGM_GST_TYPE != PGM_TYPE_AMD64
    /* skip all stuff above 4GB if not AMD64 mode. */
    if (GCPtr >= _4G)
        return 0;
#endif

    unsigned        offPage = GCPtr & PAGE_OFFSET_MASK;
    unsigned        iPage = 0;
    while (iPage < pCur->cPages)
    {
#if PGM_GST_TYPE == PGM_TYPE_32BIT
        X86PDE      Pde = pPDSrc->a[GCPtr >> X86_PD_SHIFT];
#elif PGM_GST_TYPE == PGM_TYPE_PAE
        X86PDEPAE   Pde = pgmGstGetPaePDE(&pState->pVM->pgm.s, GCPtr);
#elif PGM_GST_TYPE == PGM_TYPE_AMD64
        X86PDEPAE   Pde = pgmGstGetLongModePDE(&pState->pVM->pgm.s, GCPtr);
#endif
        if (Pde.n.u1Present)
        {
            if (    !Pde.b.u1Size
# if PGM_GST_TYPE != PGM_TYPE_AMD64
                ||  !(pState->cr4 & X86_CR4_PSE)
# endif
                )
            {
                /*
                 * Normal page table.
                 */
                PGSTPT pPT;
                int rc = PGM_GCPHYS_2_PTR(pState->pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
                if (RT_SUCCESS(rc))
                {
                    for (unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
                         iPTE < RT_ELEMENTS(pPT->a) && iPage < pCur->cPages;
                         iPTE++, iPage++, GCPtr += PAGE_SIZE, offPage = 0)
                    {
                        GSTPTE      Pte = pPT->a[iPTE];
                        RTGCPHYS    GCPhysNew;
                        if (Pte.n.u1Present)
                            GCPhysNew = (RTGCPHYS)(pPT->a[iPTE].u & GST_PTE_PG_MASK) + offPage;
                        else
                            GCPhysNew = NIL_RTGCPHYS;
                        if (pCur->aPhysToVirt[iPage].Core.Key != GCPhysNew)
                        {
                            if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
                                pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                            AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
                                             ("{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} GCPhysNew=%RGp\n",
                                              pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
                                              pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias, GCPhysNew));
#endif
                            pCur->aPhysToVirt[iPage].Core.Key = GCPhysNew;
                            pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
                        }
                    }
                }
                else
                {
                    /* not-present. */
                    offPage = 0;
                    AssertRC(rc);
                    for (unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
                         iPTE < RT_ELEMENTS(pPT->a) && iPage < pCur->cPages;
                         iPTE++, iPage++, GCPtr += PAGE_SIZE)
                    {
                        if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
                        {
                            pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                            AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
                                             ("{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                                              pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
                                              pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias));
#endif
                            pCur->aPhysToVirt[iPage].Core.Key = NIL_RTGCPHYS;
                            pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
                        }
                    }
                }
            }
            else
            {
                /*
                 * 2/4MB page.
                 */
                RTGCPHYS GCPhys = (RTGCPHYS)(Pde.u & GST_PDE_PG_MASK);
                for (unsigned i4KB = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
                     i4KB < PAGE_SIZE / sizeof(GSTPDE) && iPage < pCur->cPages;
                     i4KB++, iPage++, GCPtr += PAGE_SIZE, offPage = 0)
                {
                    RTGCPHYS GCPhysNew = GCPhys + (i4KB << PAGE_SHIFT) + offPage;
                    if (pCur->aPhysToVirt[iPage].Core.Key != GCPhysNew)
                    {
                        if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
                            pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                        AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
                                         ("{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} GCPhysNew=%RGp\n",
                                          pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
                                          pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias, GCPhysNew));
#endif
                        pCur->aPhysToVirt[iPage].Core.Key = GCPhysNew;
                        pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
                    }
                }
            } /* pde type */
        }
        else
        {
            /* not-present. */
            for (unsigned cPages = (GST_PT_MASK + 1) - ((GCPtr >> GST_PT_SHIFT) & GST_PT_MASK);
                 cPages && iPage < pCur->cPages;
                 cPages--, iPage++, GCPtr += PAGE_SIZE)
            {
                if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
                {
                    pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
                    pCur->aPhysToVirt[iPage].Core.Key = NIL_RTGCPHYS;
                    pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
                }
            }
            offPage = 0;
        }
    } /* for pages in virtual mapping. */

    return 0;
}
#endif /* 32BIT, PAE and AMD64 */


/**
 * Updates the virtual page access handlers.
 *
 * @returns true if bits were flushed.
 * @returns false if bits weren't flushed.
 * @param   pVM     VM handle.
 * @param   cr4     The cr4 register value.
 */
PGM_GST_DECL(bool, HandlerVirtualUpdate)(PVM pVM, uint32_t cr4)
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64

    /** @todo
     * In theory this is not sufficient: the guest can change a single page in a range with invlpg
     */

    /*
     * Resolve any virtual address based access handlers to GC physical addresses.
     * This should be fairly quick.
     */
    PGMHVUSTATE State;

    pgmLock(pVM);
    STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3HandlerVirtualUpdate), a);
    State.pVM   = pVM;
    State.fTodo = pVM->pgm.s.fSyncFlags;
    State.cr4   = cr4;
    RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, true, PGM_GST_NAME(VirtHandlerUpdateOne), &State);
    STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3HandlerVirtualUpdate), a);

    /*
     * Set / reset bits?
     */
    if (State.fTodo & PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL)
    {
        STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3HandlerVirtualReset), b);
        Log(("pgmR3VirtualHandlersUpdate: resets bits\n"));
        RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, true, pgmHandlerVirtualResetOne, pVM);
        pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
        STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3HandlerVirtualReset), b);
    }
    pgmUnlock(pVM);

    return !!(State.fTodo & PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL);

#else /* real / protected */
    return false;
#endif
}

#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY

#if PGM_GST_TYPE == PGM_TYPE_32BIT && !defined(IN_RING3)

/**
 * Write access handler for the Guest CR3 page in 32-bit mode.
 *
 * This will try to interpret the instruction; on failure it falls back to the
 * recompiler. It checks whether the changed PDEs are marked present and
 * conflict with our mappings; on a conflict we'll switch to the host context
 * and resolve it there.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      User argument.
 */
PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
{
    AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));

    /*
     * Try to interpret the instruction.
     */
    uint32_t cb;
    int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
    if (RT_SUCCESS(rc) && cb)
    {
        /*
         * Check if the modified PDEs are present and conflict with our mappings.
         */
        const RTGCPTR   offPD = GCPhysFault & PAGE_OFFSET_MASK;
        const unsigned  iPD1  = offPD / sizeof(X86PDE);
        const unsigned  iPD2  = (offPD + cb - 1) / sizeof(X86PDE);

        Assert(cb > 0 && cb <= 8);
        Assert(iPD1 < X86_PG_ENTRIES);
        Assert(iPD2 < X86_PG_ENTRIES);

#ifdef DEBUG
        Log(("pgmXXGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%x\n", iPD1, iPD1 << X86_PD_SHIFT));
        if (iPD1 != iPD2)
            Log(("pgmXXGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%x\n", iPD2, iPD2 << X86_PD_SHIFT));
#endif

        if (!pVM->pgm.s.fMappingsFixed)
        {
            PX86PD pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);
            if (    (   pPDSrc->a[iPD1].n.u1Present
                     && pgmGetMapping(pVM, (RTGCPTR)(iPD1 << X86_PD_SHIFT)) )
                ||  (   iPD1 != iPD2
                     && pPDSrc->a[iPD2].n.u1Present
                     && pgmGetMapping(pVM, (RTGCPTR)(iPD2 << X86_PD_SHIFT)) )
               )
            {
                STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteConflict);
                VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
                if (rc == VINF_SUCCESS)
                    rc = VINF_PGM_SYNC_CR3;
                Log(("pgmXXGst32BitWriteHandlerCR3: detected conflict iPD1=%#x iPD2=%#x - returns %Rrc\n", iPD1, iPD2, rc));
                return rc;
            }
        }

        STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteHandled);
    }
    else
    {
        Assert(RT_FAILURE(rc));
        if (rc == VERR_EM_INTERPRETER)
            rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
        Log(("pgmXXGst32BitWriteHandlerCR3: returns %Rrc\n", rc));
        STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteUnhandled);
    }
    return rc;
}
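
/*
 * Illustration (not part of this changeset), making the iPD1/iPD2 arithmetic
 * above concrete: an emulated 8-byte write at offset 0x7F8 of the CR3 page
 * covers bytes 0x7F8..0x7FF, so
 *
 *     iPD1 = 0x7F8 / sizeof(X86PDE)           = 0x7F8 / 4 = 510
 *     iPD2 = (0x7F8 + 8 - 1) / sizeof(X86PDE) = 0x7FF / 4 = 511
 *
 * i.e. two PDEs were modified and both must be checked against our mappings,
 * while a naturally aligned 4-byte write always lands in a single PDE.
 */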

#endif /* PGM_TYPE_32BIT && !IN_RING3 */
#if PGM_GST_TYPE == PGM_TYPE_PAE && !defined(IN_RING3)

/**
 * Write access handler for the Guest CR3 page in PAE mode.
 *
 * This will try to interpret the instruction; on failure it falls back to the
 * recompiler. It checks whether the changed PDEs are marked present and
 * conflict with our mappings; on a conflict we'll switch to the host context
 * and resolve it there.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      User argument.
 */
PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
{
    AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));

    /*
     * Try to interpret the instruction.
     */
    uint32_t cb;
    int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
    if (RT_SUCCESS(rc) && cb)
    {
        /*
         * Check if any of the PDs have changed.
         * We'll simply check all of them instead of figuring out which one/two to check.
         */
        PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
        for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
        {
            if (    pGuestPDPT->a[i].n.u1Present
                &&      (pGuestPDPT->a[i].u & X86_PDPE_PG_MASK)
                    !=  pVM->pgm.s.aGCPhysGstPaePDsMonitored[i])
            {
                /*
                 * The PDPE has changed.
                 * We will schedule a monitoring update for the next TLB Flush,
                 * InvalidatePage or SyncCR3.
                 *
                 * This isn't perfect, because a lazy page sync might be dealing with a
                 * half-updated PDPE. However, we assume that the guest OS is disabling
                 * interrupts and being extremely careful (cmpxchg8b) when updating a
                 * PDPE where it's executing.
                 */
                pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
                Log(("pgmXXGstPaeWriteHandlerCR3: detected updated PDPE; [%d] = %#llx, Old GCPhys=%RGp\n",
                     i, pGuestPDPT->a[i].u, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i]));
            }
        }

        STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteHandled);
    }
    else
    {
        Assert(RT_FAILURE(rc));
        STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteUnhandled);
        if (rc == VERR_EM_INTERPRETER)
            rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
    }
    Log(("pgmXXGstPaeWriteHandlerCR3: returns %Rrc\n", rc));
    return rc;
}


/**
 * Write access handler for the Guest PDs in PAE mode.
 *
 * This will try to interpret the instruction; on failure it falls back to the
 * recompiler. It checks whether the changed PDEs are marked present and
 * conflict with our mappings; on a conflict we'll switch to the host context
 * and resolve it there.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      User argument.
 */
PGM_GST_DECL(int, WriteHandlerPD)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
{
    AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));

    /*
     * Try to interpret the instruction.
     */
    uint32_t cb;
    int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
    if (RT_SUCCESS(rc) && cb)
    {
        /*
         * Figure out which of the 4 PDs this is.
         */
        RTGCPTR i;
        PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
        for (i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
            if ((pGuestPDPT->a[i].u & X86_PDPE_PG_MASK) == (GCPhysFault & X86_PTE_PAE_PG_MASK))
            {
                PX86PDPAE       pPDSrc = pgmGstGetPaePD(&pVM->pgm.s, i << X86_PDPT_SHIFT);
                const RTGCPTR   offPD  = GCPhysFault & PAGE_OFFSET_MASK;
                const unsigned  iPD1   = offPD / sizeof(X86PDEPAE);
                const unsigned  iPD2   = (offPD + cb - 1) / sizeof(X86PDEPAE);

                Assert(cb > 0 && cb <= 8);
                Assert(iPD1 < X86_PG_PAE_ENTRIES);
                Assert(iPD2 < X86_PG_PAE_ENTRIES);

# ifdef LOG_ENABLED
                Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD1=%#05x (%x)\n",
                     i, iPD1, (i << X86_PDPT_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT)));
                if (iPD1 != iPD2)
                    Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD2=%#05x (%x)\n",
                         i, iPD2, (i << X86_PDPT_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT)));
# endif

                if (!pVM->pgm.s.fMappingsFixed)
                {
                    if (    (   pPDSrc->a[iPD1].n.u1Present
                             && pgmGetMapping(pVM, (RTGCPTR)((i << X86_PDPT_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT))) )
                        ||  (   iPD1 != iPD2
                             && pPDSrc->a[iPD2].n.u1Present
                             && pgmGetMapping(pVM, (RTGCPTR)((i << X86_PDPT_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT))) )
                       )
                    {
                        Log(("pgmXXGstPaeWriteHandlerPD: detected conflict iPD1=%#x iPD2=%#x\n", iPD1, iPD2));
                        STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteConflict);
                        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
                        return VINF_PGM_SYNC_CR3;
                    }
                }
                break; /* ASSUMES no duplicate entries... */
            }
        Assert(i < 4);

        STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteHandled);
    }
    else
    {
        Assert(RT_FAILURE(rc));
        if (rc == VERR_EM_INTERPRETER)
            rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
        else
            Log(("pgmXXGstPaeWriteHandlerPD: returns %Rrc\n", rc));
        STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteUnhandled);
    }
    return rc;
}

#endif /* PGM_TYPE_PAE && !IN_RING3 */

#endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */