VirtualBox

Changeset 16260 in vbox


Ignore:
Timestamp:
Jan 27, 2009 10:45:39 AM (16 years ago)
Author:
vboxsync
Message:

More paging updates

Location:
trunk/src/VBox/VMM
Files:
2 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/PGMGst.h

    r14301 r16260  
    113 113 static DECLCALLBACK(int) pgmR3Gst32BitWriteHandlerCR3(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
    114 114 static DECLCALLBACK(int) pgmR3GstPAEWriteHandlerCR3(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
    115 static DECLCALLBACK(int) pgmR3GstPAEWriteHandlerPD(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
    116 115 #endif
    117116
     
    404403}
    405404
    406 # if 0
    407 /**
    408  * Physical write access for Guest CR3.
    409  *
    410  * @returns VINF_SUCCESS if the handler have carried out the operation.
    411  * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
    412  * @param   pVM             VM Handle.
    413  * @param   GCPhys          The physical address the guest is writing to.
    414  * @param   pvPhys          The HC mapping of that address.
    415  * @param   pvBuf           What the guest is reading/writing.
    416  * @param   cbBuf           How much it's reading/writing.
    417  * @param   enmAccessType   The access type.
    418  * @param   pvUser          User argument.
    419  */
    420 static DECLCALLBACK(int) pgmR3GstPAEWriteHandlerPD(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
    421 {
    422     AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
    423     Assert(enmAccessType == PGMACCESSTYPE_WRITE);
    424     Log2(("pgmR3GstPAEWriteHandlerPD: ff=%#x GCPhys=%RGp pvPhys=%p cbBuf=%d pvBuf={%.*Rhxs}\n", pVM->fForcedActions, GCPhys, pvPhys, cbBuf, cbBuf, pvBuf));
    425 
    426     /*
    427      * Do the write operation.
    428      */
    429     memcpy(pvPhys, pvBuf, cbBuf);
    430     if (    !pVM->pgm.s.fMappingsFixed
    431         &&  !VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
    432     {
    433         /*
    434          * Figure out which of the 4 PDs this is.
    435          */
    436         unsigned i;
    437         for (i = 0; i < 4; i++)
    438             if (pVM->pgm.s.pGstPaePdptHC->a[i].u == (GCPhys & X86_PTE_PAE_PG_MASK))
    439             {
    440                 PX86PDPAE       pPDSrc = pgmGstGetPaePD(&pVM->pgm.s, i << X86_PDPT_SHIFT);
    441                 const RTGCPTR   offPD  = GCPhys & PAGE_OFFSET_MASK;
    442                 const unsigned  iPD1   = offPD / sizeof(X86PDEPAE);
    443                 const unsigned  iPD2   = (offPD + cbBuf - 1) / sizeof(X86PDEPAE);
    444                 Assert(iPD1 - iPD2 <= 1);
    445                 if (    (   pPDSrc->a[iPD1].n.u1Present
    446                          && pgmGetMapping(pVM, (i << X86_PDPT_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT)) )
    447                     ||  (   iPD1 != iPD2
    448                          && pPDSrc->a[iPD2].n.u1Present
    449                          && pgmGetMapping(pVM, (i << X86_PDPT_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT)) )
    450                    )
    451                 {
    452                     Log(("pgmR3GstPaePD3WriteHandler: detected conflict. i=%d iPD1=%#x iPD2=%#x GCPhys=%RGp\n",
    453                          i, iPD1, iPD2, GCPhys));
    454                     STAM_COUNTER_INC(&pVM->pgm.s.StatR3GuestPDWriteConflict);
    455                     VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
    456                 }
    457                 break; /* ASSUMES no duplicate entries... */
    458             }
    459         Assert(i < 4);
    460     }
    461 
    462     STAM_COUNTER_INC(&pVM->pgm.s.StatR3GuestPDWrite);
    463     return VINF_SUCCESS;
    464 }
    465 # endif
    466 
    467 405 #endif /* PAE */
    468 406
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r16232 r16260  
    154154
    155155#   ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
     156    PX86PDPAE       pPDDst;
    156157#    if PGM_GST_TYPE != PGM_TYPE_PAE
    157     PX86PDPAE       pPDDst;
    158158    X86PDPE         PdpeSrc;
    159159
     
    3234 3234    return VINF_SUCCESS;
    3235 3235
    3236 #elif PGM_SHW_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
     3236 #elif PGM_SHW_TYPE == PGM_TYPE_AMD64
    32373237    /*
    32383238     * AMD64 (Shw & Gst) - No need to check all paging levels; we zero
     
    32423242
    32433243#else /* PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT && PGM_SHW_TYPE != PGM_TYPE_AMD64 */
     3244
     3245# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
     3246    /* Nothing to do when mappings are fixed. */
     3247    if (!pVM->pgm.s.fMappingsFixed)
     3248        return VINF_SUCCESS;
     3249# endif
     3250
    32443251    /*
    32453252     * PAE and 32-bit legacy mode (shadow).
     
    32793286    PPGMPOOL    pPool         = pVM->pgm.s.CTX_SUFF(pPool);
    32803287
     3288#  ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
     3289    /* Mappings are always enabled when we get here. */
     3290    Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
     3291    pMapping      = pVM->pgm.s.CTX_SUFF(pMappings);
     3292    iPdNoMapping  = (pMapping) ? (pMapping->GCPtr >> GST_PD_SHIFT) : ~0U;
     3293#  else
    32813294    /* Only check mappings if they are supposed to be put into the shadow page table. */
    32823295    if (pgmMapAreMappingsEnabled(&pVM->pgm.s))
     
    32903303        iPdNoMapping  = ~0U;
    32913304    }
     3305#  endif
    32923306
    32933307#  if PGM_GST_TYPE == PGM_TYPE_PAE
     
    33003314        PX86PDPT        pPdptDst  = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
    33013315
     3316#   ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    33023317        if (pPDSrc == NULL)
    33033318        {
     
    33213336            continue;
    33223337        }
     3338#    endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */
    33233339#  else  /* PGM_GST_TYPE != PGM_TYPE_PAE */
    33243340    {
     
    33583374#   endif
    33593375                {
     3376#   ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    33603377                    if (pVM->pgm.s.fMappingsFixed)
    33613378                    {
     
    33643381                        Assert(PGM_GST_TYPE == PGM_TYPE_32BIT || (iPD + cPTs - 1) / X86_PG_PAE_ENTRIES == iPD / X86_PG_PAE_ENTRIES);
    33653382                        iPD += cPTs - 1;
    3366 #   if PGM_SHW_TYPE != PGM_GST_TYPE /* SHW==PAE && GST==32BIT */
     3383#    if PGM_SHW_TYPE != PGM_GST_TYPE /* SHW==PAE && GST==32BIT */
    33673384                        pPDEDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, (uint32_t)(iPD + 1) << GST_PD_SHIFT);
    3368 #   else
     3385#    else
    33693386                        pPDEDst += cPTs;
    3370 #   endif
     3387#    endif
    33713388                        pMapping = pMapping->CTX_SUFF(pNext);
    33723389                        iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
    33733390                        continue;
    33743391                    }
     3392#   endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */
    33753393#   ifdef IN_RING3
    33763394#    if PGM_GST_TYPE == PGM_TYPE_32BIT
     
    33983416#  endif /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
    33993417
     3418#  ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    34003419                /*
    34013420                 * Sync page directory entry.
     
    34863505                    pPDEDst++;
    34873506                } /* foreach 2MB PAE PDE in 4MB guest PDE */
     3507#  endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */
    34883508            }
    34893509#  if PGM_GST_TYPE == PGM_TYPE_PAE
     
    34933513#  endif
    34943514            {
     3515#  ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    34953516                /*
    34963517                 * Check if there is any page directory to mark not present here.
    34973518                 */
    3498 #   if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
     3519 #   if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
    34993520                for (unsigned i = 0, iPdShw = iPD * 2; i < 2; i++, iPdShw++) /* pray that the compiler unrolls this */
    3500 #   elif PGM_GST_TYPE == PGM_TYPE_PAE
     3521 #   elif PGM_GST_TYPE == PGM_TYPE_PAE
    35013522                const unsigned iPdShw = iPD + iPdpt * X86_PG_PAE_ENTRIES;
    3502 #   else
     3523 #   else
    35033524                const unsigned iPdShw = iPD;
    3504 #   endif
     3525 #   endif
    35053526                {
    35063527                    if (pPDEDst->n.u1Present)
     
    35123533                    pPDEDst++;
    35133534                }
     3535#  endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */
    35143536            }
    35153537            else
     
    35223544
    35233545                Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
     3546#   ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    35243547                if (pVM->pgm.s.fMappingsFixed)
    35253548                {
     
    35293552                }
    35303553                else
     3554#   endif
    35313555                {
    35323556                    /*
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette