VirtualBox

Changeset 26616 in vbox


Ignore:
Timestamp:
Feb 17, 2010 3:44:15 PM (15 years ago)
Author:
vboxsync
Message:

More large page work (disabled)

Location:
trunk
Files:
6 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/pgm.h

    r26606 r26616  
    534534VMMR3DECL(void)     PGMR3PhysChunkInvalidateTLB(PVM pVM);
    535535VMMR3DECL(int)      PGMR3PhysAllocateHandyPages(PVM pVM);
    536 VMMR3DECL(int)      PGMR3PhysAllocateLargeHandyPage(PVM pVM);
     536VMMR3DECL(int)      PGMR3PhysAllocateLargeHandyPage(PVM pVM, RTGCPHYS GCPhys);
    537537
    538538VMMR3DECL(void)     PGMR3ReleaseOwnedLocks(PVM pVM);
  • trunk/src/VBox/VMM/PGMInternal.h

    r26606 r26616  
    612612     *  - [8-9]: u2HandlerVirtStateY - the virtual handler state
    613613     *    (PGM_PAGE_HNDL_VIRT_STATE_*).
    614      *  - [14]:  u1LargePage - flag indicating that it's part of a large (2 MB) page
     614     *  - [13-14]: u2PDEType  - paging structure needed to map the page (PGM_PAGE_PDE_TYPE_*)
    615615     *  - [15]:  fWrittenToY - flag indicating that a write monitored page was
    616616     *    written to when set.
     
    832832#define PGM_PAGE_IS_WRITTEN_TO(pPage)       ( !!((pPage)->u16MiscY.au8[1] & UINT8_C(0x80)) )
    833833
    834 /**
    835  * Marks the page as part of a large continuous page
     834/** @name PT usage values (PGMPAGE::u2PDEType).
     835 *
     836 * @{ */
     837/** Either as a PT or PDE. */
     838#define PGM_PAGE_PDE_TYPE_DONTCARE             0
     839/** Must use a page table to map the range. */
     840#define PGM_PAGE_PDE_TYPE_PT                   1
     842/** Can use a page directory entry to map the continuous range. */
     842#define PGM_PAGE_PDE_TYPE_PDE                  2
     843/** @} */
     844
     845/**
     846 * Set the PDE type of the page
    836847 * @param   pPage       Pointer to the physical guest page tracking structure.
    837  */
    838 #define PGM_PAGE_SET_LARGE_PAGE(pPage)      do { (pPage)->u16MiscY.au8[1] |= UINT8_C(0x40); } while (0)
    839 
    840 /**
    841  * Clears the page as part of a large continuous page indicator.
    842  * @param   pPage       Pointer to the physical guest page tracking structure.
    843  */
    844 #define PGM_PAGE_CLEAR_LARGE_PAGE(pPage)    do { (pPage)->u16MiscY.au8[1] &= UINT8_C(0xbf); } while (0)
     848 * @param   uType       PGM_PAGE_PDE_TYPE_*
     849 */
     850#define PGM_PAGE_SET_PDE_TYPE(pPage, uType) \
     851    do { \
     852        (pPage)->u16MiscY.au8[1] = ((pPage)->u16MiscY.au8[1] & UINT8_C(0x9f)) \
     853                                 | (((uType)                 & UINT8_C(0x03)) << 5); \
     854    } while (0)
    845855
    846856/**
     
    849859 * @param   pPage       Pointer to the physical guest page tracking structure.
    850860 */
    851 #define PGM_PAGE_IS_LARGE_PAGE(pPage)       ( !!((pPage)->u16MiscY.au8[1] & UINT8_C(0x40)) )
     861#define PGM_PAGE_GET_PDE_TYPE(pPage)       ( ((pPage)->u16MiscY.au8[1] & UINT8_C(0x60)) >> 5)
    852862
    853863/** Enabled optimized access handler tests.
     
    33083318
    33093319int             pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
     3320int             pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys);
    33103321int             pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys);
    33113322int             pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys);
  • trunk/src/VBox/VMM/PGMPhys.cpp

    r26606 r26616  
    12561256                {
    12571257                    case PGMPAGETYPE_RAM:
     1258                        /* @todo deal with large pages. */
     1259                        Assert(PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE);
     1260
    12581261                        if (!PGM_PAGE_IS_ZERO(pPage))
    12591262                        {
     
    20622065            PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ZERO);
    20632066            PGM_PAGE_SET_PAGEID(pPageDst, NIL_GMM_PAGEID);
     2067            PGM_PAGE_SET_PDE_TYPE(pPageDst, PGM_PAGE_PDE_TYPE_DONTCARE);
    20642068
    20652069            pVM->pgm.s.cZeroPages++;
     
    31733177 *
    31743178 * @param   pVM         The VM handle.
    3175  */
    3176 VMMR3DECL(int) PGMR3PhysAllocateLargeHandyPage(PVM pVM)
     3179 * @param   GCPhys      GC physical start address of the 2 MB range
     3180 */
     3181VMMR3DECL(int) PGMR3PhysAllocateLargeHandyPage(PVM pVM, RTGCPHYS GCPhys)
    31773182{
    31783183    pgmLock(pVM);
     
    31863191        RTHCPHYS HCPhys = pVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys;
    31873192
    3188         /*
    3189          * Clear the pages.
     3193        void *pv;
     3194
     3195        /* Map the large page into our address space.
     3196         *
     3197         * Note: assuming that within the 2 MB range:
     3198         * - GCPhys + PAGE_SIZE = HCPhys + PAGE_SIZE (whole point of this exercise)
     3199         * - user space mapping is continuous as well
     3200         * - page id (GCPhys) + 1 = page id (GCPhys + PAGE_SIZE)
    31903201         */
    3191         for (unsigned i = 0; i < _2M/PAGE_SIZE; i++)
    3192         {
    3193             void *pv;
    3194 
    3195             /* Map the large page into our address space. Could only fail the first time */
    3196             rc = pgmPhysPageMapByPageID(pVM, idPage, HCPhys, &pv);
    3197             AssertLogRelMsgBreak(RT_SUCCESS(rc), ("idPage=%#x HCPhysGCPhys=%RHp rc=%Rrc", idPage, HCPhys, rc));
    3198             ASMMemZeroPage(pv);
    3199             idPage++;
    3200             HCPhys += PAGE_SIZE;
    3201             Log3(("PGMR3PhysAllocateLargePage: idPage=%#x HCPhys=%RGp\n", idPage, HCPhys));
    3202         }
     3202        rc = pgmPhysPageMapByPageID(pVM, idPage, HCPhys, &pv);
     3203        AssertLogRelMsg(RT_SUCCESS(rc), ("idPage=%#x HCPhysGCPhys=%RHp rc=%Rrc", idPage, HCPhys, rc));
     3204
     3205        if (RT_SUCCESS(rc))
     3206        {
     3207            /*
     3208             * Clear the pages.
     3209             */
     3210            for (unsigned i = 0; i < _2M/PAGE_SIZE; i++)
     3211            {
     3212                ASMMemZeroPage(pv);
     3213
     3214                PPGMPAGE pPage;
     3215                rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
     3216                AssertRC(rc);
     3217
     3218                Assert(PGM_PAGE_IS_ZERO(pPage));
     3219                STAM_COUNTER_INC(&pVM->pgm.s.StatRZPageReplaceZero);
     3220                pVM->pgm.s.cZeroPages--;
     3221
     3222                /*
     3223                 * Do the PGMPAGE modifications.
     3224                 */
     3225                pVM->pgm.s.cPrivatePages++;
     3226                PGM_PAGE_SET_HCPHYS(pPage, HCPhys);
     3227                PGM_PAGE_SET_PAGEID(pPage, idPage);
     3228                PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
     3229                PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_PDE);
     3230
     3231                /* Somewhat dirty assumption that page ids are increasing. */
     3232                idPage++;
     3233
     3234                HCPhys += PAGE_SIZE;
     3235                GCPhys += PAGE_SIZE;
     3236
     3237                pv = (void *)((uintptr_t)pv + PAGE_SIZE);
     3238
     3239                Log3(("PGMR3PhysAllocateLargePage: idPage=%#x HCPhys=%RGp\n", idPage, HCPhys));
     3240            }
     3241            /* Flush all TLBs. */
     3242            PGM_INVL_ALL_VCPU_TLBS(pVM);
     3243            PGMPhysInvalidatePageMapTLB(pVM);
     3244       }
    32033245        pVM->pgm.s.cLargeHandyPages = 0;
    32043246    }
     
    33893431    PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ZERO);
    33903432    PGM_PAGE_SET_PAGEID(pPage, NIL_GMM_PAGEID);
     3433    PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_DONTCARE);
    33913434
    33923435    /* Flush physical page map TLB entry. */
  • trunk/src/VBox/VMM/VMM.cpp

    r26606 r26616  
    20752075        case VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE:
    20762076        {
    2077             pVCpu->vmm.s.rcCallRing3 = PGMR3PhysAllocateLargeHandyPage(pVM);
     2077            pVCpu->vmm.s.rcCallRing3 = PGMR3PhysAllocateLargeHandyPage(pVM, pVCpu->vmm.s.u64CallRing3Arg);
    20782078            break;
    20792079        }
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r26577 r26616  
    29372937    Assert(!PdeDst.n.u1Present); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/
    29382938
    2939 # if PGM_SHW_TYPE == PGM_TYPE_EPT
    2940 
     2939# if (PGM_SHW_TYPE == PGM_TYPE_EPT) && (HC_ARCH_BITS == 64) && defined(RT_OS_WINDOWS) && defined(DEBUG_sandervl)
     2940    PPGMPAGE pPage;
     2941    rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPtrPage & SHW_PD_MASK, &pPage);
     2942    if (    RT_SUCCESS(rc)
     2943        &&  PGM_PAGE_GET_TYPE(pPage)  == PGMPAGETYPE_RAM)
     2944    {
     2945        RTHCPHYS HCPhys = NIL_RTHCPHYS;
     2946        unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pPage);
     2947
     2948        if  (uPDEType == PGM_PAGE_PDE_TYPE_PDE)
     2949        {
     2950            /* Previously allocated 2 MB range can be reused. */
     2951            Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
     2952            HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
     2953        }
     2954        else
     2955        if  (   uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
     2956             && PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ZERO)
     2957        {
     2958            RTGCPHYS GCPhysBase = GCPtrPage & SHW_PD_MASK;
     2959            RTGCPHYS GCPhys = GCPhysBase;
     2960            unsigned iPage;
     2961
     2962            /* Lazy approach: check all pages in the 2 MB range.
     2963             * The whole range must be ram and unallocated
     2964             */
     2965            for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
     2966            {
     2967                rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
     2968                if  (   RT_FAILURE(rc)
     2969                     || PGM_PAGE_GET_TYPE(pPage)  != PGMPAGETYPE_RAM
     2970                     || PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
     2971                {
     2972                    break;
     2973                }
     2974                Assert(PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
     2975                GCPhys += PAGE_SIZE;
     2976            }
     2977            /* Fetch the start page of the 2 MB range again. */
     2978            rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysBase, &pPage);
     2979            AssertRC(rc);   /* can't fail */
     2980
     2981            if (iPage != _2M/PAGE_SIZE)
     2982            {
     2983                /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
     2984                PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_PT);
     2985            }
     2986            else
     2987            {
     2988                rc = pgmPhysAllocLargePage(pVM, GCPhysBase);
     2989                if (RT_SUCCESS(rc))
     2990                {   
     2991                    Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
     2992                    HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
     2993                }
     2994            }
     2995        }
     2996
     2997        if (HCPhys != NIL_RTHCPHYS)
     2998        {
     2999            PdeDst.u &= X86_PDE_AVL_MASK;
     3000            PdeDst.u |= HCPhys;
     3001            PdeDst.n.u1Present   = 1;
     3002            PdeDst.n.u1Write     = 1;
     3003            PdeDst.n.u1Execute   = 1;
     3004            PdeDst.b.u1Size      = 1;
     3005            PdeDst.b.u1IgnorePAT = 1;
     3006            PdeDst.b.u3EMT       = VMX_EPT_MEMTYPE_WB;
     3007            ASMAtomicWriteSize(pPdeDst, PdeDst.u);
     3008
     3009            STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
     3010            return VINF_SUCCESS;
     3011        }
     3012    }
    29413013# endif
    29423014
  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

    r26491 r26616  
    436436    PGM_PAGE_SET_PAGEID(pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
    437437    PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
     438    PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_PT);
    438439    PGMPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
    439440
     
    444445}
    445446
     447/**
     448 * Replace a 2 MB range of zero pages with new pages that we can write to.
     449 *
     450 * @returns The following VBox status codes.
     451 * @retval  VINF_SUCCESS on success, pPage is modified.
     452 * @retval  VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
     453 * @retval  VERR_EM_NO_MEMORY if we're totally out of memory.
     454 *
     455 * @todo    Propagate VERR_EM_NO_MEMORY up the call tree.
     456 *
     457 * @param   pVM         The VM address.
     458 * @param   GCPhys      The address of the page.
     459 *
     460 * @remarks Must be called from within the PGM critical section. It may
     461 *          nip back to ring-3/0 in some cases.
     462 *
     463 * @remarks This function shouldn't really fail, however if it does
     464 *          it probably means we've screwed up the size of handy pages and/or
     465 *          the low-water mark. Or, that some device I/O is causing a lot of
     466 *          pages to be allocated while the host is in a low-memory
     467 *          condition. This latter should be handled elsewhere and in a more
     468 *          controlled manner, it's on the @bugref{3170} todo list...
     469 */
     470int pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys)
     471{
     472    LogFlow(("pgmPhysAllocLargePage: %RGp\n", GCPhys));
     473
     474    /*
     475     * Prereqs.
     476     */
     477    Assert(PGMIsLocked(pVM));
     478    Assert((GCPhys & X86_PD_PAE_MASK) == 0);
     479
     480#ifdef IN_RING3
     481    int rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhys);
     482#else
     483    int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhys);
     484#endif
     485    return rc;
     486}
    446487
    447488/**
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette