Changeset 26616 in vbox
- Timestamp:
- Feb 17, 2010 3:44:15 PM (15 years ago)
- Location:
- trunk
- Files:
-
- 6 edited
-
include/VBox/pgm.h (modified) (1 diff)
-
src/VBox/VMM/PGMInternal.h (modified) (4 diffs)
-
src/VBox/VMM/PGMPhys.cpp (modified) (5 diffs)
-
src/VBox/VMM/VMM.cpp (modified) (1 diff)
-
src/VBox/VMM/VMMAll/PGMAllBth.h (modified) (1 diff)
-
src/VBox/VMM/VMMAll/PGMAllPhys.cpp (modified) (2 diffs)
Legend:
- Unmodified
- Added
- Removed
-
trunk/include/VBox/pgm.h
r26606 r26616 534 534 VMMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM); 535 535 VMMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM); 536 VMMR3DECL(int) PGMR3PhysAllocateLargeHandyPage(PVM pVM );536 VMMR3DECL(int) PGMR3PhysAllocateLargeHandyPage(PVM pVM, RTGCPHYS GCPhys); 537 537 538 538 VMMR3DECL(void) PGMR3ReleaseOwnedLocks(PVM pVM); -
trunk/src/VBox/VMM/PGMInternal.h
r26606 r26616 612 612 * - [8-9]: u2HandlerVirtStateY - the virtual handler state 613 613 * (PGM_PAGE_HNDL_VIRT_STATE_*). 614 * - [1 4]: u1LargePage - flag indicating that it's part of a large (2 MB) page614 * - [13-14]: u2PDEType - paging structure needed to map the page (PGM_PAGE_PDE_TYPE_*) 615 615 * - [15]: fWrittenToY - flag indicating that a write monitored page was 616 616 * written to when set. … … 832 832 #define PGM_PAGE_IS_WRITTEN_TO(pPage) ( !!((pPage)->u16MiscY.au8[1] & UINT8_C(0x80)) ) 833 833 834 /** 835 * Marks the page as part of a large continuous page 834 /** @name PT usage values (PGMPAGE::u2PDEType). 835 * 836 * @{ */ 837 /** Either as a PT or PDE. */ 838 #define PGM_PAGE_PDE_TYPE_DONTCARE 0 839 /** Must use a page table to map the range. */ 840 #define PGM_PAGE_PDE_TYPE_PT 1 841 /** Can use a page directory entry to map the continuous range. */ 842 #define PGM_PAGE_PDE_TYPE_PDE 2 843 /** @} */ 844 845 /** 846 * Set the PDE type of the page 836 847 * @param pPage Pointer to the physical guest page tracking structure. 837 */ 838 #define PGM_PAGE_SET_LARGE_PAGE(pPage) do { (pPage)->u16MiscY.au8[1] |= UINT8_C(0x40); } while (0) 839 840 /** 841 * Clears the page as part of a large continuous page indicator. 842 * @param pPage Pointer to the physical guest page tracking structure. 843 */ 844 #define PGM_PAGE_CLEAR_LARGE_PAGE(pPage) do { (pPage)->u16MiscY.au8[1] &= UINT8_C(0xbf); } while (0) 848 * @param uType PGM_PAGE_PDE_TYPE_* 849 */ 850 #define PGM_PAGE_SET_PDE_TYPE(pPage, uType) \ 851 do { \ 852 (pPage)->u16MiscY.au8[1] = ((pPage)->u16MiscY.au8[1] & UINT8_C(0x9f)) \ 853 | (((uType) & UINT8_C(0x03)) << 5); \ 854 } while (0) 845 855 846 856 /** … … 849 859 * @param pPage Pointer to the physical guest page tracking structure. 
850 860 */ 851 #define PGM_PAGE_ IS_LARGE_PAGE(pPage) ( !!((pPage)->u16MiscY.au8[1] & UINT8_C(0x40)))861 #define PGM_PAGE_GET_PDE_TYPE(pPage) ( ((pPage)->u16MiscY.au8[1] & UINT8_C(0x60)) >> 5) 852 862 853 863 /** Enabled optimized access handler tests. … … 3308 3318 3309 3319 int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys); 3320 int pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys); 3310 3321 int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys); 3311 3322 int pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys); -
trunk/src/VBox/VMM/PGMPhys.cpp
r26606 r26616 1256 1256 { 1257 1257 case PGMPAGETYPE_RAM: 1258 /* @todo deal with large pages. */ 1259 Assert(PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE); 1260 1258 1261 if (!PGM_PAGE_IS_ZERO(pPage)) 1259 1262 { … … 2062 2065 PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ZERO); 2063 2066 PGM_PAGE_SET_PAGEID(pPageDst, NIL_GMM_PAGEID); 2067 PGM_PAGE_SET_PDE_TYPE(pPageDst, PGM_PAGE_PDE_TYPE_DONTCARE); 2064 2068 2065 2069 pVM->pgm.s.cZeroPages++; … … 3173 3177 * 3174 3178 * @param pVM The VM handle. 3175 */ 3176 VMMR3DECL(int) PGMR3PhysAllocateLargeHandyPage(PVM pVM) 3179 * @param GCPhys GC physical start address of the 2 MB range 3180 */ 3181 VMMR3DECL(int) PGMR3PhysAllocateLargeHandyPage(PVM pVM, RTGCPHYS GCPhys) 3177 3182 { 3178 3183 pgmLock(pVM); … … 3186 3191 RTHCPHYS HCPhys = pVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys; 3187 3192 3188 /* 3189 * Clear the pages. 3193 void *pv; 3194 3195 /* Map the large page into our address space. 3196 * 3197 * Note: assuming that within the 2 MB range: 3198 * - GCPhys + PAGE_SIZE = HCPhys + PAGE_SIZE (whole point of this exercise) 3199 * - user space mapping is continuous as well 3200 * - page id (GCPhys) + 1 = page id (GCPhys + PAGE_SIZE) 3190 3201 */ 3191 for (unsigned i = 0; i < _2M/PAGE_SIZE; i++) 3192 { 3193 void *pv; 3194 3195 /* Map the large page into our address space. Could only fail the first time */ 3196 rc = pgmPhysPageMapByPageID(pVM, idPage, HCPhys, &pv); 3197 AssertLogRelMsgBreak(RT_SUCCESS(rc), ("idPage=%#x HCPhysGCPhys=%RHp rc=%Rrc", idPage, HCPhys, rc)); 3198 ASMMemZeroPage(pv); 3199 idPage++; 3200 HCPhys += PAGE_SIZE; 3201 Log3(("PGMR3PhysAllocateLargePage: idPage=%#x HCPhys=%RGp\n", idPage, HCPhys)); 3202 } 3202 rc = pgmPhysPageMapByPageID(pVM, idPage, HCPhys, &pv); 3203 AssertLogRelMsg(RT_SUCCESS(rc), ("idPage=%#x HCPhysGCPhys=%RHp rc=%Rrc", idPage, HCPhys, rc)); 3204 3205 if (RT_SUCCESS(rc)) 3206 { 3207 /* 3208 * Clear the pages. 
3209 */ 3210 for (unsigned i = 0; i < _2M/PAGE_SIZE; i++) 3211 { 3212 ASMMemZeroPage(pv); 3213 3214 PPGMPAGE pPage; 3215 rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage); 3216 AssertRC(rc); 3217 3218 Assert(PGM_PAGE_IS_ZERO(pPage)); 3219 STAM_COUNTER_INC(&pVM->pgm.s.StatRZPageReplaceZero); 3220 pVM->pgm.s.cZeroPages--; 3221 3222 /* 3223 * Do the PGMPAGE modifications. 3224 */ 3225 pVM->pgm.s.cPrivatePages++; 3226 PGM_PAGE_SET_HCPHYS(pPage, HCPhys); 3227 PGM_PAGE_SET_PAGEID(pPage, idPage); 3228 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED); 3229 PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_PDE); 3230 3231 /* Somewhat dirty assumption that page ids are increasing. */ 3232 idPage++; 3233 3234 HCPhys += PAGE_SIZE; 3235 GCPhys += PAGE_SIZE; 3236 3237 pv = (void *)((uintptr_t)pv + PAGE_SIZE); 3238 3239 Log3(("PGMR3PhysAllocateLargePage: idPage=%#x HCPhys=%RGp\n", idPage, HCPhys)); 3240 } 3241 /* Flush all TLBs. */ 3242 PGM_INVL_ALL_VCPU_TLBS(pVM); 3243 PGMPhysInvalidatePageMapTLB(pVM); 3244 } 3203 3245 pVM->pgm.s.cLargeHandyPages = 0; 3204 3246 } … … 3389 3431 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ZERO); 3390 3432 PGM_PAGE_SET_PAGEID(pPage, NIL_GMM_PAGEID); 3433 PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_DONTCARE); 3391 3434 3392 3435 /* Flush physical page map TLB entry. */ -
trunk/src/VBox/VMM/VMM.cpp
r26606 r26616 2075 2075 case VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE: 2076 2076 { 2077 pVCpu->vmm.s.rcCallRing3 = PGMR3PhysAllocateLargeHandyPage(pVM );2077 pVCpu->vmm.s.rcCallRing3 = PGMR3PhysAllocateLargeHandyPage(pVM, pVCpu->vmm.s.u64CallRing3Arg); 2078 2078 break; 2079 2079 } -
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r26577 r26616 2937 2937 Assert(!PdeDst.n.u1Present); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/ 2938 2938 2939 # if PGM_SHW_TYPE == PGM_TYPE_EPT 2940 2939 # if (PGM_SHW_TYPE == PGM_TYPE_EPT) && (HC_ARCH_BITS == 64) && defined(RT_OS_WINDOWS) && defined(DEBUG_sandervl) 2940 PPGMPAGE pPage; 2941 rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPtrPage & SHW_PD_MASK, &pPage); 2942 if ( RT_SUCCESS(rc) 2943 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM) 2944 { 2945 RTHCPHYS HCPhys = NIL_RTHCPHYS; 2946 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pPage); 2947 2948 if (uPDEType == PGM_PAGE_PDE_TYPE_PDE) 2949 { 2950 /* Previously allocated 2 MB range can be reused. */ 2951 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED); 2952 HCPhys = PGM_PAGE_GET_HCPHYS(pPage); 2953 } 2954 else 2955 if ( uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE 2956 && PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ZERO) 2957 { 2958 RTGCPHYS GCPhysBase = GCPtrPage & SHW_PD_MASK; 2959 RTGCPHYS GCPhys = GCPhysBase; 2960 unsigned iPage; 2961 2962 /* Lazy approach: check all pages in the 2 MB range. 2963 * The whole range must be ram and unallocated 2964 */ 2965 for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++) 2966 { 2967 rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage); 2968 if ( RT_FAILURE(rc) 2969 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM 2970 || PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED) 2971 { 2972 break; 2973 } 2974 Assert(PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_DONTCARE); 2975 GCPhys += PAGE_SIZE; 2976 } 2977 /* Fetch the start page of the 2 MB range again. */ 2978 rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysBase, &pPage); 2979 AssertRC(rc); /* can't fail */ 2980 2981 if (iPage != _2M/PAGE_SIZE) 2982 { 2983 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. 
*/ 2984 PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_PT); 2985 } 2986 else 2987 { 2988 rc = pgmPhysAllocLargePage(pVM, GCPhysBase); 2989 if (RT_SUCCESS(rc)) 2990 { 2991 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED); 2992 HCPhys = PGM_PAGE_GET_HCPHYS(pPage); 2993 } 2994 } 2995 } 2996 2997 if (HCPhys != NIL_RTHCPHYS) 2998 { 2999 PdeDst.u &= X86_PDE_AVL_MASK; 3000 PdeDst.u |= HCPhys; 3001 PdeDst.n.u1Present = 1; 3002 PdeDst.n.u1Write = 1; 3003 PdeDst.n.u1Execute = 1; 3004 PdeDst.b.u1Size = 1; 3005 PdeDst.b.u1IgnorePAT = 1; 3006 PdeDst.b.u3EMT = VMX_EPT_MEMTYPE_WB; 3007 ASMAtomicWriteSize(pPdeDst, PdeDst.u); 3008 3009 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPT), a); 3010 return VINF_SUCCESS; 3011 } 3012 } 2941 3013 # endif 2942 3014 -
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
r26491 r26616 436 436 PGM_PAGE_SET_PAGEID(pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage); 437 437 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED); 438 PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_PT); 438 439 PGMPhysInvalidatePageMapTLBEntry(pVM, GCPhys); 439 440 … … 444 445 } 445 446 447 /** 448 * Replace a 2 MB range of zero pages with new pages that we can write to. 449 * 450 * @returns The following VBox status codes. 451 * @retval VINF_SUCCESS on success, pPage is modified. 452 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending. 453 * @retval VERR_EM_NO_MEMORY if we're totally out of memory. 454 * 455 * @todo Propagate VERR_EM_NO_MEMORY up the call tree. 456 * 457 * @param pVM The VM address. 458 * @param GCPhys The address of the page. 459 * 460 * @remarks Must be called from within the PGM critical section. It may 461 * nip back to ring-3/0 in some cases. 462 * 463 * @remarks This function shouldn't really fail, however if it does 464 * it probably means we've screwed up the size of handy pages and/or 465 * the low-water mark. Or, that some device I/O is causing a lot of 466 * pages to be allocated while the host is in a low-memory 467 * condition. This latter should be handled elsewhere and in a more 468 * controlled manner, it's on the @bugref{3170} todo list... 469 */ 470 int pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys) 471 { 472 LogFlow(("pgmPhysAllocLargePage: %RGp\n", GCPhys)); 473 474 /* 475 * Prereqs. 476 */ 477 Assert(PGMIsLocked(pVM)); 478 Assert((GCPhys & X86_PD_PAE_MASK) == 0); 479 480 #ifdef IN_RING3 481 int rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhys); 482 #else 483 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhys); 484 #endif 485 return rc; 486 }
Note:
See TracChangeset
for help on using the changeset viewer.

