- Timestamp: Oct 20, 2021, 12:50:11 AM
- Location: trunk
- Files: 1 deleted, 19 edited
- include/VBox/vmm/mm.h (modified) (1 diff)
- include/VBox/vmm/pgm.h (modified) (5 diffs)
- src/VBox/VMM/Makefile.kmk (modified) (2 diffs)
- src/VBox/VMM/VMMAll/PGMAll.cpp (modified) (9 diffs)
- src/VBox/VMM/VMMAll/PGMAllBth.h (modified) (16 diffs)
- src/VBox/VMM/VMMAll/PGMAllMap.cpp (deleted)
- src/VBox/VMM/VMMAll/PGMAllPool.cpp (modified) (6 diffs)
- src/VBox/VMM/VMMAll/PGMAllShw.h (modified) (1 diff)
- src/VBox/VMM/VMMR3/MMHyper.cpp (modified) (9 diffs)
- src/VBox/VMM/VMMR3/PDMLdr.cpp (modified) (1 diff)
- src/VBox/VMM/VMMR3/PGM.cpp (modified) (13 diffs)
- src/VBox/VMM/VMMR3/PGMDbg.cpp (modified) (15 diffs)
- src/VBox/VMM/VMMR3/PGMMap.cpp (modified) (4 diffs)
- src/VBox/VMM/VMMR3/PGMPhys.cpp (modified) (10 diffs)
- src/VBox/VMM/VMMR3/PGMPool.cpp (modified) (2 diffs)
- src/VBox/VMM/VMMR3/PGMSavedState.cpp (modified) (6 diffs)
- src/VBox/VMM/VMMR3/TM.cpp (modified) (1 diff)
- src/VBox/VMM/VMMR3/VM.cpp (modified) (2 diffs)
- src/VBox/VMM/include/PGMInline.h (modified) (1 diff)
- src/VBox/VMM/include/PGMInternal.h (modified) (17 diffs)
trunk/include/VBox/vmm/mm.h
r91266 → r91854, one hunk (old lines 266-283, new lines 266-271); lines marked '-' are removed:

      #define MMHYPER_AONR_FLAGS_KERNEL_MAPPING RT_BIT(0)
      /** @} */
-     VMMR3DECL(int) MMR3HyperSetGuard(PVM pVM, void *pvStart, size_t cb, bool fSet);
-     #ifndef PGM_WITHOUT_MAPPINGS
-     VMMR3DECL(int) MMR3HyperMapHCPhys(PVM pVM, void *pvR3, RTR0PTR pvR0, RTHCPHYS HCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr);
-     VMMR3DECL(int) MMR3HyperMapGCPhys(PVM pVM, RTGCPHYS GCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr);
-     VMMR3DECL(int) MMR3HyperReserve(PVM pVM, unsigned cb, const char *pszDesc, PRTGCPTR pGCPtr);
-     #endif
-     VMMR3DECL(int) MMR3HyperMapPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cPages, PCSUPPAGE paPages, const char *pszDesc, PRTGCPTR pGCPtr);
-     VMMR3DECL(int) MMR3HyperReserveFence(PVM pVM);
      VMMR3DECL(RTHCPHYS) MMR3HyperHCVirt2HCPhys(PVM pVM, void *pvHC);
      VMMR3DECL(int) MMR3HyperHCVirt2HCPhysEx(PVM pVM, void *pvHC, PRTHCPHYS pHCPhys);
-     #ifndef PGM_WITHOUT_MAPPINGS
-     VMMR3_INT_DECL(int) MMR3HyperQueryInfoFromHCPhys(PVM pVM, RTHCPHYS HCPhys, char *pszWhat, size_t cbWhat, uint32_t *pcbAlloc);
-     VMMR3DECL(int) MMR3HyperReadGCVirt(PVM pVM, void *pvDst, RTGCPTR GCPtr, size_t cb);
-     #endif
      /** @} */
trunk/include/VBox/vmm/pgm.h
r91848 r91854 54 54 PGMRELOCATECALL_RELOCATE 55 55 } PGMRELOCATECALL; 56 57 58 /** No guest context mappings (might be removed entirely later, if we don't59 * need it again (see new raw-mode ideas)).60 * @internal */61 #define PGM_WITHOUT_MAPPINGS62 56 63 57 … … 325 319 VMMDECL(int) PGMIsValidAccess(PVMCPUCC pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess); 326 320 VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault); 327 #ifndef PGM_WITHOUT_MAPPINGS328 VMMDECL(int) PGMMap(PVM pVM, RTGCPTR GCPtr, RTHCPHYS HCPhys, uint32_t cbPages, unsigned fFlags);329 VMMDECL(int) PGMMapGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys);330 VMMDECL(int) PGMMapSetPage(PVM pVM, RTGCPTR GCPtr, uint64_t cb, uint64_t fFlags);331 VMMDECL(int) PGMMapModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);332 # ifndef IN_RING0333 VMMDECL(bool) PGMMapHasConflicts(PVM pVM);334 # endif335 # ifdef VBOX_STRICT336 VMMDECL(void) PGMMapCheck(PVM pVM);337 # endif338 #endif /* !PGM_WITHOUT_MAPPINGS */339 321 VMMDECL(int) PGMShwGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys); 340 322 VMMDECL(int) PGMShwMakePageReadonly(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fFlags); … … 694 676 VMMR3_INT_DECL(void) PGMR3EnableNemMode(PVM pVM); 695 677 VMMR3DECL(int) PGMR3Init(PVM pVM); 696 VMMR3DECL(int) PGMR3InitDynMap(PVM pVM);697 678 VMMR3DECL(int) PGMR3InitFinalize(PVM pVM); 698 679 VMMR3_INT_DECL(int) PGMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat); … … 725 706 VMMR3_INT_DECL(RTGCPHYS) PGMR3PhysMmio2GetMappingAddress(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2); 726 707 VMMR3_INT_DECL(int) PGMR3PhysMmio2ChangeRegionNo(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, uint32_t iNewRegion); 727 VMMR3_INT_DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys);728 708 729 709 … … 747 727 VMMR3DECL(int) PGMR3PhysRegister(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc); 748 728 VMMDECL(void) PGMR3PhysSetA20(PVMCPU pVCpu, bool fEnable); 749 #ifndef PGM_WITHOUT_MAPPINGS 750 /** @name PGMR3MapPT flags. 751 * @{ */ 752 /** The mapping may be unmapped later. The default is permanent mappings. */ 753 # define PGMR3MAPPT_FLAGS_UNMAPPABLE RT_BIT(0) 754 /** @} */ 755 VMMR3DECL(int) PGMR3MapPT(PVM pVM, RTGCPTR GCPtr, uint32_t cb, uint32_t fFlags, PFNPGMRELOCATE pfnRelocate, void *pvUser, const char *pszDesc); 756 VMMR3DECL(int) PGMR3UnmapPT(PVM pVM, RTGCPTR GCPtr); 757 VMMR3DECL(int) PGMR3FinalizeMappings(PVM pVM); 758 VMMR3DECL(bool) PGMR3MappingsNeedReFixing(PVM pVM); 759 # if defined(VBOX_WITH_RAW_MODE) || HC_ARCH_BITS == 32 /* (latter for 64-bit guests on 32-bit hosts) */ 760 VMMR3DECL(int) PGMR3MapIntermediate(PVM pVM, RTUINTPTR Addr, RTHCPHYS HCPhys, unsigned cbPages); 761 # endif 762 VMMR3DECL(int) PGMR3MapRead(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb); 763 #endif /* !PGM_WITHOUT_MAPPINGS */ 729 764 730 VMMR3DECL(int) PGMR3MappingsSize(PVM pVM, uint32_t *pcb); 765 731 VMMR3DECL(int) PGMR3MappingsFix(PVM pVM, RTGCPTR GCPtrBase, uint32_t cb); -
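For readability, the centrepiece of the first pgm.h hunk above, reconstructed from the flattened text (whitespace approximate, not verbatim): the PGM_WITHOUT_MAPPINGS macro and its comment are deleted, and the remaining hunks in this changeset drop every block that was guarded by "#ifndef PGM_WITHOUT_MAPPINGS", leaving the mapping-less configuration as the only code path.

    /** No guest context mappings (might be removed entirely later, if we don't
     * need it again (see new raw-mode ideas)).
     * @internal */
    #define PGM_WITHOUT_MAPPINGS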
trunk/src/VBox/VMM/Makefile.kmk
r91808 → r91854, two hunks, each dropping VMMAll/PGMAllMap.cpp from a source-file list; lines marked '-' are removed:

Hunk at old lines 198-202 (new 198-201):
      VMMAll/PGMAll.cpp \
      VMMAll/PGMAllHandler.cpp \
-     VMMAll/PGMAllMap.cpp \
      VMMAll/PGMAllPhys.cpp \
      VMMAll/PGMAllPool.cpp \

Hunk at old lines 542-546 (new 541-544):
      VMMAll/PGMAll.cpp \
      VMMAll/PGMAllHandler.cpp \
-     VMMAll/PGMAllMap.cpp \
      VMMAll/PGMAllPhys.cpp \
      VMMAll/PGMAllPool.cpp \
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
r91712 r91854 1002 1002 1003 1003 1004 #ifndef PGM_WITHOUT_MAPPINGS1005 /**1006 * Gets the mapping corresponding to the specified address (if any).1007 *1008 * @returns Pointer to the mapping.1009 * @returns NULL if not1010 *1011 * @param pVM The cross context VM structure.1012 * @param GCPtr The guest context pointer.1013 */1014 PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)1015 {1016 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);1017 while (pMapping)1018 {1019 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)1020 break;1021 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)1022 return pMapping;1023 pMapping = pMapping->CTX_SUFF(pNext);1024 }1025 return NULL;1026 }1027 #endif1028 1029 1030 1004 /** 1031 1005 * Verifies a range of pages for read or write access … … 1229 1203 PGM_UNLOCK(pVM); 1230 1204 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a); 1231 1232 #ifdef IN_RING31233 /*1234 * Check if we have a pending update of the CR3 monitoring.1235 */1236 if ( RT_SUCCESS(rc)1237 && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))1238 {1239 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;1240 Assert(!pVM->pgm.s.fMappingsFixed); Assert(pgmMapAreMappingsEnabled(pVM));1241 }1242 #endif /* IN_RING3 */1243 1205 1244 1206 /* Ignore all irrelevant error codes. */ … … 2484 2446 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, fPdpesMapped); 2485 2447 if (RT_LIKELY(rc == VINF_SUCCESS)) 2486 { 2487 if (pgmMapAreMappingsFloating(pVM)) 2488 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3; 2489 } 2448 { } 2490 2449 else 2491 2450 { … … 2494 2453 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3; 2495 2454 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3; 2496 if (pgmMapAreMappingsFloating(pVM))2497 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;2498 2455 } 2499 2456 … … 2514 2471 } 2515 2472 #endif 2516 /*2517 * Check if we have a pending update of the CR3 monitoring.2518 */2519 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)2520 {2521 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;2522 Assert(!pVM->pgm.s.fMappingsFixed); Assert(pgmMapAreMappingsEnabled(pVM));2523 }2524 2473 if (fGlobal) 2525 2474 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3Global)); … … 2565 2514 /* We assume we're only called in nested paging mode. 
*/ 2566 2515 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT); 2567 Assert(!pgmMapAreMappingsEnabled(pVCpu->CTX_SUFF(pVM)));2568 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));2569 2516 2570 2517 /* … … 2708 2655 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3); 2709 2656 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL); 2710 }2711 2712 /*2713 * Check if we have a pending update of the CR3 monitoring.2714 */2715 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)2716 {2717 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;2718 Assert(!pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsFixed);2719 Assert(pgmMapAreMappingsEnabled(pVCpu->CTX_SUFF(pVM)));2720 2657 } 2721 2658 } … … 3747 3684 } 3748 3685 3686 3749 3687 #ifdef VBOX_STRICT 3750 3751 # ifndef PGM_WITHOUT_MAPPINGS3752 /**3753 * Asserts that there are no mapping conflicts.3754 *3755 * @returns Number of conflicts.3756 * @param pVM The cross context VM structure.3757 */3758 VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)3759 {3760 unsigned cErrors = 0;3761 3762 /* Only applies to raw mode -> 1 VPCU */3763 Assert(pVM->cCpus == 1);3764 PVMCPU pVCpu = &VMCC_GET_CPU_0(pVM);3765 3766 /*3767 * Check for mapping conflicts.3768 */3769 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);3770 pMapping;3771 pMapping = pMapping->CTX_SUFF(pNext))3772 {3773 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */3774 for (RTGCPTR GCPtr = pMapping->GCPtr; GCPtr <= pMapping->GCPtrLast; GCPtr += PAGE_SIZE)3775 {3776 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, NULL, NULL);3777 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)3778 {3779 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));3780 cErrors++;3781 break;3782 }3783 }3784 }3785 3786 return cErrors;3787 }3788 # endif /* !PGM_WITHOUT_MAPPINGS */3789 3790 3791 3688 /** 3792 3689 * Asserts that everything related to the guest CR3 is correctly shadowed. … … 3817 3714 return cErrors; 3818 3715 } 3819 3820 3716 #endif /* VBOX_STRICT */ 3821 3717 -
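One self-contained removal from PGMAll.cpp, reconstructed from the flattened hunk above for readability (indentation approximate, not verbatim): the pgmGetMapping() helper, which walked the guest-context mapping list and has no callers once the mapping code is gone.

    /**
     * Gets the mapping corresponding to the specified address (if any).
     *
     * @returns Pointer to the mapping.
     * @returns NULL if not found.
     *
     * @param   pVM     The cross context VM structure.
     * @param   GCPtr   The guest context pointer.
     */
    PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
    {
        PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
        while (pMapping)
        {
            if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
                break;
            if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
                return pMapping;
            pMapping = pMapping->CTX_SUFF(pNext);
        }
        return NULL;
    }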
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r91580 r91854 128 128 pgmPoolUnlockPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)); 129 129 130 # ifndef PGM_WITHOUT_MAPPINGS131 /* Remove the hypervisor mappings from the shadow page table. */132 pgmMapDeactivateCR3(pVM, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));133 # endif134 135 130 pgmPoolFreeByPage(pPool, pOldShwPageCR3, NIL_PGMPOOL_IDX, UINT32_MAX); 136 131 pVCpu->pgm.s.pShwPageCR3R3 = NIL_RTR3PTR; … … 154 149 /* Set the current hypervisor CR3. */ 155 150 CPUMSetHyperCR3(pVCpu, PGMGetHyperCR3(pVCpu)); 156 157 # ifndef PGM_WITHOUT_MAPPINGS158 /* Apply all hypervisor mappings to the new CR3. */159 rc = pgmMapActivateCR3(pVM, pNewShwPageCR3);160 # endif161 151 162 152 PGM_UNLOCK(pVM); … … 185 175 PGM_BTH_DECL(VBOXSTRICTRC, Trap0eHandlerGuestFault)(PVMCPUCC pVCpu, PGSTPTWALK pGstWalk, RTGCUINT uErr) 186 176 { 187 # if !defined(PGM_WITHOUT_MAPPINGS) && (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE)188 /*189 * Check for write conflicts with our hypervisor mapping.190 *191 * If the guest happens to access a non-present page, where our hypervisor192 * is currently mapped, then we'll create a #PF storm in the guest.193 */194 if ( (uErr & (X86_TRAP_PF_P | X86_TRAP_PF_RW)) == (X86_TRAP_PF_P | X86_TRAP_PF_RW)195 && pgmMapAreMappingsEnabled(pVCpu->CTX_SUFF(pVM))196 && MMHyperIsInsideArea(pVCpu->CTX_SUFF(pVM), pGstWalk->Core.GCPtr))197 {198 /* Force a CR3 sync to check for conflicts and emulate the instruction. */199 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);200 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2GuestTrap; });201 return VINF_EM_RAW_EMULATE_INSTR;202 }203 # endif204 205 177 /* 206 178 * Calc the error code for the guest trap. … … 677 649 return VINF_PGM_SYNC_CR3; 678 650 } 679 680 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && !defined(PGM_WITHOUT_MAPPINGS)681 /*682 * Check if this address is within any of our mappings.683 *684 * This is *very* fast and it's gonna save us a bit of effort below and prevent685 * us from screwing ourself with MMIO2 pages which have a GC Mapping (VRam).686 * (BTW, it's impossible to have physical access handlers in a mapping.)687 */688 if (pgmMapAreMappingsEnabled(pVM))689 {690 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);691 for ( ; pMapping; pMapping = pMapping->CTX_SUFF(pNext))692 {693 if (pvFault < pMapping->GCPtr)694 break;695 if (pvFault - pMapping->GCPtr < pMapping->cb)696 {697 /*698 * The first thing we check is if we've got an undetected conflict.699 */700 if (pgmMapAreMappingsFloating(pVM))701 {702 unsigned iPT = pMapping->cb >> GST_PD_SHIFT;703 while (iPT-- > 0)704 if (GstWalk.pPde[iPT].n.u1Present)705 {706 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eConflicts);707 Log(("Trap0e: Detected Conflict %RGv-%RGv\n", pMapping->GCPtr, pMapping->GCPtrLast));708 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); /** @todo no need to do global sync,right? 
*/709 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2Mapping; });710 return VINF_PGM_SYNC_CR3;711 }712 }713 714 /*715 * Pretend we're not here and let the guest handle the trap.716 */717 TRPMSetErrorCode(pVCpu, uErr & ~X86_TRAP_PF_P);718 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eGuestPFMapping);719 LogFlow(("PGM: Mapping access -> route trap to recompiler!\n"));720 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2Mapping; });721 return VINF_EM_RAW_GUEST_TRAP;722 }723 }724 } /* pgmAreMappingsEnabled(&pVM->pgm.s) */725 # endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */726 651 727 652 /* … … 1103 1028 # endif 1104 1029 { 1105 # ifndef PGM_WITHOUT_MAPPINGS1106 Assert(!pPdptDst || !(pPdptDst->a[iPdpt].u & PGM_PLXFLAGS_MAPPING));1107 # endif1108 1030 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePageSkipped)); 1109 1031 PGM_INVL_PG(pVCpu, GCPtrPage); … … 1212 1134 Assert( (PdeSrc.u & X86_PDE_US) == (PdeDst.u & X86_PDE_US) 1213 1135 && ((PdeSrc.u & X86_PDE_RW) || !(PdeDst.u & X86_PDE_RW) || pVCpu->pgm.s.cNetwareWp0Hacks > 0)); 1214 # ifndef PGM_WITHOUT_MAPPINGS1215 if (PdeDst.u & PGM_PDFLAGS_MAPPING)1216 {1217 /*1218 * Conflict - Let SyncPT deal with it to avoid duplicate code.1219 */1220 Assert(pgmMapAreMappingsEnabled(pVM));1221 Assert(PGMGetGuestMode(pVCpu) <= PGMMODE_PAE);1222 rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, GCPtrPage);1223 }1224 else1225 # endif /* !PGM_WITHOUT_MAPPINGS */1226 1136 if (!fIsBigPage) 1227 1137 { … … 1321 1231 * Page directory is not present, mark shadow PDE not present. 1322 1232 */ 1323 # ifndef PGM_WITHOUT_MAPPINGS 1324 if (!(PdeDst.u & PGM_PDFLAGS_MAPPING)) 1325 # endif 1326 { 1327 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst); 1328 SHW_PDE_ATOMIC_SET(*pPdeDst, 0); 1329 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePagePDNPs)); 1330 PGM_INVL_PG(pVCpu, GCPtrPage); 1331 } 1332 # ifndef PGM_WITHOUT_MAPPINGS 1333 else 1334 { 1335 Assert(pgmMapAreMappingsEnabled(pVM)); 1336 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePagePDMappings)); 1337 } 1338 # endif 1233 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst); 1234 SHW_PDE_ATOMIC_SET(*pPdeDst, 0); 1235 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePagePDNPs)); 1236 PGM_INVL_PG(pVCpu, GCPtrPage); 1339 1237 } 1340 1238 return rc; … … 2538 2436 # endif 2539 2437 2540 # ifndef PGM_WITHOUT_MAPPINGS 2541 /* 2542 * Check for conflicts. 2543 * RC: In case of a conflict we'll go to Ring-3 and do a full SyncCR3. 2544 * R3: Simply resolve the conflict. 
2545 */ 2546 if (PdeDst.u & PGM_PDFLAGS_MAPPING) 2547 { 2548 Assert(pgmMapAreMappingsEnabled(pVM)); 2549 # ifndef IN_RING3 2550 Log(("SyncPT: Conflict at %RGv\n", GCPtrPage)); 2551 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncPT), a); 2552 return VERR_ADDRESS_CONFLICT; 2553 2554 # else /* IN_RING3 */ 2555 PPGMMAPPING pMapping = pgmGetMapping(pVM, (RTGCPTR)GCPtrPage); 2556 Assert(pMapping); 2557 # if PGM_GST_TYPE == PGM_TYPE_32BIT 2558 rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, GCPtrPage & (GST_PD_MASK << GST_PD_SHIFT)); 2559 # elif PGM_GST_TYPE == PGM_TYPE_PAE 2560 rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, GCPtrPage & (GST_PD_MASK << GST_PD_SHIFT)); 2561 # else 2562 AssertFailed(); NOREF(pMapping); /* can't happen for amd64 */ 2563 # endif 2564 if (RT_FAILURE(rc)) 2565 { 2566 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncPT), a); 2567 return rc; 2568 } 2569 PdeDst = *pPdeDst; 2570 # endif /* IN_RING3 */ 2571 } 2572 # endif /* !PGM_WITHOUT_MAPPINGS */ 2573 Assert(!SHW_PDE_IS_P(PdeDst)); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/ 2438 Assert(!SHW_PDE_IS_P(PdeDst)); /* We're only supposed to call SyncPT on PDE!P.*/ 2574 2439 2575 2440 /* … … 2985 2850 SHWPDE PdeDst = *pPdeDst; 2986 2851 2987 # ifndef PGM_WITHOUT_MAPPINGS2988 Assert(!(PdeDst.u & PGM_PDFLAGS_MAPPING));2989 # endif2990 2852 Assert(!SHW_PDE_IS_P(PdeDst)); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/ 2991 2853 … … 3235 3097 PdeDst = pPDDst->a[iPDDst]; 3236 3098 # endif 3237 # ifndef PGM_WITHOUT_MAPPINGS 3238 if (!(PdeDst.u & PGM_PDFLAGS_MAPPING)) 3239 # endif 3099 if (!(PdeDst.u & X86_PDE_P)) 3240 3100 { 3241 if (!(PdeDst.u & X86_PDE_P)) 3242 { 3243 /** @todo r=bird: This guy will set the A bit on the PDE, 3244 * probably harmless. */ 3245 rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, GCPtrPage); 3246 } 3247 else 3248 { 3249 /* Note! We used to sync PGM_SYNC_NR_PAGES pages, which triggered assertions in CSAM, because 3250 * R/W attributes of nearby pages were reset. Not sure how that could happen. Anyway, it 3251 * makes no sense to prefetch more than one page. 3252 */ 3253 rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, GCPtrPage, 1, 0); 3254 if (RT_SUCCESS(rc)) 3255 rc = VINF_SUCCESS; 3256 } 3101 /** @todo r=bird: This guy will set the A bit on the PDE, 3102 * probably harmless. */ 3103 rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, GCPtrPage); 3104 } 3105 else 3106 { 3107 /* Note! We used to sync PGM_SYNC_NR_PAGES pages, which triggered assertions in CSAM, because 3108 * R/W attributes of nearby pages were reset. Not sure how that could happen. Anyway, it 3109 * makes no sense to prefetch more than one page. 3110 */ 3111 rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, GCPtrPage, 1, 0); 3112 if (RT_SUCCESS(rc)) 3113 rc = VINF_SUCCESS; 3257 3114 } 3258 3115 PGM_UNLOCK(pVM); … … 3480 3337 * Nested / EPT / None - No work. 3481 3338 */ 3482 Assert(!pgmMapAreMappingsEnabled(pVM));3483 3339 return VINF_SUCCESS; 3484 3340 … … 3488 3344 * out the shadow parts when the guest modifies its tables. 
3489 3345 */ 3490 Assert(!pgmMapAreMappingsEnabled(pVM));3491 3346 return VINF_SUCCESS; 3492 3347 3493 3348 #else /* !PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) && PGM_SHW_TYPE != PGM_TYPE_AMD64 */ 3494 3349 3495 # ifndef PGM_WITHOUT_MAPPINGS3496 /*3497 * Check for and resolve conflicts with our guest mappings if they3498 * are enabled and not fixed.3499 */3500 if (pgmMapAreMappingsFloating(pVM))3501 {3502 int rc = pgmMapResolveConflicts(pVM);3503 Assert(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3);3504 if (rc == VINF_SUCCESS)3505 { /* likely */ }3506 else if (rc == VINF_PGM_SYNC_CR3)3507 {3508 LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));3509 return VINF_PGM_SYNC_CR3;3510 }3511 else if (RT_FAILURE(rc))3512 return rc;3513 else3514 AssertMsgFailed(("%Rrc\n", rc));3515 }3516 # else3517 Assert(!pgmMapAreMappingsEnabled(pVM));3518 # endif3519 3350 return VINF_SUCCESS; 3520 3351 #endif /* !PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) && PGM_SHW_TYPE != PGM_TYPE_AMD64 */ … … 3763 3594 # else 3764 3595 const SHWPDE PdeDst = pPDDst->a[iPDDst]; 3765 # endif3766 # ifndef PGM_WITHOUT_MAPPINGS3767 if (PdeDst.u & PGM_PDFLAGS_MAPPING)3768 {3769 Assert(pgmMapAreMappingsEnabled(pVM));3770 if ((PdeDst.u & X86_PDE_AVL_MASK) != PGM_PDFLAGS_MAPPING)3771 {3772 AssertMsgFailed(("Mapping shall only have PGM_PDFLAGS_MAPPING set! PdeDst.u=%#RX64\n", (uint64_t)PdeDst.u));3773 cErrors++;3774 continue;3775 }3776 }3777 else3778 3596 # endif 3779 3597 if ( (PdeDst.u & X86_PDE_P) … … 4411 4229 # endif 4412 4230 4413 # ifndef PGM_WITHOUT_MAPPINGS4414 /*4415 * Apply all hypervisor mappings to the new CR3.4416 * Note that SyncCR3 will be executed in case CR3 is changed in a guest paging mode; this will4417 * make sure we check for conflicts in the new CR3 root.4418 */4419 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)4420 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL) || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));4421 # endif4422 int const rc3 = pgmMapActivateCR3(pVM, pNewShwPageCR3);4423 AssertRCReturn(rc3, rc3);4424 # endif4425 4426 4231 /* Set the current hypervisor CR3. */ 4427 4232 CPUMSetHyperCR3(pVCpu, PGMGetHyperCR3(pVCpu)); … … 4432 4237 { 4433 4238 Assert(pOldShwPageCR3->enmKind != PGMPOOLKIND_FREE); 4434 # ifndef PGM_WITHOUT_MAPPINGS 4435 /* Remove the hypervisor mappings from the shadow page table. */ 4436 pgmMapDeactivateCR3(pVM, pOldShwPageCR3); 4437 # endif 4239 4438 4240 /* Mark the page as unlocked; allow flushing again. */ 4439 4241 pgmPoolUnlockPage(pPool, pOldShwPageCR3); … … 4498 4300 PGM_LOCK_VOID(pVM); 4499 4301 4500 # ifndef PGM_WITHOUT_MAPPINGS4501 if (pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))4502 /* Remove the hypervisor mappings from the shadow page table. */4503 pgmMapDeactivateCR3(pVM, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));4504 # endif4505 4506 4302 if (pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)) 4507 4303 { -
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
r91247 r91854 466 466 const unsigned iShw = off / sizeof(X86PDEPAE); 467 467 X86PGPAEUINT const uPde = uShw.pPDPae->a[iShw].u; 468 #ifndef PGM_WITHOUT_MAPPINGS469 Assert(!(uPde & PGM_PDFLAGS_MAPPING));470 #endif471 468 if (uPde & X86_PDE_P) 472 469 { … … 483 480 AssertBreak(iShw2 < RT_ELEMENTS(uShw.pPDPae->a)); 484 481 X86PGPAEUINT const uPde2 = uShw.pPDPae->a[iShw2].u; 485 #ifndef PGM_WITHOUT_MAPPINGS486 Assert(!(uPde2 & PGM_PDFLAGS_MAPPING));487 #endif488 482 if (uPde2 & X86_PDE_P) 489 483 { … … 3760 3754 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD: 3761 3755 Assert(iUserTable < X86_PG_PAE_ENTRIES); 3762 #ifndef PGM_WITHOUT_MAPPINGS3763 Assert(!(u.pau64[iUserTable] & PGM_PDFLAGS_MAPPING));3764 #endif3765 3756 break; 3766 3757 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT: … … 4447 4438 { 4448 4439 X86PGUINT const uPde = pShwPD->a[i].u; 4449 #ifndef PGM_WITHOUT_MAPPINGS4450 if ((uPde & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) == X86_PDE_P)4451 #else4452 4440 if (uPde & X86_PDE_P) 4453 #endif4454 4441 { 4455 4442 PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPD->a[i].u & X86_PDE_PG_MASK); … … 4475 4462 { 4476 4463 X86PGPAEUINT const uPde = pShwPD->a[i].u; 4477 #ifndef PGM_WITHOUT_MAPPINGS4478 if ((uPde & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) == X86_PDE_P)4479 #else4480 4464 if (uPde & X86_PDE_P) 4481 #endif4482 4465 { 4483 4466 #ifdef PGM_WITH_LARGE_PAGES … … 4519 4502 X86PGPAEUINT const uPdpe = pShwPDPT->a[i].u; 4520 4503 Assert((uPdpe & (X86_PDPE_PAE_MBZ_MASK | UINT64_C(0x7ff0000000000200))) == 0); 4521 if ( uPdpe & X86_PDPE_P 4522 #ifndef PGM_WITHOUT_MAPPINGS 4523 && !(uPdpe & PGM_PLXFLAGS_MAPPING) 4524 #endif 4525 ) 4504 if (uPdpe & X86_PDPE_P) 4526 4505 { 4527 4506 PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, uPdpe & X86_PDPE_PG_MASK); -
trunk/src/VBox/VMM/VMMAll/PGMAllShw.h
r90439 r91854 416 416 */ 417 417 PSHWPT pPT; 418 # ifndef PGM_WITHOUT_MAPPINGS 419 if (!(Pde.u & PGM_PDFLAGS_MAPPING)) 420 # endif 421 { 422 int rc2 = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pde.u & SHW_PDE_PG_MASK, &pPT); 423 if (RT_FAILURE(rc2)) 424 return rc2; 425 } 426 # ifndef PGM_WITHOUT_MAPPINGS 427 else /* mapping: */ 428 { 429 # if PGM_SHW_TYPE == PGM_TYPE_AMD64 \ 430 || PGM_SHW_TYPE == PGM_TYPE_EPT \ 431 || defined(PGM_WITHOUT_MAPPINGS) 432 AssertFailed(); /* can't happen */ 433 pPT = NULL; /* shut up MSC */ 434 # else 435 Assert(pgmMapAreMappingsEnabled(pVM)); 436 437 PPGMMAPPING pMap = pgmGetMapping(pVM, (RTGCPTR)GCPtr); 438 AssertMsgReturn(pMap, ("GCPtr=%RGv\n", GCPtr), VERR_PGM_MAPPING_IPE); 439 # if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_NESTED_32BIT 440 pPT = pMap->aPTs[(GCPtr - pMap->GCPtr) >> X86_PD_SHIFT].CTX_SUFF(pPT); 441 # else /* PAE */ 442 pPT = pMap->aPTs[(GCPtr - pMap->GCPtr) >> X86_PD_SHIFT].CTX_SUFF(paPaePTs); 443 # endif 444 # endif 445 } 446 # endif /* !PGM_WITHOUT_MAPPINGS */ 418 int rc2 = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pde.u & SHW_PDE_PG_MASK, &pPT); 419 if (RT_FAILURE(rc2)) 420 return rc2; 447 421 const unsigned iPt = (GCPtr >> SHW_PT_SHIFT) & SHW_PT_MASK; 448 422 SHWPTE Pte = pPT->a[iPt]; -
trunk/src/VBox/VMM/VMMR3/MMHyper.cpp
r91266 r91854 39 39 * Internal Functions * 40 40 *********************************************************************************************************************************/ 41 #ifndef PGM_WITHOUT_MAPPINGS42 static DECLCALLBACK(bool) mmR3HyperRelocateCallback(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode,43 void *pvUser);44 #endif45 41 static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup); 46 42 static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap, PRTR0PTR pR0PtrHeap); 47 43 static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC); 48 44 static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs); 45 static int MMR3HyperReserveFence(PVM pVM); 46 static int MMR3HyperMapPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cPages, PCSUPPAGE paPages, 47 const char *pszDesc, PRTGCPTR pGCPtr); 49 48 50 49 … … 222 221 223 222 /** 224 * Finalizes the HMA mapping .223 * Finalizes the HMA mapping (obsolete). 225 224 * 226 225 * This is called later during init, most (all) HMA allocations should be done … … 239 238 AssertRC(rc); 240 239 241 #ifndef PGM_WITHOUT_MAPPINGS242 /*243 * Adjust and create the HMA mapping.244 */245 while ((RTINT)pVM->mm.s.offHyperNextStatic + 64*_1K < (RTINT)pVM->mm.s.cbHyperArea - _4M)246 pVM->mm.s.cbHyperArea -= _4M;247 rc = PGMR3MapPT(pVM, pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea, 0 /*fFlags*/,248 mmR3HyperRelocateCallback, NULL, "Hypervisor Memory Area");249 if (RT_FAILURE(rc))250 return rc;251 #endif252 240 pVM->mm.s.fPGMInitialized = true; 253 254 #ifndef PGM_WITHOUT_MAPPINGS255 /*256 * Do all the delayed mappings.257 */258 PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uintptr_t)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);259 for (;;)260 {261 RTGCPTR GCPtr = pVM->mm.s.pvHyperAreaGC + pLookup->off;262 uint32_t cPages = pLookup->cb >> PAGE_SHIFT;263 switch (pLookup->enmType)264 {265 case MMLOOKUPHYPERTYPE_LOCKED:266 {267 PCRTHCPHYS paHCPhysPages = pLookup->u.Locked.paHCPhysPages;268 for (uint32_t i = 0; i < cPages; i++)269 {270 rc = PGMMap(pVM, GCPtr + (i << PAGE_SHIFT), paHCPhysPages[i], PAGE_SIZE, 0);271 AssertRCReturn(rc, rc);272 }273 break;274 }275 276 case MMLOOKUPHYPERTYPE_HCPHYS:277 rc = PGMMap(pVM, GCPtr, pLookup->u.HCPhys.HCPhys, pLookup->cb, 0);278 break;279 280 case MMLOOKUPHYPERTYPE_GCPHYS:281 {282 const RTGCPHYS GCPhys = pLookup->u.GCPhys.GCPhys;283 const uint32_t cb = pLookup->cb;284 for (uint32_t off = 0; off < cb; off += PAGE_SIZE)285 {286 RTHCPHYS HCPhys;287 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys + off, &HCPhys);288 if (RT_FAILURE(rc))289 break;290 rc = PGMMap(pVM, GCPtr + off, HCPhys, PAGE_SIZE, 0);291 if (RT_FAILURE(rc))292 break;293 }294 break;295 }296 297 case MMLOOKUPHYPERTYPE_MMIO2:298 {299 const RTGCPHYS offEnd = pLookup->u.MMIO2.off + pLookup->cb;300 for (RTGCPHYS offCur = pLookup->u.MMIO2.off; offCur < offEnd; offCur += PAGE_SIZE)301 {302 RTHCPHYS HCPhys;303 rc = PGMR3PhysMMIO2GetHCPhys(pVM, pLookup->u.MMIO2.pDevIns, pLookup->u.MMIO2.iSubDev,304 pLookup->u.MMIO2.iRegion, offCur, &HCPhys);305 if (RT_FAILURE(rc))306 break;307 rc = PGMMap(pVM, GCPtr + (offCur - pLookup->u.MMIO2.off), HCPhys, PAGE_SIZE, 0);308 if (RT_FAILURE(rc))309 break;310 }311 break;312 }313 314 case MMLOOKUPHYPERTYPE_DYNAMIC:315 /* do nothing here since these are either fences or managed by someone else using PGM. 
*/316 break;317 318 default:319 AssertMsgFailed(("enmType=%d\n", pLookup->enmType));320 break;321 }322 323 if (RT_FAILURE(rc))324 {325 AssertMsgFailed(("rc=%Rrc cb=%d off=%#RX32 enmType=%d pszDesc=%s\n",326 rc, pLookup->cb, pLookup->off, pLookup->enmType, pLookup->pszDesc));327 return rc;328 }329 330 /* next */331 if (pLookup->offNext == (int32_t)NIL_OFFSET)332 break;333 pLookup = (PMMLOOKUPHYPER)((uintptr_t)pLookup + pLookup->offNext);334 }335 #endif /* !PGM_WITHOUT_MAPPINGS */336 241 337 242 LogFlow(("MMR3HyperInitFinalize: returns VINF_SUCCESS\n")); … … 339 244 } 340 245 341 342 #ifndef PGM_WITHOUT_MAPPINGS343 /**344 * Callback function which will be called when PGM is trying to find a new345 * location for the mapping.346 *347 * The callback is called in two modes, 1) the check mode and 2) the relocate mode.348 * In 1) the callback should say if it objects to a suggested new location. If it349 * accepts the new location, it is called again for doing it's relocation.350 *351 *352 * @returns true if the location is ok.353 * @returns false if another location should be found.354 * @param pVM The cross context VM structure.355 * @param GCPtrOld The old virtual address.356 * @param GCPtrNew The new virtual address.357 * @param enmMode Used to indicate the callback mode.358 * @param pvUser User argument. Ignored.359 * @remark The return value is no a failure indicator, it's an acceptance360 * indicator. Relocation can not fail!361 */362 static DECLCALLBACK(bool) mmR3HyperRelocateCallback(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew,363 PGMRELOCATECALL enmMode, void *pvUser)364 {365 NOREF(pvUser);366 switch (enmMode)367 {368 /*369 * Verify location - all locations are good for us.370 */371 case PGMRELOCATECALL_SUGGEST:372 return true;373 374 /*375 * Execute the relocation.376 */377 case PGMRELOCATECALL_RELOCATE:378 {379 /*380 * Accepted!381 */382 AssertMsg(GCPtrOld == pVM->mm.s.pvHyperAreaGC,383 ("GCPtrOld=%RGv pVM->mm.s.pvHyperAreaGC=%RGv\n", GCPtrOld, pVM->mm.s.pvHyperAreaGC));384 Log(("Relocating the hypervisor from %RGv to %RGv\n", GCPtrOld, GCPtrNew));385 386 /*387 * Relocate the VM structure and ourselves.388 */389 RTGCINTPTR offDelta = GCPtrNew - GCPtrOld;390 pVM->pVMRC += offDelta;391 for (VMCPUID i = 0; i < pVM->cCpus; i++)392 pVM->aCpus[i].pVMRC = pVM->pVMRC;393 394 pVM->mm.s.pvHyperAreaGC += offDelta;395 Assert(pVM->mm.s.pvHyperAreaGC < _4G);396 pVM->mm.s.pHyperHeapRC += offDelta;397 pVM->mm.s.pHyperHeapR3->pbHeapRC += offDelta;398 pVM->mm.s.pHyperHeapR3->pVMRC = pVM->pVMRC;399 400 /*401 * Relocate the rest.402 */403 VMR3Relocate(pVM, offDelta);404 return true;405 }406 407 default:408 AssertMsgFailed(("Invalid relocation mode %d\n", enmMode));409 }410 411 return false;412 }413 414 415 /**416 * Maps contiguous HC physical memory into the hypervisor region in the GC.417 *418 * @return VBox status code.419 *420 * @param pVM The cross context VM structure.421 * @param pvR3 Ring-3 address of the memory. Must be page aligned!422 * @param pvR0 Optional ring-0 address of the memory.423 * @param HCPhys Host context physical address of the memory to be424 * mapped. Must be page aligned!425 * @param cb Size of the memory. 
Will be rounded up to nearest page.426 * @param pszDesc Description.427 * @param pGCPtr Where to store the GC address.428 */429 VMMR3DECL(int) MMR3HyperMapHCPhys(PVM pVM, void *pvR3, RTR0PTR pvR0, RTHCPHYS HCPhys, size_t cb,430 const char *pszDesc, PRTGCPTR pGCPtr)431 {432 LogFlow(("MMR3HyperMapHCPhys: pvR3=%p pvR0=%p HCPhys=%RHp cb=%d pszDesc=%p:{%s} pGCPtr=%p\n",433 pvR3, pvR0, HCPhys, (int)cb, pszDesc, pszDesc, pGCPtr));434 435 /*436 * Validate input.437 */438 AssertReturn(RT_ALIGN_P(pvR3, PAGE_SIZE) == pvR3, VERR_INVALID_PARAMETER);439 AssertReturn(RT_ALIGN_T(pvR0, PAGE_SIZE, RTR0PTR) == pvR0, VERR_INVALID_PARAMETER);440 AssertReturn(RT_ALIGN_T(HCPhys, PAGE_SIZE, RTHCPHYS) == HCPhys, VERR_INVALID_PARAMETER);441 AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);442 443 /*444 * Add the memory to the hypervisor area.445 */446 uint32_t cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);447 AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);448 RTGCPTR GCPtr;449 PMMLOOKUPHYPER pLookup;450 int rc = mmR3HyperMap(pVM, cbAligned, pszDesc, &GCPtr, &pLookup);451 if (RT_SUCCESS(rc))452 {453 pLookup->enmType = MMLOOKUPHYPERTYPE_HCPHYS;454 pLookup->u.HCPhys.pvR3 = pvR3;455 pLookup->u.HCPhys.pvR0 = pvR0;456 pLookup->u.HCPhys.HCPhys = HCPhys;457 458 /*459 * Update the page table.460 */461 if (pVM->mm.s.fPGMInitialized)462 rc = PGMMap(pVM, GCPtr, HCPhys, cbAligned, 0);463 if (RT_SUCCESS(rc))464 *pGCPtr = GCPtr;465 }466 return rc;467 }468 469 470 /**471 * Maps contiguous GC physical memory into the hypervisor region in the GC.472 *473 * @return VBox status code.474 *475 * @param pVM The cross context VM structure.476 * @param GCPhys Guest context physical address of the memory to be mapped. Must be page aligned!477 * @param cb Size of the memory. Will be rounded up to nearest page.478 * @param pszDesc Mapping description.479 * @param pGCPtr Where to store the GC address.480 */481 VMMR3DECL(int) MMR3HyperMapGCPhys(PVM pVM, RTGCPHYS GCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr)482 {483 LogFlow(("MMR3HyperMapGCPhys: GCPhys=%RGp cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", GCPhys, (int)cb, pszDesc, pszDesc, pGCPtr));484 485 /*486 * Validate input.487 */488 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);489 AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);490 491 /*492 * Add the memory to the hypervisor area.493 */494 cb = RT_ALIGN_Z(cb, PAGE_SIZE);495 RTGCPTR GCPtr;496 PMMLOOKUPHYPER pLookup;497 int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);498 if (RT_SUCCESS(rc))499 {500 pLookup->enmType = MMLOOKUPHYPERTYPE_GCPHYS;501 pLookup->u.GCPhys.GCPhys = GCPhys;502 503 /*504 * Update the page table.505 */506 for (unsigned off = 0; off < cb; off += PAGE_SIZE)507 {508 RTHCPHYS HCPhys;509 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys + off, &HCPhys);510 AssertRC(rc);511 if (RT_FAILURE(rc))512 {513 AssertMsgFailed(("rc=%Rrc GCPhys=%RGp off=%#x %s\n", rc, GCPhys, off, pszDesc));514 break;515 }516 if (pVM->mm.s.fPGMInitialized)517 {518 rc = PGMMap(pVM, GCPtr + off, HCPhys, PAGE_SIZE, 0);519 AssertRC(rc);520 if (RT_FAILURE(rc))521 {522 AssertMsgFailed(("rc=%Rrc GCPhys=%RGp off=%#x %s\n", rc, GCPhys, off, pszDesc));523 break;524 }525 }526 }527 528 if (RT_SUCCESS(rc) && pGCPtr)529 *pGCPtr = GCPtr;530 }531 return rc;532 }533 534 535 /**536 * Maps a portion of an MMIO2 region into the hypervisor region.537 *538 * Callers of this API must never deregister the MMIO2 region before the539 * VM is powered off. 
If this becomes a requirement MMR3HyperUnmapMMIO2540 * API will be needed to perform cleanups.541 *542 * @return VBox status code.543 *544 * @param pVM The cross context VM structure.545 * @param pDevIns The device owning the MMIO2 memory.546 * @param iSubDev The sub-device number.547 * @param iRegion The region.548 * @param off The offset into the region. Will be rounded down to closest page boundary.549 * @param cb The number of bytes to map. Will be rounded up to the closest page boundary.550 * @param pszDesc Mapping description.551 * @param pRCPtr Where to store the RC address.552 */553 VMMR3DECL(int) MMR3HyperMapMMIO2(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb,554 const char *pszDesc, PRTRCPTR pRCPtr)555 {556 LogFlow(("MMR3HyperMapMMIO2: pDevIns=%p iSubDev=%#x iRegion=%#x off=%RGp cb=%RGp pszDesc=%p:{%s} pRCPtr=%p\n",557 pDevIns, iSubDev, iRegion, off, cb, pszDesc, pszDesc, pRCPtr));558 int rc;559 560 /*561 * Validate input.562 */563 AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);564 AssertReturn(off + cb > off, VERR_INVALID_PARAMETER);565 uint32_t const offPage = off & PAGE_OFFSET_MASK;566 off &= ~(RTGCPHYS)PAGE_OFFSET_MASK;567 cb += offPage;568 cb = RT_ALIGN_Z(cb, PAGE_SIZE);569 const RTGCPHYS offEnd = off + cb;570 AssertReturn(offEnd > off, VERR_INVALID_PARAMETER);571 for (RTGCPHYS offCur = off; offCur < offEnd; offCur += PAGE_SIZE)572 {573 RTHCPHYS HCPhys;574 rc = PGMR3PhysMMIO2GetHCPhys(pVM, pDevIns, iSubDev, iRegion, offCur, &HCPhys);575 AssertMsgRCReturn(rc, ("rc=%Rrc - iSubDev=%#x iRegion=%#x off=%RGp\n", rc, iSubDev, iRegion, off), rc);576 }577 578 /*579 * Add the memory to the hypervisor area.580 */581 RTGCPTR GCPtr;582 PMMLOOKUPHYPER pLookup;583 rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);584 if (RT_SUCCESS(rc))585 {586 pLookup->enmType = MMLOOKUPHYPERTYPE_MMIO2;587 pLookup->u.MMIO2.pDevIns = pDevIns;588 pLookup->u.MMIO2.iSubDev = iSubDev;589 pLookup->u.MMIO2.iRegion = iRegion;590 pLookup->u.MMIO2.off = off;591 592 /*593 * Update the page table.594 */595 if (pVM->mm.s.fPGMInitialized)596 {597 for (RTGCPHYS offCur = off; offCur < offEnd; offCur += PAGE_SIZE)598 {599 RTHCPHYS HCPhys;600 rc = PGMR3PhysMMIO2GetHCPhys(pVM, pDevIns, iSubDev, iRegion, offCur, &HCPhys);601 AssertRCReturn(rc, rc);602 rc = PGMMap(pVM, GCPtr + (offCur - off), HCPhys, PAGE_SIZE, 0);603 if (RT_FAILURE(rc))604 {605 AssertMsgFailed(("rc=%Rrc offCur=%RGp %s\n", rc, offCur, pszDesc));606 break;607 }608 }609 }610 611 if (RT_SUCCESS(rc))612 {613 GCPtr |= offPage;614 *pRCPtr = GCPtr;615 AssertLogRelReturn(*pRCPtr == GCPtr, VERR_INTERNAL_ERROR);616 }617 }618 return rc;619 }620 621 #endif /* !PGM_WITHOUT_MAPPINGS */622 246 623 247 /** … … 634 258 * @param pGCPtr Where to store the GC address corresponding to pvR3. 
635 259 */ 636 VMMR3DECL(int)MMR3HyperMapPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cPages, PCSUPPAGE paPages,637 const char *pszDesc, PRTGCPTR pGCPtr)260 static int MMR3HyperMapPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cPages, PCSUPPAGE paPages, 261 const char *pszDesc, PRTGCPTR pGCPtr) 638 262 { 639 263 LogFlow(("MMR3HyperMapPages: pvR3=%p pvR0=%p cPages=%zu paPages=%p pszDesc=%p:{%s} pGCPtr=%p\n", … … 675 299 } 676 300 677 #ifndef PGM_WITHOUT_MAPPINGS 678 if (pVM->mm.s.fPGMInitialized) 679 { 680 for (size_t i = 0; i < cPages; i++) 681 { 682 rc = PGMMap(pVM, GCPtr + (i << PAGE_SHIFT), paHCPhysPages[i], PAGE_SIZE, 0); 683 AssertRCBreak(rc); 684 } 685 } 686 #endif 687 if (RT_SUCCESS(rc)) 688 { 689 pLookup->enmType = MMLOOKUPHYPERTYPE_LOCKED; 690 pLookup->u.Locked.pvR3 = pvR3; 691 pLookup->u.Locked.pvR0 = pvR0; 692 pLookup->u.Locked.paHCPhysPages = paHCPhysPages; 693 694 /* done. */ 695 *pGCPtr = GCPtr; 696 return rc; 697 } 698 /* Don't care about failure clean, we're screwed if this fails anyway. */ 699 } 301 pLookup->enmType = MMLOOKUPHYPERTYPE_LOCKED; 302 pLookup->u.Locked.pvR3 = pvR3; 303 pLookup->u.Locked.pvR0 = pvR0; 304 pLookup->u.Locked.paHCPhysPages = paHCPhysPages; 305 306 /* done. */ 307 *pGCPtr = GCPtr; 308 return rc; 309 } 310 /* Don't care about failure clean, we're screwed if this fails anyway. */ 700 311 } 701 312 702 313 return rc; 703 314 } 704 705 706 #ifndef PGM_WITHOUT_MAPPINGS707 /**708 * Reserves a hypervisor memory area.709 * Most frequent usage is fence pages and dynamically mappings like the guest PD and PDPT.710 *711 * @return VBox status code.712 *713 * @param pVM The cross context VM structure.714 * @param cb Size of the memory. Will be rounded up to nearest page.715 * @param pszDesc Mapping description.716 * @param pGCPtr Where to store the assigned GC address. Optional.717 */718 VMMR3DECL(int) MMR3HyperReserve(PVM pVM, unsigned cb, const char *pszDesc, PRTGCPTR pGCPtr)719 {720 LogFlow(("MMR3HyperMapHCRam: cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", (int)cb, pszDesc, pszDesc, pGCPtr));721 722 /*723 * Validate input.724 */725 if ( cb <= 0726 || !pszDesc727 || !*pszDesc)728 {729 AssertMsgFailed(("Invalid parameter\n"));730 return VERR_INVALID_PARAMETER;731 }732 733 /*734 * Add the memory to the hypervisor area.735 */736 RTGCPTR GCPtr;737 PMMLOOKUPHYPER pLookup;738 int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);739 if (RT_SUCCESS(rc))740 {741 pLookup->enmType = MMLOOKUPHYPERTYPE_DYNAMIC;742 if (pGCPtr)743 *pGCPtr = GCPtr;744 return VINF_SUCCESS;745 }746 return rc;747 }748 #endif /* !PGM_WITHOUT_MAPPINGS */749 315 750 316 … … 755 321 * @param pVM The cross context VM structure. 756 322 */ 757 VMMR3DECL(int) MMR3HyperReserveFence(PVM pVM) 758 { 759 #ifndef PGM_WITHOUT_MAPPINGS 760 return MMR3HyperReserve(pVM, cb, "fence", NULL); 761 #else 323 static int MMR3HyperReserveFence(PVM pVM) 324 { 762 325 RT_NOREF(pVM); 763 326 return VINF_SUCCESS; 764 #endif765 327 } 766 328 … … 1139 701 1140 702 /** 1141 * Set / unset guard status on one or more hyper heap pages.1142 *1143 * @returns VBox status code (first failure).1144 * @param pVM The cross context VM structure.1145 * @param pvStart The hyper heap page address. Must be page1146 * aligned.1147 * @param cb The number of bytes. 
Must be page aligned.1148 * @param fSet Whether to set or unset guard page status.1149 */1150 VMMR3DECL(int) MMR3HyperSetGuard(PVM pVM, void *pvStart, size_t cb, bool fSet)1151 {1152 /*1153 * Validate input.1154 */1155 AssertReturn(!((uintptr_t)pvStart & PAGE_OFFSET_MASK), VERR_INVALID_POINTER);1156 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);1157 AssertReturn(cb <= UINT32_MAX, VERR_INVALID_PARAMETER);1158 PMMLOOKUPHYPER pLookup = mmR3HyperLookupR3(pVM, pvStart);1159 AssertReturn(pLookup, VERR_INVALID_PARAMETER);1160 AssertReturn(pLookup->enmType == MMLOOKUPHYPERTYPE_LOCKED, VERR_INVALID_PARAMETER);1161 1162 /*1163 * Get down to business.1164 * Note! We quietly ignore errors from the support library since the1165 * protection stuff isn't possible to implement on all platforms.1166 */1167 uint8_t *pbR3 = (uint8_t *)pLookup->u.Locked.pvR3;1168 RTR0PTR R0Ptr = pLookup->u.Locked.pvR0 != (uintptr_t)pLookup->u.Locked.pvR31169 ? pLookup->u.Locked.pvR01170 : NIL_RTR0PTR;1171 uint32_t off = (uint32_t)((uint8_t *)pvStart - pbR3);1172 int rc;1173 if (fSet)1174 {1175 #ifndef PGM_WITHOUT_MAPPINGS1176 rc = PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, pvStart), cb, 0);1177 #else1178 rc = VINF_SUCCESS;1179 #endif1180 SUPR3PageProtect(pbR3, R0Ptr, off, (uint32_t)cb, RTMEM_PROT_NONE);1181 }1182 else1183 {1184 #ifndef PGM_WITHOUT_MAPPINGS1185 rc = PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, pvStart), cb, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);1186 #else1187 rc = VINF_SUCCESS;1188 #endif1189 SUPR3PageProtect(pbR3, R0Ptr, off, (uint32_t)cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE);1190 }1191 return rc;1192 }1193 1194 1195 /**1196 703 * Convert hypervisor HC virtual address to HC physical address. 1197 704 * … … 1244 751 } 1245 752 1246 #ifndef PGM_WITHOUT_MAPPINGS1247 1248 /**1249 * Implements the hcphys-not-found return case of MMR3HyperQueryInfoFromHCPhys.1250 *1251 * @returns VINF_SUCCESS, VINF_BUFFER_OVERFLOW.1252 * @param pVM The cross context VM structure.1253 * @param HCPhys The host physical address to look for.1254 * @param pLookup The HMA lookup entry corresponding to HCPhys.1255 * @param pszWhat Where to return the description.1256 * @param cbWhat Size of the return buffer.1257 * @param pcbAlloc Where to return the size of whatever it is.1258 */1259 static int mmR3HyperQueryInfoFromHCPhysFound(PVM pVM, RTHCPHYS HCPhys, PMMLOOKUPHYPER pLookup,1260 char *pszWhat, size_t cbWhat, uint32_t *pcbAlloc)1261 {1262 NOREF(pVM); NOREF(HCPhys);1263 *pcbAlloc = pLookup->cb;1264 int rc = RTStrCopy(pszWhat, cbWhat, pLookup->pszDesc);1265 return rc == VERR_BUFFER_OVERFLOW ? 
VINF_BUFFER_OVERFLOW : rc;1266 }1267 1268 1269 /**1270 * Scans the HMA for the physical page and reports back a description if found.1271 *1272 * @returns VINF_SUCCESS, VINF_BUFFER_OVERFLOW, VERR_NOT_FOUND.1273 * @param pVM The cross context VM structure.1274 * @param HCPhys The host physical address to look for.1275 * @param pszWhat Where to return the description.1276 * @param cbWhat Size of the return buffer.1277 * @param pcbAlloc Where to return the size of whatever it is.1278 */1279 VMMR3_INT_DECL(int) MMR3HyperQueryInfoFromHCPhys(PVM pVM, RTHCPHYS HCPhys, char *pszWhat, size_t cbWhat, uint32_t *pcbAlloc)1280 {1281 RTHCPHYS HCPhysPage = HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK;1282 PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uint8_t *)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);1283 for (;;)1284 {1285 switch (pLookup->enmType)1286 {1287 case MMLOOKUPHYPERTYPE_LOCKED:1288 {1289 uint32_t i = pLookup->cb >> PAGE_SHIFT;1290 while (i-- > 0)1291 if (pLookup->u.Locked.paHCPhysPages[i] == HCPhysPage)1292 return mmR3HyperQueryInfoFromHCPhysFound(pVM, HCPhys, pLookup, pszWhat, cbWhat, pcbAlloc);1293 break;1294 }1295 1296 case MMLOOKUPHYPERTYPE_HCPHYS:1297 {1298 if (pLookup->u.HCPhys.HCPhys - HCPhysPage < pLookup->cb)1299 return mmR3HyperQueryInfoFromHCPhysFound(pVM, HCPhys, pLookup, pszWhat, cbWhat, pcbAlloc);1300 break;1301 }1302 1303 case MMLOOKUPHYPERTYPE_MMIO2:1304 case MMLOOKUPHYPERTYPE_GCPHYS:1305 case MMLOOKUPHYPERTYPE_DYNAMIC:1306 {1307 /* brute force. */1308 uint32_t i = pLookup->cb >> PAGE_SHIFT;1309 while (i-- > 0)1310 {1311 RTGCPTR GCPtr = pLookup->off + pVM->mm.s.pvHyperAreaGC;1312 RTHCPHYS HCPhysCur;1313 int rc = PGMMapGetPage(pVM, GCPtr, NULL, &HCPhysCur);1314 if (RT_SUCCESS(rc) && HCPhysCur == HCPhysPage)1315 return mmR3HyperQueryInfoFromHCPhysFound(pVM, HCPhys, pLookup, pszWhat, cbWhat, pcbAlloc);1316 }1317 break;1318 }1319 default:1320 AssertMsgFailed(("enmType=%d\n", pLookup->enmType));1321 break;1322 }1323 1324 /* next */1325 if ((unsigned)pLookup->offNext == NIL_OFFSET)1326 break;1327 pLookup = (PMMLOOKUPHYPER)((uint8_t *)pLookup + pLookup->offNext);1328 }1329 return VERR_NOT_FOUND;1330 }1331 1332 1333 /**1334 * Read hypervisor memory from GC virtual address.1335 *1336 * @returns VBox status code.1337 * @param pVM The cross context VM structure.1338 * @param pvDst Destination address (HC of course).1339 * @param GCPtr GC virtual address.1340 * @param cb Number of bytes to read.1341 *1342 * @remarks For DBGF only.1343 */1344 VMMR3DECL(int) MMR3HyperReadGCVirt(PVM pVM, void *pvDst, RTGCPTR GCPtr, size_t cb)1345 {1346 if (GCPtr - pVM->mm.s.pvHyperAreaGC >= pVM->mm.s.cbHyperArea)1347 return VERR_INVALID_POINTER;1348 return PGMR3MapRead(pVM, pvDst, GCPtr, cb);1349 }1350 1351 #endif /* !PGM_WITHOUT_MAPPINGS */1352 753 1353 754 /** -
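In MMHyper.cpp the mapping-related MMR3Hyper* APIs are either deleted outright or demoted from exported VMMR3DECL functions to static internal helpers. MMR3HyperReserveFence, for instance, ends up as a static no-op stub; the new-side code, reconstructed from the flattened hunk above:

    static int MMR3HyperReserveFence(PVM pVM)
    {
        RT_NOREF(pVM);
        return VINF_SUCCESS;
    }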
trunk/src/VBox/VMM/VMMR3/PDMLdr.cpp
r86510 → r91854, one hunk (old lines 565-573, new lines 565-572); '-' marks removed lines, '+' added:

      {
          RTGCPTR GCPtr;
-         rc = MMR3HyperMapPages(pVM, pModule->pvBits, NIL_RTR0PTR,
-                                cPages, paPages, pModule->szName, &GCPtr);
+         rc = VERR_NOT_IMPLEMENTED; //MMR3HyperMapPages(pVM, pModule->pvBits, NIL_RTR0PTR, cPages, paPages, pModule->szName, &GCPtr);
          if (RT_SUCCESS(rc))
          {
-             MMR3HyperReserveFence(pVM);
+             //MMR3HyperReserveFence(pVM);

              /*
trunk/src/VBox/VMM/VMMR3/PGM.cpp
r91848 r91854 824 824 pVM->pgm.s.enmHostMode = SUPPAGINGMODE_INVALID; 825 825 pVM->pgm.s.GCPhys4MBPSEMask = RT_BIT_64(32) - 1; /* default; checked later */ 826 #ifndef PGM_WITHOUT_MAPPINGS827 pVM->pgm.s.GCPtrPrevRamRangeMapping = MM_HYPER_AREA_ADDRESS;828 #endif829 826 830 827 rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "RamPreAlloc", &pVM->pgm.s.fRamPreAlloc, … … 1005 1002 "Add 'nost' if the statistics are unwanted, use together with 'all' or explicit selection.", 1006 1003 pgmR3InfoHandlers); 1007 #ifndef PGM_WITHOUT_MAPPINGS1008 DBGFR3InfoRegisterInternal(pVM, "mappings",1009 "Dumps guest mappings.",1010 pgmR3MapInfo);1011 #endif1012 1004 1013 1005 pgmR3InitStats(pVM); … … 1079 1071 1080 1072 pVM->pgm.s.enmHostMode = SUPPAGINGMODE_INVALID; 1081 1082 #ifndef PGM_WITHOUT_MAPPINGS1083 /*1084 * Allocate static mapping space for whatever the cr3 register1085 * points to and in the case of PAE mode to the 4 PDs.1086 */1087 int rc = MMR3HyperReserve(pVM, PAGE_SIZE * 5, "CR3 mapping", &pVM->pgm.s.GCPtrCR3Mapping);1088 if (RT_FAILURE(rc))1089 {1090 AssertMsgFailed(("Failed to reserve two pages for cr mapping in HMA, rc=%Rrc\n", rc));1091 return rc;1092 }1093 MMR3HyperReserveFence(pVM);1094 #endif1095 1096 #if 01097 /*1098 * Allocate pages for the three possible intermediate contexts1099 * (AMD64, PAE and plain 32-Bit). We maintain all three contexts1100 * for the sake of simplicity. The AMD64 uses the PAE for the1101 * lower levels, making the total number of pages 11 (3 + 7 + 1).1102 *1103 * We assume that two page tables will be enought for the core code1104 * mappings (HC virtual and identity).1105 */1106 pVM->pgm.s.pInterPD = (PX86PD)MMR3PageAllocLow(pVM); AssertReturn(pVM->pgm.s.pInterPD, VERR_NO_PAGE_MEMORY);1107 pVM->pgm.s.apInterPTs[0] = (PX86PT)MMR3PageAllocLow(pVM); AssertReturn(pVM->pgm.s.apInterPTs[0], VERR_NO_PAGE_MEMORY);1108 pVM->pgm.s.apInterPTs[1] = (PX86PT)MMR3PageAllocLow(pVM); AssertReturn(pVM->pgm.s.apInterPTs[1], VERR_NO_PAGE_MEMORY);1109 pVM->pgm.s.apInterPaePTs[0] = (PX86PTPAE)MMR3PageAlloc(pVM); AssertReturn(pVM->pgm.s.apInterPaePTs[0], VERR_NO_PAGE_MEMORY);1110 pVM->pgm.s.apInterPaePTs[1] = (PX86PTPAE)MMR3PageAlloc(pVM); AssertReturn(pVM->pgm.s.apInterPaePTs[1], VERR_NO_PAGE_MEMORY);1111 pVM->pgm.s.apInterPaePDs[0] = (PX86PDPAE)MMR3PageAlloc(pVM); AssertReturn(pVM->pgm.s.apInterPaePDs[0], VERR_NO_PAGE_MEMORY);1112 pVM->pgm.s.apInterPaePDs[1] = (PX86PDPAE)MMR3PageAlloc(pVM); AssertReturn(pVM->pgm.s.apInterPaePDs[1], VERR_NO_PAGE_MEMORY);1113 pVM->pgm.s.apInterPaePDs[2] = (PX86PDPAE)MMR3PageAlloc(pVM); AssertReturn(pVM->pgm.s.apInterPaePDs[2], VERR_NO_PAGE_MEMORY);1114 pVM->pgm.s.apInterPaePDs[3] = (PX86PDPAE)MMR3PageAlloc(pVM); AssertReturn(pVM->pgm.s.apInterPaePDs[3], VERR_NO_PAGE_MEMORY);1115 pVM->pgm.s.pInterPaePDPT = (PX86PDPT)MMR3PageAllocLow(pVM); AssertReturn(pVM->pgm.s.pInterPaePDPT, VERR_NO_PAGE_MEMORY);1116 pVM->pgm.s.pInterPaePDPT64 = (PX86PDPT)MMR3PageAllocLow(pVM); AssertReturn(pVM->pgm.s.pInterPaePDPT64, VERR_NO_PAGE_MEMORY);1117 pVM->pgm.s.pInterPaePML4 = (PX86PML4)MMR3PageAllocLow(pVM); AssertReturn(pVM->pgm.s.pInterPaePML4, VERR_NO_PAGE_MEMORY);1118 1119 pVM->pgm.s.HCPhysInterPD = MMPage2Phys(pVM, pVM->pgm.s.pInterPD);1120 AssertRelease(pVM->pgm.s.HCPhysInterPD != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPD & PAGE_OFFSET_MASK));1121 pVM->pgm.s.HCPhysInterPaePDPT = MMPage2Phys(pVM, pVM->pgm.s.pInterPaePDPT);1122 AssertRelease(pVM->pgm.s.HCPhysInterPaePDPT != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPaePDPT & PAGE_OFFSET_MASK));1123 
pVM->pgm.s.HCPhysInterPaePML4 = MMPage2Phys(pVM, pVM->pgm.s.pInterPaePML4);1124 AssertRelease(pVM->pgm.s.HCPhysInterPaePML4 != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPaePML4 & PAGE_OFFSET_MASK) && pVM->pgm.s.HCPhysInterPaePML4 < 0xffffffff);1125 1126 /*1127 * Initialize the pages, setting up the PML4 and PDPT for repetitive 4GB action.1128 */1129 ASMMemZeroPage(pVM->pgm.s.pInterPD);1130 ASMMemZeroPage(pVM->pgm.s.apInterPTs[0]);1131 ASMMemZeroPage(pVM->pgm.s.apInterPTs[1]);1132 1133 ASMMemZeroPage(pVM->pgm.s.apInterPaePTs[0]);1134 ASMMemZeroPage(pVM->pgm.s.apInterPaePTs[1]);1135 1136 ASMMemZeroPage(pVM->pgm.s.pInterPaePDPT);1137 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.apInterPaePDs); i++)1138 {1139 ASMMemZeroPage(pVM->pgm.s.apInterPaePDs[i]);1140 pVM->pgm.s.pInterPaePDPT->a[i].u = X86_PDPE_P | PGM_PLXFLAGS_PERMANENT1141 | MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[i]);1142 }1143 1144 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.pInterPaePDPT64->a); i++)1145 {1146 const unsigned iPD = i % RT_ELEMENTS(pVM->pgm.s.apInterPaePDs);1147 pVM->pgm.s.pInterPaePDPT64->a[i].u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A | PGM_PLXFLAGS_PERMANENT1148 | MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[iPD]);1149 }1150 1151 RTHCPHYS HCPhysInterPaePDPT64 = MMPage2Phys(pVM, pVM->pgm.s.pInterPaePDPT64);1152 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.pInterPaePML4->a); i++)1153 pVM->pgm.s.pInterPaePML4->a[i].u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A | PGM_PLXFLAGS_PERMANENT1154 | HCPhysInterPaePDPT64;1155 #endif1156 1073 1157 1074 /* … … 1428 1345 PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2InvalidPhys, "/PGM/CPU%u/RZ/Trap0e/Time2/InvalidPhys", "Profiling of the Trap0eHandler body when the cause is access to an invalid physical guest address."); 1429 1346 PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2MakeWritable, "/PGM/CPU%u/RZ/Trap0e/Time2/MakeWritable", "Profiling of the Trap0eHandler body when the cause is that a page needed to be made writeable."); 1430 PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2Mapping, "/PGM/CPU%u/RZ/Trap0e/Time2/Mapping", "Profiling of the Trap0eHandler body when the cause is related to the guest mappings.");1431 1347 PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2Misc, "/PGM/CPU%u/RZ/Trap0e/Time2/Misc", "Profiling of the Trap0eHandler body when the cause is not known."); 1432 1348 PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2OutOfSync, "/PGM/CPU%u/RZ/Trap0e/Time2/OutOfSync", "Profiling of the Trap0eHandler body when the cause is an out-of-sync page."); … … 1438 1354 PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2Wp0RoUsUnhack, "/PGM/CPU%u/RZ/Trap0e/Time2/WP0R0USUnhack", "Profiling of the Trap0eHandler body when the cause is CR0.WP and netware hack to be disabled."); 1439 1355 PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eConflicts, "/PGM/CPU%u/RZ/Trap0e/Conflicts", "The number of times #PF was caused by an undetected conflict."); 1440 PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eHandlersMapping, "/PGM/CPU%u/RZ/Trap0e/Handlers/Mapping", "Number of traps due to access handlers in mappings.");1441 1356 PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eHandlersOutOfSync, "/PGM/CPU%u/RZ/Trap0e/Handlers/OutOfSync", "Number of traps due to out-of-sync handled pages."); 1442 1357 PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eHandlersPhysAll, "/PGM/CPU%u/RZ/Trap0e/Handlers/PhysAll", "Number of traps due to physical all-access handlers."); … … 1457 1372 PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eSNXE, "/PGM/CPU%u/RZ/Trap0e/Err/Supervisor/NXE", "Number of supervisor mode NXE page faults."); 
1458 1373 PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eGuestPF, "/PGM/CPU%u/RZ/Trap0e/GuestPF", "Number of real guest page faults."); 1459 PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eGuestPFMapping, "/PGM/CPU%u/RZ/Trap0e/GuestPF/InMapping", "Number of real guest page faults in a mapping.");1460 1374 PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eWPEmulInRZ, "/PGM/CPU%u/RZ/Trap0e/WP/InRZ", "Number of guest page faults due to X86_CR0_WP emulation."); 1461 1375 PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eWPEmulToR3, "/PGM/CPU%u/RZ/Trap0e/WP/ToR3", "Number of guest page faults due to X86_CR0_WP emulation (forward to R3 for emulation)."); … … 1541 1455 PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePage4MBPages, "/PGM/CPU%u/RZ/InvalidatePage/4MBPages", "The number of times PGMInvalidatePage() was called for a 4MB page."); 1542 1456 PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePage4MBPagesSkip, "/PGM/CPU%u/RZ/InvalidatePage/4MBPagesSkip","The number of times PGMInvalidatePage() skipped a 4MB page."); 1543 PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePagePDMappings, "/PGM/CPU%u/RZ/InvalidatePage/PDMappings", "The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict).");1544 1457 PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePagePDNAs, "/PGM/CPU%u/RZ/InvalidatePage/PDNAs", "The number of times PGMInvalidatePage() was called for a not accessed page directory."); 1545 1458 PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePagePDNPs, "/PGM/CPU%u/RZ/InvalidatePage/PDNPs", "The number of times PGMInvalidatePage() was called for a not present page directory."); … … 1589 1502 PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePage4MBPages, "/PGM/CPU%u/R3/InvalidatePage/4MBPages", "The number of times PGMInvalidatePage() was called for a 4MB page."); 1590 1503 PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePage4MBPagesSkip, "/PGM/CPU%u/R3/InvalidatePage/4MBPagesSkip","The number of times PGMInvalidatePage() skipped a 4MB page."); 1591 PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePagePDMappings, "/PGM/CPU%u/R3/InvalidatePage/PDMappings", "The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict).");1592 1504 PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePagePDNAs, "/PGM/CPU%u/R3/InvalidatePage/PDNAs", "The number of times PGMInvalidatePage() was called for a not accessed page directory."); 1593 1505 PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePagePDNPs, "/PGM/CPU%u/R3/InvalidatePage/PDNPs", "The number of times PGMInvalidatePage() was called for a not present page directory."); … … 1617 1529 1618 1530 /** 1619 * Init the PGM bits that rely on VMMR0 and MM to be fully initialized.1620 *1621 * The dynamic mapping area will also be allocated and initialized at this1622 * time. 
We could allocate it during PGMR3Init of course, but the mapping1623 * wouldn't be allocated at that time preventing us from setting up the1624 * page table entries with the dummy page.1625 *1626 * @returns VBox status code.1627 * @param pVM The cross context VM structure.1628 */1629 VMMR3DECL(int) PGMR3InitDynMap(PVM pVM)1630 {1631 #ifndef PGM_WITHOUT_MAPPINGS1632 RTGCPTR GCPtr;1633 int rc;1634 1635 /*1636 * Reserve space for the dynamic mappings.1637 */1638 rc = MMR3HyperReserve(pVM, MM_HYPER_DYNAMIC_SIZE, "Dynamic mapping", &GCPtr);1639 if (RT_SUCCESS(rc))1640 pVM->pgm.s.pbDynPageMapBaseGC = GCPtr;1641 1642 if ( RT_SUCCESS(rc)1643 && (pVM->pgm.s.pbDynPageMapBaseGC >> X86_PD_PAE_SHIFT) != ((pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE - 1) >> X86_PD_PAE_SHIFT))1644 {1645 rc = MMR3HyperReserve(pVM, MM_HYPER_DYNAMIC_SIZE, "Dynamic mapping not crossing", &GCPtr);1646 if (RT_SUCCESS(rc))1647 pVM->pgm.s.pbDynPageMapBaseGC = GCPtr;1648 }1649 if (RT_SUCCESS(rc))1650 {1651 AssertRelease((pVM->pgm.s.pbDynPageMapBaseGC >> X86_PD_PAE_SHIFT) == ((pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE - 1) >> X86_PD_PAE_SHIFT));1652 MMR3HyperReserveFence(pVM);1653 }1654 return rc;1655 #else1656 RT_NOREF(pVM);1657 return VINF_SUCCESS;1658 #endif1659 }1660 1661 1662 /**1663 1531 * Ring-3 init finalizing. 1664 1532 * … … 1668 1536 VMMR3DECL(int) PGMR3InitFinalize(PVM pVM) 1669 1537 { 1670 #ifndef PGM_WITHOUT_MAPPINGS1671 int rc = VERR_IPE_UNINITIALIZED_STATUS; /* (MSC incorrectly thinks it can be used uninitialized) */1672 1673 /*1674 * Reserve space for the dynamic mappings.1675 * Initialize the dynamic mapping pages with dummy pages to simply the cache.1676 */1677 /* get the pointer to the page table entries. */1678 PPGMMAPPING pMapping = pgmGetMapping(pVM, pVM->pgm.s.pbDynPageMapBaseGC);1679 AssertRelease(pMapping);1680 const uintptr_t off = pVM->pgm.s.pbDynPageMapBaseGC - pMapping->GCPtr;1681 const unsigned iPT = off >> X86_PD_SHIFT;1682 const unsigned iPG = (off >> X86_PT_SHIFT) & X86_PT_MASK;1683 pVM->pgm.s.paDynPageMap32BitPTEsGC = pMapping->aPTs[iPT].pPTRC + iPG * sizeof(pMapping->aPTs[0].pPTR3->a[0]);1684 pVM->pgm.s.paDynPageMapPaePTEsGC = pMapping->aPTs[iPT].paPaePTsRC + iPG * sizeof(pMapping->aPTs[0].paPaePTsR3->a[0]);1685 1686 /* init cache area */1687 RTHCPHYS HCPhysDummy = MMR3PageDummyHCPhys(pVM);1688 for (uint32_t offDynMap = 0; offDynMap < MM_HYPER_DYNAMIC_SIZE; offDynMap += PAGE_SIZE)1689 {1690 rc = PGMMap(pVM, pVM->pgm.s.pbDynPageMapBaseGC + offDynMap, HCPhysDummy, PAGE_SIZE, 0);1691 AssertRCReturn(rc, rc);1692 }1693 #endif1694 1695 1538 /* 1696 1539 * Determine the max physical address width (MAXPHYADDR) and apply it to … … 1772 1615 * Allocate memory if we're supposed to do that. 1773 1616 */ 1774 #ifdef PGM_WITHOUT_MAPPINGS1775 1617 int rc = VINF_SUCCESS; 1776 #endif1777 1618 if (pVM->pgm.s.fRamPreAlloc) 1778 1619 rc = pgmR3PhysRamPreAllocate(pVM); … … 1870 1711 pgmR3PhysRelinkRamRanges(pVM); 1871 1712 1872 #ifndef PGM_WITHOUT_MAPPINGS1873 1874 /*1875 * Update the two page directories with all page table mappings.1876 * (One or more of them have changed, that's why we're here.)1877 */1878 pVM->pgm.s.pMappingsRC = MMHyperR3ToRC(pVM, pVM->pgm.s.pMappingsR3);1879 for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur->pNextR3; pCur = pCur->pNextR3)1880 pCur->pNextRC = MMHyperR3ToRC(pVM, pCur->pNextR3);1881 1882 /* Relocate GC addresses of Page Tables. 
*/1883 for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)1884 {1885 for (RTHCUINT i = 0; i < pCur->cPTs; i++)1886 {1887 pCur->aPTs[i].pPTRC = MMHyperR3ToRC(pVM, pCur->aPTs[i].pPTR3);1888 pCur->aPTs[i].paPaePTsRC = MMHyperR3ToRC(pVM, pCur->aPTs[i].paPaePTsR3);1889 }1890 }1891 1892 /*1893 * Dynamic page mapping area.1894 */1895 pVM->pgm.s.paDynPageMap32BitPTEsGC += offDelta;1896 pVM->pgm.s.paDynPageMapPaePTEsGC += offDelta;1897 pVM->pgm.s.pbDynPageMapBaseGC += offDelta;1898 1899 if (pVM->pgm.s.pRCDynMap)1900 {1901 pVM->pgm.s.pRCDynMap += offDelta;1902 PPGMRCDYNMAP pDynMap = (PPGMRCDYNMAP)MMHyperRCToCC(pVM, pVM->pgm.s.pRCDynMap);1903 1904 pDynMap->paPages += offDelta;1905 PPGMRCDYNMAPENTRY paPages = (PPGMRCDYNMAPENTRY)MMHyperRCToCC(pVM, pDynMap->paPages);1906 1907 for (uint32_t iPage = 0; iPage < pDynMap->cPages; iPage++)1908 {1909 paPages[iPage].pvPage += offDelta;1910 paPages[iPage].uPte.pLegacy += offDelta;1911 paPages[iPage].uPte.pPae += offDelta;1912 }1913 }1914 1915 #endif /* PGM_WITHOUT_MAPPINGS */1916 1917 1713 /* 1918 1714 * The Zero page. … … 1980 1776 1981 1777 PGM_LOCK_VOID(pVM); 1982 1983 /*1984 * Unfix any fixed mappings and disable CR3 monitoring.1985 */1986 pVM->pgm.s.fMappingsFixed = false;1987 pVM->pgm.s.fMappingsFixedRestored = false;1988 pVM->pgm.s.GCPtrMappingFixed = NIL_RTGCPTR;1989 pVM->pgm.s.cbMappingFixed = 0;1990 1778 1991 1779 /* -
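Note on the removed PGMR3InitDynMap code above: the hypervisor reservation for the dynamic mapping area was retried whenever the first attempt straddled a PAE page-directory entry, so that the whole area could be served by a single PAE page table. A minimal standalone sketch of that straddle test, assuming the usual x86 PAE shift of 21 bits; all names here are hypothetical stand-ins, not VBox APIs or constants:

#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

#define MY_PD_PAE_SHIFT 21 /* sketch constant: one PAE page-directory entry spans 2 MB */

/* True when [uBase, uBase + cb - 1] stays under a single PAE PDE, i.e. the
 * range does not cross a 2 MB boundary and fits in one PAE page table. */
static bool rangeFitsOnePaePageTable(uint64_t uBase, uint64_t cb)
{
    return (uBase >> MY_PD_PAE_SHIFT) == ((uBase + cb - 1) >> MY_PD_PAE_SHIFT);
}

int main(void)
{
    assert( rangeFitsOnePaePageTable(0x00200000, 0x10000)); /* well inside one 2 MB slot */
    assert(!rangeFitsOnePaePageTable(0x003ff000, 0x02000)); /* straddles the next 2 MB line */
    return 0;
}

As far as the deleted code shows, a failed test simply caused a second reservation to be made and used instead of the first.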
trunk/src/VBox/VMM/VMMR3/PGMDbg.cpp
r91016 r91854 1104 1104 * @param HCPhys The physical address of the shadow page. 1105 1105 * @param pszDesc The description. 1106 * @param fIsMapping Set if it's a mapping.1107 1106 * @param ppv Where to return the pointer. 1108 1107 */ 1109 static int pgmR3DumpHierarchyShwMapPage(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys, const char *pszDesc, 1110 bool fIsMapping, void const **ppv) 1111 { 1112 void *pvPage; 1113 if (!fIsMapping) 1114 { 1115 PPGMPOOLPAGE pPoolPage = pgmPoolQueryPageForDbg(pState->pVM->pgm.s.pPoolR3, HCPhys); 1116 if (pPoolPage) 1117 { 1118 pState->pHlp->pfnPrintf(pState->pHlp, "%0*llx error! %s at HCPhys=%RHp was not found in the page pool!\n", 1119 pState->cchAddress, pState->u64Address, pszDesc, HCPhys); 1120 return VERR_PGM_POOL_GET_PAGE_FAILED; 1121 } 1122 pvPage = (uint8_t *)pPoolPage->pvPageR3 + (HCPhys & PAGE_OFFSET_MASK); 1123 } 1124 else 1125 { 1126 pvPage = NULL; 1127 #ifndef PGM_WITHOUT_MAPPINGS 1128 for (PPGMMAPPING pMap = pState->pVM->pgm.s.pMappingsR3; pMap; pMap = pMap->pNextR3) 1129 { 1130 uint64_t off = pState->u64Address - pMap->GCPtr; 1131 if (off < pMap->cb) 1132 { 1133 const int iPDE = (uint32_t)(off >> X86_PD_SHIFT); 1134 const int iSub = (int)((off >> X86_PD_PAE_SHIFT) & 1); /* MSC is a pain sometimes */ 1135 if ((iSub ? pMap->aPTs[iPDE].HCPhysPaePT1 : pMap->aPTs[iPDE].HCPhysPaePT0) != HCPhys) 1136 pState->pHlp->pfnPrintf(pState->pHlp, 1137 "%0*llx error! Mapping error! PT %d has HCPhysPT=%RHp not %RHp is in the PD.\n", 1138 pState->cchAddress, pState->u64Address, iPDE, 1139 iSub ? pMap->aPTs[iPDE].HCPhysPaePT1 : pMap->aPTs[iPDE].HCPhysPaePT0, HCPhys); 1140 pvPage = &pMap->aPTs[iPDE].paPaePTsR3[iSub]; 1141 break; 1142 } 1143 } 1144 #endif /* !PGM_WITHOUT_MAPPINGS */ 1145 if (!pvPage) 1146 { 1147 pState->pHlp->pfnPrintf(pState->pHlp, "%0*llx error! PT mapping %s at HCPhys=%RHp was not found in the page pool!\n", 1148 pState->cchAddress, pState->u64Address, pszDesc, HCPhys); 1149 return VERR_INVALID_PARAMETER; 1150 } 1151 } 1152 *ppv = pvPage; 1108 static int pgmR3DumpHierarchyShwMapPage(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys, const char *pszDesc, void const **ppv) 1109 { 1110 PPGMPOOLPAGE pPoolPage = pgmPoolQueryPageForDbg(pState->pVM->pgm.s.pPoolR3, HCPhys); 1111 if (pPoolPage) 1112 { 1113 pState->pHlp->pfnPrintf(pState->pHlp, "%0*llx error! 
%s at HCPhys=%RHp was not found in the page pool!\n", 1114 pState->cchAddress, pState->u64Address, pszDesc, HCPhys); 1115 return VERR_PGM_POOL_GET_PAGE_FAILED; 1116 } 1117 *ppv = (uint8_t *)pPoolPage->pvPageR3 + (HCPhys & PAGE_OFFSET_MASK); 1153 1118 return VINF_SUCCESS; 1154 1119 } … … 1169 1134 RTStrPrintf(szPage, sizeof(szPage), " idx=0i%u", pPage->idx); 1170 1135 else 1171 {1172 /* probably a mapping */1173 1136 strcpy(szPage, " not found"); 1174 #ifndef PGM_WITHOUT_MAPPINGS1175 for (PPGMMAPPING pMap = pState->pVM->pgm.s.pMappingsR3; pMap; pMap = pMap->pNextR3)1176 {1177 uint64_t off = pState->u64Address - pMap->GCPtr;1178 if (off < pMap->cb)1179 {1180 const int iPDE = (uint32_t)(off >> X86_PD_SHIFT);1181 if (pMap->aPTs[iPDE].HCPhysPT == HCPhys)1182 RTStrPrintf(szPage, sizeof(szPage), " #%u: %s", iPDE, pMap->pszDesc);1183 else if (pMap->aPTs[iPDE].HCPhysPaePT0 == HCPhys)1184 RTStrPrintf(szPage, sizeof(szPage), " #%u/0: %s", iPDE, pMap->pszDesc);1185 else if (pMap->aPTs[iPDE].HCPhysPaePT1 == HCPhys)1186 RTStrPrintf(szPage, sizeof(szPage), " #%u/1: %s", iPDE, pMap->pszDesc);1187 else1188 continue;1189 break;1190 }1191 }1192 #endif /* !PGM_WITHOUT_MAPPINGS */1193 }1194 1137 PGM_UNLOCK(pState->pVM); 1195 1138 pState->pHlp->pfnPrintf(pState->pHlp, "%s", szPage); … … 1221 1164 } 1222 1165 else 1223 { 1224 #ifndef PGM_WITHOUT_MAPPINGS 1225 /* check the heap */ 1226 uint32_t cbAlloc; 1227 rc = MMR3HyperQueryInfoFromHCPhys(pState->pVM, HCPhys, szPage, sizeof(szPage), &cbAlloc); 1228 if (RT_SUCCESS(rc)) 1229 pState->pHlp->pfnPrintf(pState->pHlp, " %s %#x bytes", szPage, cbAlloc); 1230 else 1231 #endif 1232 pState->pHlp->pfnPrintf(pState->pHlp, " not found"); 1233 } 1166 pState->pHlp->pfnPrintf(pState->pHlp, " not found"); 1234 1167 NOREF(cbPage); 1235 1168 } … … 1242 1175 * @param pState The dumper state. 1243 1176 * @param HCPhys The page table address. 1244 * @param fIsMapping Whether it is a mapping. 1245 */ 1246 static int pgmR3DumpHierarchyShwPaePT(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys, bool fIsMapping) 1177 */ 1178 static int pgmR3DumpHierarchyShwPaePT(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys) 1247 1179 { 1248 1180 PCPGMSHWPTPAE pPT; 1249 int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page table", fIsMapping,(void const **)&pPT);1181 int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page table", (void const **)&pPT); 1250 1182 if (RT_FAILURE(rc)) 1251 1183 return rc; … … 1315 1247 { 1316 1248 PCX86PDPAE pPD; 1317 int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page directory", false,(void const **)&pPD);1249 int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page directory", (void const **)&pPD); 1318 1250 if (RT_FAILURE(rc)) 1319 1251 return rc; … … 1347 1279 Pde.b.u1NoExecute ? "NX" : "--", 1348 1280 Pde.u & PGM_PDFLAGS_BIG_PAGE ? 'b' : '-', 1349 #ifndef PGM_WITHOUT_MAPPINGS1350 Pde.u & PGM_PDFLAGS_MAPPING ? 'm' : '-',1351 #else1352 1281 '-', 1353 #endif1354 1282 Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-', 1355 1283 Pde.u & X86_PDE2M_PAE_PG_MASK); … … 1380 1308 Pde.n.u1NoExecute ? "NX" : "--", 1381 1309 Pde.u & PGM_PDFLAGS_BIG_PAGE ? 'b' : '-', 1382 #ifndef PGM_WITHOUT_MAPPINGS1383 Pde.u & PGM_PDFLAGS_MAPPING ? 'm' : '-',1384 #else1385 1310 '-', 1386 #endif1387 1311 Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 
'd' : '-', 1388 1312 Pde.u & X86_PDE_PAE_PG_MASK); … … 1395 1319 if (cMaxDepth) 1396 1320 { 1397 int rc2 = pgmR3DumpHierarchyShwPaePT(pState, Pde.u & X86_PDE_PAE_PG_MASK, 1398 #ifndef PGM_WITHOUT_MAPPINGS 1399 RT_BOOL(Pde.u & PGM_PDFLAGS_MAPPING) 1400 #else 1401 false /*fIsMapping*/ 1402 #endif 1403 ); 1321 int rc2 = pgmR3DumpHierarchyShwPaePT(pState, Pde.u & X86_PDE_PAE_PG_MASK); 1404 1322 if (rc2 < rc && RT_SUCCESS(rc)) 1405 1323 rc = rc2; … … 1429 1347 1430 1348 PCX86PDPT pPDPT; 1431 int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page directory pointer table", false,(void const **)&pPDPT);1349 int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page directory pointer table", (void const **)&pPDPT); 1432 1350 if (RT_FAILURE(rc)) 1433 1351 return rc; … … 1519 1437 { 1520 1438 PCX86PML4 pPML4; 1521 int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page map level 4", false,(void const **)&pPML4);1439 int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page map level 4", (void const **)&pPML4); 1522 1440 if (RT_FAILURE(rc)) 1523 1441 return rc; … … 1591 1509 * @param pState The dumper state. 1592 1510 * @param HCPhys The physical address of the table. 1593 * @param fMapping Set if it's a guest mapping. 1594 */ 1595 static int pgmR3DumpHierarchyShw32BitPT(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys, bool fMapping) 1511 */ 1512 static int pgmR3DumpHierarchyShw32BitPT(PPGMR3DUMPHIERARCHYSTATE pState, RTHCPHYS HCPhys) 1596 1513 { 1597 1514 PCX86PT pPT; 1598 int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page table", fMapping,(void const **)&pPT);1515 int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page table", (void const **)&pPT); 1599 1516 if (RT_FAILURE(rc)) 1600 1517 return rc; … … 1646 1563 1647 1564 PCX86PD pPD; 1648 int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page directory", false,(void const **)&pPD);1565 int rc = pgmR3DumpHierarchyShwMapPage(pState, HCPhys, "Page directory", (void const **)&pPD); 1649 1566 if (RT_FAILURE(rc)) 1650 1567 return rc; … … 1677 1594 Pde.b.u1PAT ? "AT" : "--", 1678 1595 Pde.u & PGM_PDFLAGS_BIG_PAGE ? 'b' : '-', 1679 #ifndef PGM_WITHOUT_MAPPINGS1680 Pde.u & PGM_PDFLAGS_MAPPING ? 'm' : '-',1681 #else1682 1596 '-', 1683 #endif1684 1597 Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-', 1685 1598 u64Phys); … … 1702 1615 Pde.n.u1CacheDisable? "CD" : "--", 1703 1616 Pde.u & PGM_PDFLAGS_BIG_PAGE ? 'b' : '-', 1704 #ifndef PGM_WITHOUT_MAPPINGS1705 Pde.u & PGM_PDFLAGS_MAPPING ? 'm' : '-',1706 #else1707 1617 '-', 1708 #endif1709 1618 Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-', 1710 1619 Pde.u & X86_PDE_PG_MASK); … … 1715 1624 if (cMaxDepth) 1716 1625 { 1717 int rc2 = pgmR3DumpHierarchyShw32BitPT(pState, Pde.u & X86_PDE_PG_MASK, 1718 #ifndef PGM_WITHOUT_MAPPINGS 1719 !!(Pde.u & PGM_PDFLAGS_MAPPING) 1720 #else 1721 false /*fIsMapping*/ 1722 #endif 1723 ); 1626 int rc2 = pgmR3DumpHierarchyShw32BitPT(pState, Pde.u & X86_PDE_PG_MASK); 1724 1627 if (rc2 < rc && RT_SUCCESS(rc)) 1725 1628 rc = rc2; -
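Note on the simplified pgmR3DumpHierarchyShwMapPage above: with the mapping fallback gone, the dumper resolves a shadow table purely by asking the pool for the backing page and re-applying the in-page offset. A standalone sketch of that offset composition, assuming 4 KB pages; the names are hypothetical, not the PGM pool API:

#include <stdint.h>
#include <assert.h>

#define MY_PAGE_SIZE        0x1000u               /* sketch constant, 4 KB pages assumed */
#define MY_PAGE_OFFSET_MASK (MY_PAGE_SIZE - 1)

/* pvPageR3 is the ring-3 mapping of the page backing HCPhys; the byte address
 * of HCPhys is that mapping plus the offset of HCPhys within its page. */
static void const *physToR3(void const *pvPageR3, uint64_t HCPhys)
{
    return (uint8_t const *)pvPageR3 + (HCPhys & MY_PAGE_OFFSET_MASK);
}

int main(void)
{
    static uint8_t abPage[MY_PAGE_SIZE];
    uint64_t const HCPhys = UINT64_C(0x12345678); /* offset 0x678 within its page */
    assert(physToR3(abPage, HCPhys) == abPage + 0x678);
    return 0;
}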
trunk/src/VBox/VMM/VMMR3/PGMMap.cpp
r91247 r91854 21 21 *********************************************************************************************************************************/ 22 22 #define LOG_GROUP LOG_GROUP_PGM 23 #include <VBox/vmm/dbgf.h>24 23 #include <VBox/vmm/pgm.h> 25 24 #include "PGMInternal.h" 26 25 #include <VBox/vmm/vm.h> 27 #include "PGMInline.h"28 26 29 27 #include <VBox/log.h> 30 #include <VBox/err.h> 31 #include <iprt/asm.h> 32 #include <iprt/assert.h> 33 #include <iprt/string.h> 34 35 36 /********************************************************************************************************************************* 37 * Internal Functions * 38 *********************************************************************************************************************************/ 39 #ifndef PGM_WITHOUT_MAPPINGS 40 static void pgmR3MapClearPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE); 41 static void pgmR3MapSetPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE); 42 static int pgmR3MapIntermediateCheckOne(PVM pVM, uintptr_t uAddress, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault); 43 static void pgmR3MapIntermediateDoOne(PVM pVM, uintptr_t uAddress, RTHCPHYS HCPhys, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault); 44 #endif 45 46 47 #ifndef PGM_WITHOUT_MAPPINGS 48 49 /** 50 * Creates a page table based mapping in GC. 51 * 52 * @returns VBox status code. 53 * @param pVM The cross context VM structure. 54 * @param GCPtr Virtual Address. (Page table aligned!) 55 * @param cb Size of the range. Must be a 4MB aligned! 56 * @param fFlags PGMR3MAPPT_FLAGS_UNMAPPABLE or 0. 57 * @param pfnRelocate Relocation callback function. 58 * @param pvUser User argument to the callback. 59 * @param pszDesc Pointer to description string. This must not be freed. 60 */ 61 VMMR3DECL(int) PGMR3MapPT(PVM pVM, RTGCPTR GCPtr, uint32_t cb, uint32_t fFlags, PFNPGMRELOCATE pfnRelocate, void *pvUser, const char *pszDesc) 62 { 63 LogFlow(("PGMR3MapPT: GCPtr=%#x cb=%d fFlags=%#x pfnRelocate=%p pvUser=%p pszDesc=%s\n", GCPtr, cb, fFlags, pfnRelocate, pvUser, pszDesc)); 64 AssertMsg(pVM->pgm.s.pInterPD, ("Paging isn't initialized, init order problems!\n")); 65 66 /* 67 * Validate input. 68 * Note! The lower limit (1 MB) matches how pgmR3PhysMMIOExCreate works. 69 */ 70 Assert(!fFlags || fFlags == PGMR3MAPPT_FLAGS_UNMAPPABLE); 71 AssertMsgReturn(cb >= _1M && cb <= _64M, ("Seriously? cb=%d (%#x)\n", cb, cb), VERR_OUT_OF_RANGE); 72 73 cb = RT_ALIGN_32(cb, _4M); 74 RTGCPTR GCPtrLast = GCPtr + cb - 1; 75 76 AssertMsgReturn(GCPtrLast >= GCPtr, ("Range wraps! GCPtr=%x GCPtrLast=%x\n", GCPtr, GCPtrLast), 77 VERR_INVALID_PARAMETER); 78 AssertMsgReturn(!pVM->pgm.s.fMappingsFixed, ("Mappings are fixed! It's not possible to add new mappings at this time!\n"), 79 VERR_PGM_MAPPINGS_FIXED); 80 AssertPtrReturn(pfnRelocate, VERR_INVALID_PARAMETER); 81 82 /* 83 * Find list location. 84 */ 85 PPGMMAPPING pPrev = NULL; 86 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; 87 while (pCur) 88 { 89 if (pCur->GCPtrLast >= GCPtr && pCur->GCPtr <= GCPtrLast) 90 { 91 AssertMsgFailed(("Address is already in use by %s. req %#x-%#x take %#x-%#x\n", 92 pCur->pszDesc, GCPtr, GCPtrLast, pCur->GCPtr, pCur->GCPtrLast)); 93 LogRel(("VERR_PGM_MAPPING_CONFLICT: Address is already in use by %s. 
req %#x-%#x take %#x-%#x\n", 94 pCur->pszDesc, GCPtr, GCPtrLast, pCur->GCPtr, pCur->GCPtrLast)); 95 return VERR_PGM_MAPPING_CONFLICT; 96 } 97 if (pCur->GCPtr > GCPtr) 98 break; 99 pPrev = pCur; 100 pCur = pCur->pNextR3; 101 } 102 103 /* 104 * Check for conflicts with intermediate mappings. 105 */ 106 const unsigned iPageDir = GCPtr >> X86_PD_SHIFT; 107 const unsigned cPTs = cb >> X86_PD_SHIFT; 108 if (pVM->pgm.s.fFinalizedMappings) 109 { 110 for (unsigned i = 0; i < cPTs; i++) 111 if (pVM->pgm.s.pInterPD->a[iPageDir + i].n.u1Present) 112 { 113 AssertMsgFailed(("Address %#x is already in use by an intermediate mapping.\n", GCPtr + (i << PAGE_SHIFT))); 114 LogRel(("VERR_PGM_MAPPING_CONFLICT: Address %#x is already in use by an intermediate mapping.\n", GCPtr + (i << PAGE_SHIFT))); 115 return VERR_PGM_MAPPING_CONFLICT; 116 } 117 /** @todo AMD64: add check in PAE structures too, so we can remove all the 32-Bit paging stuff there. */ 118 } 119 120 /* 121 * Allocate and initialize the new list node. 122 */ 123 PPGMMAPPING pNew; 124 int rc; 125 if (fFlags & PGMR3MAPPT_FLAGS_UNMAPPABLE) 126 rc = MMHyperAlloc( pVM, RT_UOFFSETOF_DYN(PGMMAPPING, aPTs[cPTs]), 0, MM_TAG_PGM_MAPPINGS, (void **)&pNew); 127 else 128 rc = MMR3HyperAllocOnceNoRel(pVM, RT_UOFFSETOF_DYN(PGMMAPPING, aPTs[cPTs]), 0, MM_TAG_PGM_MAPPINGS, (void **)&pNew); 129 if (RT_FAILURE(rc)) 130 return rc; 131 pNew->GCPtr = GCPtr; 132 pNew->GCPtrLast = GCPtrLast; 133 pNew->cb = cb; 134 pNew->pfnRelocate = pfnRelocate; 135 pNew->pvUser = pvUser; 136 pNew->pszDesc = pszDesc; 137 pNew->cPTs = cPTs; 138 139 /* 140 * Allocate page tables and insert them into the page directories. 141 * (One 32-bit PT and two PAE PTs.) 142 */ 143 uint8_t *pbPTs; 144 if (fFlags & PGMR3MAPPT_FLAGS_UNMAPPABLE) 145 rc = MMHyperAlloc( pVM, PAGE_SIZE * 3 * cPTs, PAGE_SIZE, MM_TAG_PGM_MAPPINGS, (void **)&pbPTs); 146 else 147 rc = MMR3HyperAllocOnceNoRel(pVM, PAGE_SIZE * 3 * cPTs, PAGE_SIZE, MM_TAG_PGM_MAPPINGS, (void **)&pbPTs); 148 if (RT_FAILURE(rc)) 149 { 150 MMHyperFree(pVM, pNew); 151 return VERR_NO_MEMORY; 152 } 153 154 /* 155 * Init the page tables and insert them into the page directories. 156 */ 157 Log4(("PGMR3MapPT: GCPtr=%RGv cPTs=%u pbPTs=%p\n", GCPtr, cPTs, pbPTs)); 158 for (unsigned i = 0; i < cPTs; i++) 159 { 160 /* 161 * 32-bit. 162 */ 163 pNew->aPTs[i].pPTR3 = (PX86PT)pbPTs; 164 pNew->aPTs[i].pPTRC = MMHyperR3ToRC(pVM, pNew->aPTs[i].pPTR3); 165 pNew->aPTs[i].pPTR0 = MMHyperR3ToR0(pVM, pNew->aPTs[i].pPTR3); 166 pNew->aPTs[i].HCPhysPT = MMR3HyperHCVirt2HCPhys(pVM, pNew->aPTs[i].pPTR3); 167 pbPTs += PAGE_SIZE; 168 Log4(("PGMR3MapPT: i=%d: pPTR3=%RHv pPTRC=%RRv pPRTR0=%RHv HCPhysPT=%RHp\n", 169 i, pNew->aPTs[i].pPTR3, pNew->aPTs[i].pPTRC, pNew->aPTs[i].pPTR0, pNew->aPTs[i].HCPhysPT)); 170 171 /* 172 * PAE. 
173 */ 174 pNew->aPTs[i].HCPhysPaePT0 = MMR3HyperHCVirt2HCPhys(pVM, pbPTs); 175 pNew->aPTs[i].HCPhysPaePT1 = MMR3HyperHCVirt2HCPhys(pVM, pbPTs + PAGE_SIZE); 176 pNew->aPTs[i].paPaePTsR3 = (PPGMSHWPTPAE)pbPTs; 177 pNew->aPTs[i].paPaePTsRC = MMHyperR3ToRC(pVM, pbPTs); 178 pNew->aPTs[i].paPaePTsR0 = MMHyperR3ToR0(pVM, pbPTs); 179 pbPTs += PAGE_SIZE * 2; 180 Log4(("PGMR3MapPT: i=%d: paPaePTsR#=%RHv paPaePTsRC=%RRv paPaePTsR#=%RHv HCPhysPaePT0=%RHp HCPhysPaePT1=%RHp\n", 181 i, pNew->aPTs[i].paPaePTsR3, pNew->aPTs[i].paPaePTsRC, pNew->aPTs[i].paPaePTsR0, pNew->aPTs[i].HCPhysPaePT0, pNew->aPTs[i].HCPhysPaePT1)); 182 } 183 if (pVM->pgm.s.fFinalizedMappings) 184 pgmR3MapSetPDEs(pVM, pNew, iPageDir); 185 /* else PGMR3FinalizeMappings() */ 186 187 /* 188 * Insert the new mapping. 189 */ 190 pNew->pNextR3 = pCur; 191 pNew->pNextRC = pCur ? MMHyperR3ToRC(pVM, pCur) : NIL_RTRCPTR; 192 pNew->pNextR0 = pCur ? MMHyperR3ToR0(pVM, pCur) : NIL_RTR0PTR; 193 if (pPrev) 194 { 195 pPrev->pNextR3 = pNew; 196 pPrev->pNextRC = MMHyperR3ToRC(pVM, pNew); 197 pPrev->pNextR0 = MMHyperR3ToR0(pVM, pNew); 198 } 199 else 200 { 201 pVM->pgm.s.pMappingsR3 = pNew; 202 pVM->pgm.s.pMappingsRC = MMHyperR3ToRC(pVM, pNew); 203 pVM->pgm.s.pMappingsR0 = MMHyperR3ToR0(pVM, pNew); 204 } 205 206 for (VMCPUID i = 0; i < pVM->cCpus; i++) 207 { 208 PVMCPU pVCpu = pVM->apCpusR3[i]; 209 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); 210 } 211 return VINF_SUCCESS; 212 } 213 214 #ifdef VBOX_WITH_UNUSED_CODE 215 216 /** 217 * Removes a page table based mapping. 218 * 219 * @returns VBox status code. 220 * @param pVM The cross context VM structure. 221 * @param GCPtr Virtual Address. (Page table aligned!) 222 * 223 * @remarks Don't call this without passing PGMR3MAPPT_FLAGS_UNMAPPABLE to 224 * PGMR3MapPT or you'll burn in the heap. 225 */ 226 VMMR3DECL(int) PGMR3UnmapPT(PVM pVM, RTGCPTR GCPtr) 227 { 228 LogFlow(("PGMR3UnmapPT: GCPtr=%#x\n", GCPtr)); 229 AssertReturn(pVM->pgm.s.fFinalizedMappings, VERR_WRONG_ORDER); 230 231 /* 232 * Find it. 233 */ 234 PPGMMAPPING pPrev = NULL; 235 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; 236 while (pCur) 237 { 238 if (pCur->GCPtr == GCPtr) 239 { 240 /* 241 * Unlink it. 242 */ 243 if (pPrev) 244 { 245 pPrev->pNextR3 = pCur->pNextR3; 246 pPrev->pNextRC = pCur->pNextRC; 247 pPrev->pNextR0 = pCur->pNextR0; 248 } 249 else 250 { 251 pVM->pgm.s.pMappingsR3 = pCur->pNextR3; 252 pVM->pgm.s.pMappingsRC = pCur->pNextRC; 253 pVM->pgm.s.pMappingsR0 = pCur->pNextR0; 254 } 255 256 /* 257 * Free the page table memory, clear page directory entries 258 * and free the page tables and node memory. 259 */ 260 MMHyperFree(pVM, pCur->aPTs[0].pPTR3); 261 if (pCur->GCPtr != NIL_RTGCPTR) 262 pgmR3MapClearPDEs(pVM, pCur, pCur->GCPtr >> X86_PD_SHIFT); 263 MMHyperFree(pVM, pCur); 264 265 for (VMCPUID i = 0; i < pVM->cCpus; i++) 266 { 267 PVMCPU pVCpu = pVM->apCpusR3[i]; 268 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); 269 } 270 return VINF_SUCCESS; 271 } 272 273 /* done? */ 274 if (pCur->GCPtr > GCPtr) 275 break; 276 277 /* next */ 278 pPrev = pCur; 279 pCur = pCur->pNextR3; 280 } 281 282 AssertMsgFailed(("No mapping for %#x found!\n", GCPtr)); 283 return VERR_INVALID_PARAMETER; 284 } 285 286 #endif /* unused */ 287 288 289 /** 290 * Checks whether a range of PDEs in the intermediate 291 * memory context are unused. 292 * 293 * We're talking 32-bit PDEs here. 294 * 295 * @returns true/false. 296 * @param pVM The cross context VM structure. 297 * @param iPD The first PDE in the range. 298 * @param cPTs The number of PDEs in the range. 
299 */ 300 DECLINLINE(bool) pgmR3AreIntermediatePDEsUnused(PVM pVM, unsigned iPD, unsigned cPTs) 301 { 302 if (pVM->pgm.s.pInterPD->a[iPD].n.u1Present) 303 return false; 304 while (cPTs > 1) 305 { 306 iPD++; 307 if (pVM->pgm.s.pInterPD->a[iPD].n.u1Present) 308 return false; 309 cPTs--; 310 } 311 return true; 312 } 313 314 315 /** 316 * Unlinks the mapping. 317 * 318 * The mapping *must* be in the list. 319 * 320 * @param pVM The cross context VM structure. 321 * @param pMapping The mapping to unlink. 322 */ 323 static void pgmR3MapUnlink(PVM pVM, PPGMMAPPING pMapping) 324 { 325 PPGMMAPPING pAfterThis = pVM->pgm.s.pMappingsR3; 326 if (pAfterThis == pMapping) 327 { 328 /* head */ 329 pVM->pgm.s.pMappingsR3 = pMapping->pNextR3; 330 pVM->pgm.s.pMappingsRC = pMapping->pNextRC; 331 pVM->pgm.s.pMappingsR0 = pMapping->pNextR0; 332 } 333 else 334 { 335 /* in the list */ 336 while (pAfterThis->pNextR3 != pMapping) 337 { 338 pAfterThis = pAfterThis->pNextR3; 339 AssertReleaseReturnVoid(pAfterThis); 340 } 341 342 pAfterThis->pNextR3 = pMapping->pNextR3; 343 pAfterThis->pNextRC = pMapping->pNextRC; 344 pAfterThis->pNextR0 = pMapping->pNextR0; 345 } 346 } 347 348 349 /** 350 * Links the mapping. 351 * 352 * @param pVM The cross context VM structure. 353 * @param pMapping The mapping to linked. 354 */ 355 static void pgmR3MapLink(PVM pVM, PPGMMAPPING pMapping) 356 { 357 /* 358 * Find the list location (it's sorted by GCPhys) and link it in. 359 */ 360 if ( !pVM->pgm.s.pMappingsR3 361 || pVM->pgm.s.pMappingsR3->GCPtr > pMapping->GCPtr) 362 { 363 /* head */ 364 pMapping->pNextR3 = pVM->pgm.s.pMappingsR3; 365 pMapping->pNextRC = pVM->pgm.s.pMappingsRC; 366 pMapping->pNextR0 = pVM->pgm.s.pMappingsR0; 367 pVM->pgm.s.pMappingsR3 = pMapping; 368 pVM->pgm.s.pMappingsRC = MMHyperR3ToRC(pVM, pMapping); 369 pVM->pgm.s.pMappingsR0 = MMHyperR3ToR0(pVM, pMapping); 370 } 371 else 372 { 373 /* in the list */ 374 PPGMMAPPING pAfterThis = pVM->pgm.s.pMappingsR3; 375 PPGMMAPPING pBeforeThis = pAfterThis->pNextR3; 376 while (pBeforeThis && pBeforeThis->GCPtr <= pMapping->GCPtr) 377 { 378 pAfterThis = pBeforeThis; 379 pBeforeThis = pBeforeThis->pNextR3; 380 } 381 382 pMapping->pNextR3 = pAfterThis->pNextR3; 383 pMapping->pNextRC = pAfterThis->pNextRC; 384 pMapping->pNextR0 = pAfterThis->pNextR0; 385 pAfterThis->pNextR3 = pMapping; 386 pAfterThis->pNextRC = MMHyperR3ToRC(pVM, pMapping); 387 pAfterThis->pNextR0 = MMHyperR3ToR0(pVM, pMapping); 388 } 389 } 390 391 392 /** 393 * Finalizes the intermediate context. 394 * 395 * This is called at the end of the ring-3 init and will construct the 396 * intermediate paging structures, relocating all the mappings in the process. 397 * 398 * @returns VBox status code. 399 * @param pVM The cross context VM structure. 400 * @thread EMT(0) 401 */ 402 VMMR3DECL(int) PGMR3FinalizeMappings(PVM pVM) 403 { 404 AssertReturn(!pVM->pgm.s.fFinalizedMappings, VERR_WRONG_ORDER); 405 pVM->pgm.s.fFinalizedMappings = true; 406 407 /* 408 * Loop until all mappings have been finalized. 409 */ 410 #if 0 411 unsigned iPDNext = UINT32_C(0xc0000000) >> X86_PD_SHIFT; /* makes CSAM/PATM freak out booting linux. :-/ */ 412 #elif 0 413 unsigned iPDNext = MM_HYPER_AREA_ADDRESS >> X86_PD_SHIFT; 414 #else 415 unsigned iPDNext = 1 << X86_PD_SHIFT; /* no hint, map them from the top. */ 416 #endif 417 418 PPGMMAPPING pCur; 419 do 420 { 421 pCur = pVM->pgm.s.pMappingsR3; 422 while (pCur) 423 { 424 if (!pCur->fFinalized) 425 { 426 /* 427 * Find a suitable location. 
428 */ 429 RTGCPTR const GCPtrOld = pCur->GCPtr; 430 const unsigned cPTs = pCur->cPTs; 431 unsigned iPDNew = iPDNext; 432 if ( iPDNew + cPTs >= X86_PG_ENTRIES /* exclude the last PD */ 433 || !pgmR3AreIntermediatePDEsUnused(pVM, iPDNew, cPTs) 434 || !pCur->pfnRelocate(pVM, GCPtrOld, (RTGCPTR)iPDNew << X86_PD_SHIFT, PGMRELOCATECALL_SUGGEST, pCur->pvUser)) 435 { 436 /* No luck, just scan down from 4GB-4MB, giving up at 4MB. */ 437 iPDNew = X86_PG_ENTRIES - cPTs - 1; 438 while ( iPDNew > 0 439 && ( !pgmR3AreIntermediatePDEsUnused(pVM, iPDNew, cPTs) 440 || !pCur->pfnRelocate(pVM, GCPtrOld, (RTGCPTR)iPDNew << X86_PD_SHIFT, PGMRELOCATECALL_SUGGEST, pCur->pvUser)) 441 ) 442 iPDNew--; 443 AssertLogRelReturn(iPDNew != 0, VERR_PGM_INTERMEDIATE_PAGING_CONFLICT); 444 } 445 446 /* 447 * Relocate it (something akin to pgmR3MapRelocate). 448 */ 449 pgmR3MapSetPDEs(pVM, pCur, iPDNew); 450 451 /* unlink the mapping, update the entry and relink it. */ 452 pgmR3MapUnlink(pVM, pCur); 453 454 RTGCPTR const GCPtrNew = (RTGCPTR)iPDNew << X86_PD_SHIFT; 455 pCur->GCPtr = GCPtrNew; 456 pCur->GCPtrLast = GCPtrNew + pCur->cb - 1; 457 pCur->fFinalized = true; 458 459 pgmR3MapLink(pVM, pCur); 460 461 /* Finally work the callback. */ 462 pCur->pfnRelocate(pVM, GCPtrOld, GCPtrNew, PGMRELOCATECALL_RELOCATE, pCur->pvUser); 463 464 /* 465 * The list order might have changed, start from the beginning again. 466 */ 467 iPDNext = iPDNew + cPTs; 468 break; 469 } 470 471 /* next */ 472 pCur = pCur->pNextR3; 473 } 474 } while (pCur); 475 476 return VINF_SUCCESS; 477 } 478 479 #endif /* !PGM_WITHOUT_MAPPINGS */ 28 #include <iprt/errcore.h> 480 29 481 30 … … 490 39 VMMR3DECL(int) PGMR3MappingsSize(PVM pVM, uint32_t *pcb) 491 40 { 492 RTGCPTR cb = 0;493 #ifndef PGM_WITHOUT_MAPPINGS494 for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)495 cb += pCur->cb;496 #else497 41 RT_NOREF(pVM); 498 #endif 499 500 *pcb = cb; 501 AssertReturn(*pcb == cb, VERR_NUMBER_TOO_BIG); 502 Log(("PGMR3MappingsSize: return %d (%#x) bytes\n", cb, cb)); 42 *pcb = 0; 43 Log(("PGMR3MappingsSize: returns zero\n")); 503 44 return VINF_SUCCESS; 504 45 } … … 515 56 VMMR3DECL(int) PGMR3MappingsFix(PVM pVM, RTGCPTR GCPtrBase, uint32_t cb) 516 57 { 517 Log(("PGMR3MappingsFix: GCPtrBase=%RGv cb=%#x (fMappingsFixed=%RTbool MappingEnabled=%RTbool)\n", 518 GCPtrBase, cb, pVM->pgm.s.fMappingsFixed, pgmMapAreMappingsEnabled(pVM))); 519 520 #ifndef PGM_WITHOUT_MAPPINGS 521 if (pgmMapAreMappingsEnabled(pVM)) 522 { 523 /* 524 * Only applies to VCPU 0 as we don't support SMP guests with raw mode. 525 */ 526 Assert(pVM->cCpus == 1); 527 PVMCPU pVCpu = pVM->apCpusR3[0]; 528 529 /* 530 * Before we do anything we'll do a forced PD sync to try make sure any 531 * pending relocations because of these mappings have been resolved. 
532 */ 533 PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), true); 534 535 return pgmR3MappingsFixInternal(pVM, GCPtrBase, cb); 536 } 537 538 #else /* PGM_WITHOUT_MAPPINGS */ 58 Log(("PGMR3MappingsFix: GCPtrBase=%RGv cb=%#x\n", GCPtrBase, cb)); 539 59 RT_NOREF(pVM, GCPtrBase, cb); 540 #endif /* PGM_WITHOUT_MAPPINGS */541 542 Assert(!VM_IS_RAW_MODE_ENABLED(pVM));543 60 return VINF_SUCCESS; 544 61 } 545 546 547 #ifndef PGM_WITHOUT_MAPPINGS548 /**549 * Internal worker for PGMR3MappingsFix and pgmR3Load.550 *551 * (This does not perform a SyncCR3 before the fixation like PGMR3MappingsFix.)552 *553 * @returns VBox status code.554 * @param pVM The cross context VM structure.555 * @param GCPtrBase The address of the reserved range of guest memory.556 * @param cb The size of the range starting at GCPtrBase.557 */558 int pgmR3MappingsFixInternal(PVM pVM, RTGCPTR GCPtrBase, uint32_t cb)559 {560 /*561 * Check input arguments and pre-conditions.562 */563 AssertMsgReturn(!(GCPtrBase & X86_PAGE_4M_OFFSET_MASK), ("GCPtrBase (%#x) has to be aligned on a 4MB address!\n", GCPtrBase),564 VERR_INVALID_PARAMETER);565 AssertMsgReturn(cb && !(cb & X86_PAGE_4M_OFFSET_MASK), ("cb (%#x) is 0 or not aligned on a 4MB address!\n", cb),566 VERR_INVALID_PARAMETER);567 AssertReturn(pgmMapAreMappingsEnabled(pVM), VERR_PGM_MAPPINGS_DISABLED);568 AssertReturn(pVM->cCpus == 1, VERR_PGM_MAPPINGS_SMP);569 570 /*571 * Check that it's not conflicting with a core code mapping in the intermediate page table.572 */573 unsigned iPDNew = GCPtrBase >> X86_PD_SHIFT;574 unsigned i = cb >> X86_PD_SHIFT;575 while (i-- > 0)576 {577 if (pVM->pgm.s.pInterPD->a[iPDNew + i].n.u1Present)578 {579 /* Check that it's not one or our mappings. */580 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;581 while (pCur)582 {583 if (iPDNew + i - (pCur->GCPtr >> X86_PD_SHIFT) < (pCur->cb >> X86_PD_SHIFT))584 break;585 pCur = pCur->pNextR3;586 }587 if (!pCur)588 {589 LogRel(("PGMR3MappingsFix: Conflicts with intermediate PDE %#x (GCPtrBase=%RGv cb=%#zx). The guest should retry.\n",590 iPDNew + i, GCPtrBase, cb));591 return VERR_PGM_MAPPINGS_FIX_CONFLICT;592 }593 }594 }595 596 /*597 * In PAE / PAE mode, make sure we don't cross page directories.598 */599 PVMCPU pVCpu = pVM->apCpusR3[0];600 if ( ( pVCpu->pgm.s.enmGuestMode == PGMMODE_PAE601 || pVCpu->pgm.s.enmGuestMode == PGMMODE_PAE_NX)602 && ( pVCpu->pgm.s.enmShadowMode == PGMMODE_PAE603 || pVCpu->pgm.s.enmShadowMode == PGMMODE_PAE_NX))604 {605 unsigned iPdptBase = GCPtrBase >> X86_PDPT_SHIFT;606 unsigned iPdptLast = (GCPtrBase + cb - 1) >> X86_PDPT_SHIFT;607 if (iPdptBase != iPdptLast)608 {609 LogRel(("PGMR3MappingsFix: Crosses PD boundary; iPdptBase=%#x iPdptLast=%#x (GCPtrBase=%RGv cb=%#zx). 
The guest should retry.\n",610 iPdptBase, iPdptLast, GCPtrBase, cb));611 return VERR_PGM_MAPPINGS_FIX_CONFLICT;612 }613 }614 615 /*616 * Loop the mappings and check that they all agree on their new locations.617 */618 RTGCPTR GCPtrCur = GCPtrBase;619 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;620 while (pCur)621 {622 if (!pCur->pfnRelocate(pVM, pCur->GCPtr, GCPtrCur, PGMRELOCATECALL_SUGGEST, pCur->pvUser))623 {624 AssertMsgFailed(("The suggested fixed address %#x was rejected by '%s'!\n", GCPtrCur, pCur->pszDesc));625 return VERR_PGM_MAPPINGS_FIX_REJECTED;626 }627 /* next */628 GCPtrCur += pCur->cb;629 pCur = pCur->pNextR3;630 }631 if (GCPtrCur > GCPtrBase + cb)632 {633 AssertMsgFailed(("cb (%#x) is less than the required range %#x!\n", cb, GCPtrCur - GCPtrBase));634 return VERR_PGM_MAPPINGS_FIX_TOO_SMALL;635 }636 637 /*638 * Loop the table assigning the mappings to the passed in memory639 * and call their relocator callback.640 */641 GCPtrCur = GCPtrBase;642 pCur = pVM->pgm.s.pMappingsR3;643 while (pCur)644 {645 RTGCPTR const GCPtrOld = pCur->GCPtr;646 647 /*648 * Relocate the page table(s).649 */650 if (pCur->GCPtr != NIL_RTGCPTR)651 pgmR3MapClearPDEs(pVM, pCur, GCPtrOld >> X86_PD_SHIFT);652 pgmR3MapSetPDEs(pVM, pCur, GCPtrCur >> X86_PD_SHIFT);653 654 /*655 * Update the entry.656 */657 pCur->GCPtr = GCPtrCur;658 pCur->GCPtrLast = GCPtrCur + pCur->cb - 1;659 660 /*661 * Callback to execute the relocation.662 */663 pCur->pfnRelocate(pVM, GCPtrOld, GCPtrCur, PGMRELOCATECALL_RELOCATE, pCur->pvUser);664 665 /*666 * Advance.667 */668 GCPtrCur += pCur->cb;669 pCur = pCur->pNextR3;670 }671 672 /*673 * Mark the mappings as fixed at this new location and return.674 */675 pVM->pgm.s.fMappingsFixed = true;676 pVM->pgm.s.fMappingsFixedRestored = false;677 pVM->pgm.s.GCPtrMappingFixed = GCPtrBase;678 pVM->pgm.s.cbMappingFixed = cb;679 680 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)681 {682 pVM->aCpus[idCpu].pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;683 VMCPU_FF_SET(pVM->apCpusR3[idCpu], VMCPU_FF_PGM_SYNC_CR3);684 }685 return VINF_SUCCESS;686 }687 #endif /* !PGM_WITHOUT_MAPPINGS */688 62 689 63 … … 700 74 VMMR3DECL(int) PGMR3MappingsUnfix(PVM pVM) 701 75 { 702 Log(("PGMR3MappingsUnfix: fMappingsFixed=%RTbool MappingsEnabled=%RTbool\n", pVM->pgm.s.fMappingsFixed, pgmMapAreMappingsEnabled(pVM))); 703 if ( pgmMapAreMappingsEnabled(pVM) 704 && ( pVM->pgm.s.fMappingsFixed 705 || pVM->pgm.s.fMappingsFixedRestored) 706 ) 707 { 708 bool const fResyncCR3 = pVM->pgm.s.fMappingsFixed; 709 710 pVM->pgm.s.fMappingsFixed = false; 711 pVM->pgm.s.fMappingsFixedRestored = false; 712 pVM->pgm.s.GCPtrMappingFixed = 0; 713 pVM->pgm.s.cbMappingFixed = 0; 714 715 if (fResyncCR3) 716 for (VMCPUID i = 0; i < pVM->cCpus; i++) 717 VMCPU_FF_SET(pVM->apCpusR3[i], VMCPU_FF_PGM_SYNC_CR3); 718 } 76 Log(("PGMR3MappingsUnfix:\n")); 77 RT_NOREF(pVM); 719 78 return VINF_SUCCESS; 720 79 } 721 80 722 #ifndef PGM_WITHOUT_MAPPINGS723 724 /**725 * Checks if the mappings needs re-fixing after a restore.726 *727 * @returns true if they need, false if not.728 * @param pVM The cross context VM structure.729 */730 VMMR3DECL(bool) PGMR3MappingsNeedReFixing(PVM pVM)731 {732 VM_ASSERT_VALID_EXT_RETURN(pVM, false);733 return pVM->pgm.s.fMappingsFixedRestored;734 }735 736 737 /**738 * Map pages into the intermediate context (switcher code).739 *740 * These pages are mapped at both the give virtual address and at the physical741 * address (for identity mapping).742 *743 * @returns VBox status code.744 * @param pVM The cross context VM 
structure.745 * @param Addr Intermediate context address of the mapping.746 * @param HCPhys Start of the range of physical pages. This must be entriely below 4GB!747 * @param cbPages Number of bytes to map.748 *749 * @remark This API shall not be used to anything but mapping the switcher code.750 */751 VMMR3DECL(int) PGMR3MapIntermediate(PVM pVM, RTUINTPTR Addr, RTHCPHYS HCPhys, unsigned cbPages)752 {753 LogFlow(("PGMR3MapIntermediate: Addr=%RTptr HCPhys=%RHp cbPages=%#x\n", Addr, HCPhys, cbPages));754 755 /*756 * Adjust input.757 */758 cbPages += (uint32_t)HCPhys & PAGE_OFFSET_MASK;759 cbPages = RT_ALIGN(cbPages, PAGE_SIZE);760 HCPhys &= X86_PTE_PAE_PG_MASK;761 Addr &= PAGE_BASE_MASK;762 /* We only care about the first 4GB, because on AMD64 we'll be repeating them all over the address space. */763 uint32_t uAddress = (uint32_t)Addr;764 765 /*766 * Assert input and state.767 */768 AssertMsg(pVM->pgm.s.offVM, ("Bad init order\n"));769 AssertMsg(pVM->pgm.s.pInterPD, ("Bad init order, paging.\n"));770 AssertMsg(cbPages <= (512 << PAGE_SHIFT), ("The mapping is too big %d bytes\n", cbPages));771 AssertMsg(HCPhys < _4G && HCPhys + cbPages < _4G, ("Addr=%RTptr HCPhys=%RHp cbPages=%d\n", Addr, HCPhys, cbPages));772 AssertReturn(!pVM->pgm.s.fFinalizedMappings, VERR_WRONG_ORDER);773 774 /*775 * Check for internal conflicts between the virtual address and the physical address.776 * A 1:1 mapping is fine, but partial overlapping is a no-no.777 */778 if ( uAddress != HCPhys779 && ( uAddress < HCPhys780 ? HCPhys - uAddress < cbPages781 : uAddress - HCPhys < cbPages782 )783 )784 AssertLogRelMsgFailedReturn(("Addr=%RTptr HCPhys=%RHp cbPages=%d\n", Addr, HCPhys, cbPages),785 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);786 787 const unsigned cPages = cbPages >> PAGE_SHIFT;788 int rc = pgmR3MapIntermediateCheckOne(pVM, uAddress, cPages, pVM->pgm.s.apInterPTs[0], pVM->pgm.s.apInterPaePTs[0]);789 if (RT_FAILURE(rc))790 return rc;791 rc = pgmR3MapIntermediateCheckOne(pVM, (uintptr_t)HCPhys, cPages, pVM->pgm.s.apInterPTs[1], pVM->pgm.s.apInterPaePTs[1]);792 if (RT_FAILURE(rc))793 return rc;794 795 /*796 * Everythings fine, do the mapping.797 */798 pgmR3MapIntermediateDoOne(pVM, uAddress, HCPhys, cPages, pVM->pgm.s.apInterPTs[0], pVM->pgm.s.apInterPaePTs[0]);799 pgmR3MapIntermediateDoOne(pVM, (uintptr_t)HCPhys, HCPhys, cPages, pVM->pgm.s.apInterPTs[1], pVM->pgm.s.apInterPaePTs[1]);800 801 return VINF_SUCCESS;802 }803 804 805 /**806 * Validates that there are no conflicts for this mapping into the intermediate context.807 *808 * @returns VBox status code.809 * @param pVM The cross context VM structure.810 * @param uAddress Address of the mapping.811 * @param cPages Number of pages.812 * @param pPTDefault Pointer to the default page table for this mapping.813 * @param pPTPaeDefault Pointer to the default page table for this mapping.814 */815 static int pgmR3MapIntermediateCheckOne(PVM pVM, uintptr_t uAddress, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault)816 {817 AssertMsg((uAddress >> X86_PD_SHIFT) + cPages <= 1024, ("64-bit fixme uAddress=%RGv cPages=%u\n", uAddress, cPages));818 819 /*820 * Check that the ranges are available.821 * (This code doesn't have to be fast.)822 */823 while (cPages > 0)824 {825 /*826 * 32-Bit.827 */828 unsigned iPDE = (uAddress >> X86_PD_SHIFT) & X86_PD_MASK;829 unsigned iPTE = (uAddress >> X86_PT_SHIFT) & X86_PT_MASK;830 PX86PT pPT = pPTDefault;831 if (pVM->pgm.s.pInterPD->a[iPDE].u)832 {833 RTHCPHYS HCPhysPT = pVM->pgm.s.pInterPD->a[iPDE].u & X86_PDE_PG_MASK;834 if (HCPhysPT 
== MMPage2Phys(pVM, pVM->pgm.s.apInterPTs[0]))835 pPT = pVM->pgm.s.apInterPTs[0];836 else if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPTs[1]))837 pPT = pVM->pgm.s.apInterPTs[1];838 else839 {840 /** @todo this must be handled with a relocation of the conflicting mapping!841 * Which of course cannot be done because we're in the middle of the initialization. bad design! */842 AssertLogRelMsgFailedReturn(("Conflict between core code and PGMR3Mapping(). uAddress=%RHv\n", uAddress),843 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);844 }845 }846 if (pPT->a[iPTE].u)847 AssertLogRelMsgFailedReturn(("Conflict iPTE=%#x iPDE=%#x uAddress=%RHv pPT->a[iPTE].u=%RX32\n", iPTE, iPDE, uAddress, pPT->a[iPTE].u),848 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);849 850 /*851 * PAE.852 */853 const unsigned iPDPE= (uAddress >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;854 iPDE = (uAddress >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;855 iPTE = (uAddress >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK;856 Assert(iPDPE < 4);857 Assert(pVM->pgm.s.apInterPaePDs[iPDPE]);858 PX86PTPAE pPTPae = pPTPaeDefault;859 if (pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u)860 {861 RTHCPHYS HCPhysPT = pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u & X86_PDE_PAE_PG_MASK;862 if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPaePTs[0]))863 pPTPae = pVM->pgm.s.apInterPaePTs[0];864 else if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPaePTs[0]))865 pPTPae = pVM->pgm.s.apInterPaePTs[1];866 else867 {868 /** @todo this must be handled with a relocation of the conflicting mapping!869 * Which of course cannot be done because we're in the middle of the initialization. bad design! */870 AssertLogRelMsgFailedReturn(("Conflict between core code and PGMR3Mapping(). uAddress=%RHv\n", uAddress),871 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);872 }873 }874 if (pPTPae->a[iPTE].u)875 AssertLogRelMsgFailedReturn(("Conflict iPTE=%#x iPDE=%#x uAddress=%RHv pPTPae->a[iPTE].u=%#RX64\n", iPTE, iPDE, uAddress, pPTPae->a[iPTE].u),876 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);877 878 /* next */879 uAddress += PAGE_SIZE;880 cPages--;881 }882 883 return VINF_SUCCESS;884 }885 886 887 888 /**889 * Sets up the intermediate page tables for a verified mapping.890 *891 * @param pVM The cross context VM structure.892 * @param uAddress Address of the mapping.893 * @param HCPhys The physical address of the page range.894 * @param cPages Number of pages.895 * @param pPTDefault Pointer to the default page table for this mapping.896 * @param pPTPaeDefault Pointer to the default page table for this mapping.897 */898 static void pgmR3MapIntermediateDoOne(PVM pVM, uintptr_t uAddress, RTHCPHYS HCPhys, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault)899 {900 while (cPages > 0)901 {902 /*903 * 32-Bit.904 */905 unsigned iPDE = (uAddress >> X86_PD_SHIFT) & X86_PD_MASK;906 unsigned iPTE = (uAddress >> X86_PT_SHIFT) & X86_PT_MASK;907 PX86PT pPT;908 if (pVM->pgm.s.pInterPD->a[iPDE].u)909 pPT = (PX86PT)MMPagePhys2Page(pVM, pVM->pgm.s.pInterPD->a[iPDE].u & X86_PDE_PG_MASK);910 else911 {912 pVM->pgm.s.pInterPD->a[iPDE].u = X86_PDE_P | X86_PDE_A | X86_PDE_RW913 | (uint32_t)MMPage2Phys(pVM, pPTDefault);914 pPT = pPTDefault;915 }916 pPT->a[iPTE].u = X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D | (uint32_t)HCPhys;917 918 /*919 * PAE920 */921 const unsigned iPDPE= (uAddress >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;922 iPDE = (uAddress >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;923 iPTE = (uAddress >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK;924 Assert(iPDPE < 4);925 Assert(pVM->pgm.s.apInterPaePDs[iPDPE]);926 PX86PTPAE 
pPTPae;927 if (pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u)928 pPTPae = (PX86PTPAE)MMPagePhys2Page(pVM, pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u & X86_PDE_PAE_PG_MASK);929 else930 {931 pPTPae = pPTPaeDefault;932 pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u = X86_PDE_P | X86_PDE_A | X86_PDE_RW933 | MMPage2Phys(pVM, pPTPaeDefault);934 }935 pPTPae->a[iPTE].u = X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D | HCPhys;936 937 /* next */938 cPages--;939 HCPhys += PAGE_SIZE;940 uAddress += PAGE_SIZE;941 }942 }943 944 945 /**946 * Clears all PDEs involved with the mapping in the shadow and intermediate page tables.947 *948 * @param pVM The cross context VM structure.949 * @param pMap Pointer to the mapping in question.950 * @param iOldPDE The index of the 32-bit PDE corresponding to the base of the mapping.951 */952 static void pgmR3MapClearPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE)953 {954 unsigned i = pMap->cPTs;955 PVMCPU pVCpu = VMMGetCpu(pVM);956 PGM_LOCK_VOID(pVM); /* to avoid assertions */957 958 pgmMapClearShadowPDEs(pVM, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3), pMap, iOldPDE, false /*fDeactivateCR3*/);959 960 iOldPDE += i;961 while (i-- > 0)962 {963 iOldPDE--;964 965 /*966 * 32-bit.967 */968 pVM->pgm.s.pInterPD->a[iOldPDE].u = 0;969 970 /*971 * PAE.972 */973 const unsigned iPD = iOldPDE / 256; /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */974 unsigned iPDE = iOldPDE * 2 % 512;975 pVM->pgm.s.apInterPaePDs[iPD]->a[iPDE].u = 0;976 iPDE++;977 AssertFatal(iPDE < 512);978 pVM->pgm.s.apInterPaePDs[iPD]->a[iPDE].u = 0;979 }980 981 PGM_UNLOCK(pVM);982 }983 984 985 /**986 * Sets all PDEs involved with the mapping in the shadow and intermediate page tables.987 *988 * @param pVM The cross context VM structure.989 * @param pMap Pointer to the mapping in question.990 * @param iNewPDE The index of the 32-bit PDE corresponding to the base of the mapping.991 */992 static void pgmR3MapSetPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)993 {994 PPGM pPGM = &pVM->pgm.s;995 # ifdef VBOX_STRICT996 PVMCPU pVCpu = VMMGetCpu(pVM);997 # endif998 PGM_LOCK_VOID(pVM); /* to avoid assertions */999 1000 Assert(!pgmMapAreMappingsEnabled(pVM) || PGMGetGuestMode(pVCpu) <= PGMMODE_PAE_NX);1001 1002 pgmMapSetShadowPDEs(pVM, pMap, iNewPDE);1003 1004 /*1005 * Init the page tables and insert them into the page directories.1006 */1007 unsigned i = pMap->cPTs;1008 iNewPDE += i;1009 while (i-- > 0)1010 {1011 iNewPDE--;1012 1013 /*1014 * 32-bit.1015 */1016 X86PDE Pde;1017 /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags */1018 Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;1019 pPGM->pInterPD->a[iNewPDE] = Pde;1020 1021 /*1022 * PAE.1023 */1024 const unsigned iPD = iNewPDE / 256;1025 unsigned iPDE = iNewPDE * 2 % 512;1026 X86PDEPAE PdePae0;1027 PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;1028 pPGM->apInterPaePDs[iPD]->a[iPDE] = PdePae0;1029 iPDE++;1030 AssertFatal(iPDE < 512);1031 X86PDEPAE PdePae1;1032 PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;1033 pPGM->apInterPaePDs[iPD]->a[iPDE] = PdePae1;1034 }1035 1036 PGM_UNLOCK(pVM);1037 }1038 1039 1040 /**1041 * Relocates a mapping to a new address.1042 *1043 * @param pVM The cross context VM structure.1044 * @param pMapping The mapping to relocate.1045 * @param GCPtrOldMapping The address of the start of the old 
mapping.1046 * NIL_RTGCPTR if not currently mapped.1047 * @param GCPtrNewMapping The address of the start of the new mapping.1048 */1049 static void pgmR3MapRelocate(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping, RTGCPTR GCPtrNewMapping)1050 {1051 Log(("PGM: Relocating %s from %RGv to %RGv\n", pMapping->pszDesc, GCPtrOldMapping, GCPtrNewMapping));1052 AssertMsg(GCPtrOldMapping == pMapping->GCPtr, ("%RGv vs %RGv\n", GCPtrOldMapping, pMapping->GCPtr));1053 AssertMsg((GCPtrOldMapping >> X86_PD_SHIFT) < X86_PG_ENTRIES, ("%RGv\n", GCPtrOldMapping));1054 AssertMsg((GCPtrNewMapping >> X86_PD_SHIFT) < X86_PG_ENTRIES, ("%RGv\n", GCPtrOldMapping));1055 1056 /*1057 * Relocate the page table(s).1058 */1059 if (GCPtrOldMapping != NIL_RTGCPTR)1060 pgmR3MapClearPDEs(pVM, pMapping, GCPtrOldMapping >> X86_PD_SHIFT);1061 pgmR3MapSetPDEs(pVM, pMapping, GCPtrNewMapping >> X86_PD_SHIFT);1062 1063 /*1064 * Update and resort the mapping list.1065 */1066 1067 /* Find previous mapping for pMapping, put result into pPrevMap. */1068 PPGMMAPPING pPrevMap = NULL;1069 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;1070 while (pCur && pCur != pMapping)1071 {1072 /* next */1073 pPrevMap = pCur;1074 pCur = pCur->pNextR3;1075 }1076 Assert(pCur);1077 1078 /* Find mapping which >= than pMapping. */1079 RTGCPTR GCPtrNew = GCPtrNewMapping;1080 PPGMMAPPING pPrev = NULL;1081 pCur = pVM->pgm.s.pMappingsR3;1082 while (pCur && pCur->GCPtr < GCPtrNew)1083 {1084 /* next */1085 pPrev = pCur;1086 pCur = pCur->pNextR3;1087 }1088 1089 if (pCur != pMapping && pPrev != pMapping)1090 {1091 /*1092 * Unlink.1093 */1094 if (pPrevMap)1095 {1096 pPrevMap->pNextR3 = pMapping->pNextR3;1097 pPrevMap->pNextRC = pMapping->pNextRC;1098 pPrevMap->pNextR0 = pMapping->pNextR0;1099 }1100 else1101 {1102 pVM->pgm.s.pMappingsR3 = pMapping->pNextR3;1103 pVM->pgm.s.pMappingsRC = pMapping->pNextRC;1104 pVM->pgm.s.pMappingsR0 = pMapping->pNextR0;1105 }1106 1107 /*1108 * Link1109 */1110 pMapping->pNextR3 = pCur;1111 if (pPrev)1112 {1113 pMapping->pNextRC = pPrev->pNextRC;1114 pMapping->pNextR0 = pPrev->pNextR0;1115 pPrev->pNextR3 = pMapping;1116 pPrev->pNextRC = MMHyperR3ToRC(pVM, pMapping);1117 pPrev->pNextR0 = MMHyperR3ToR0(pVM, pMapping);1118 }1119 else1120 {1121 pMapping->pNextRC = pVM->pgm.s.pMappingsRC;1122 pMapping->pNextR0 = pVM->pgm.s.pMappingsR0;1123 pVM->pgm.s.pMappingsR3 = pMapping;1124 pVM->pgm.s.pMappingsRC = MMHyperR3ToRC(pVM, pMapping);1125 pVM->pgm.s.pMappingsR0 = MMHyperR3ToR0(pVM, pMapping);1126 }1127 }1128 1129 /*1130 * Update the entry.1131 */1132 pMapping->GCPtr = GCPtrNew;1133 pMapping->GCPtrLast = GCPtrNew + pMapping->cb - 1;1134 1135 /*1136 * Callback to execute the relocation.1137 */1138 pMapping->pfnRelocate(pVM, GCPtrOldMapping, GCPtrNewMapping, PGMRELOCATECALL_RELOCATE, pMapping->pvUser);1139 }1140 1141 1142 /**1143 * Checks if a new mapping address wasn't previously used and caused a clash with guest mappings.1144 *1145 * @returns VBox status code.1146 * @param pMapping The mapping which conflicts.1147 * @param GCPtr New mapping address to try1148 */1149 bool pgmR3MapIsKnownConflictAddress(PPGMMAPPING pMapping, RTGCPTR GCPtr)1150 {1151 for (unsigned i = 0; i < RT_ELEMENTS(pMapping->aGCPtrConflicts); i++)1152 {1153 if (GCPtr == pMapping->aGCPtrConflicts[i])1154 return true;1155 }1156 return false;1157 }1158 1159 1160 /**1161 * Resolves a conflict between a page table based GC mapping and1162 * the Guest OS page tables. 
(32 bits version)1163 *1164 * @returns VBox status code.1165 * @param pVM The cross context VM structure.1166 * @param pMapping The mapping which conflicts.1167 * @param pPDSrc The page directory of the guest OS.1168 * @param GCPtrOldMapping The address of the start of the current mapping.1169 */1170 int pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PX86PD pPDSrc, RTGCPTR GCPtrOldMapping)1171 {1172 STAM_REL_COUNTER_INC(&pVM->pgm.s.cRelocations);1173 STAM_PROFILE_START(&pVM->pgm.s.Stats.StatR3ResolveConflict, a);1174 1175 /* Raw mode only which implies one VCPU. */1176 Assert(pVM->cCpus == 1);1177 1178 pMapping->aGCPtrConflicts[pMapping->cConflicts & (PGMMAPPING_CONFLICT_MAX-1)] = GCPtrOldMapping;1179 pMapping->cConflicts++;1180 1181 /*1182 * Scan for free page directory entries.1183 *1184 * Note that we do not support mappings at the very end of the1185 * address space since that will break our GCPtrEnd assumptions.1186 */1187 const unsigned cPTs = pMapping->cPTs;1188 unsigned iPDNew = RT_ELEMENTS(pPDSrc->a) - cPTs; /* (+ 1 - 1) */1189 while (iPDNew-- > 0)1190 {1191 if (pPDSrc->a[iPDNew].n.u1Present)1192 continue;1193 1194 if (pgmR3MapIsKnownConflictAddress(pMapping, iPDNew << X86_PD_SHIFT))1195 continue;1196 1197 if (cPTs > 1)1198 {1199 bool fOk = true;1200 for (unsigned i = 1; fOk && i < cPTs; i++)1201 if (pPDSrc->a[iPDNew + i].n.u1Present)1202 fOk = false;1203 if (!fOk)1204 continue;1205 }1206 1207 /*1208 * Check that it's not conflicting with an intermediate page table mapping.1209 */1210 bool fOk = true;1211 unsigned i = cPTs;1212 while (fOk && i-- > 0)1213 fOk = !pVM->pgm.s.pInterPD->a[iPDNew + i].n.u1Present;1214 if (!fOk)1215 continue;1216 /** @todo AMD64 should check the PAE directories and skip the 32bit stuff. */1217 1218 /*1219 * Ask for the mapping.1220 */1221 RTGCPTR GCPtrNewMapping = (RTGCPTR32)iPDNew << X86_PD_SHIFT;1222 1223 if (pMapping->pfnRelocate(pVM, GCPtrOldMapping, GCPtrNewMapping, PGMRELOCATECALL_SUGGEST, pMapping->pvUser))1224 {1225 pgmR3MapRelocate(pVM, pMapping, GCPtrOldMapping, GCPtrNewMapping);1226 STAM_PROFILE_STOP(&pVM->pgm.s.Stats.StatR3ResolveConflict, a);1227 return VINF_SUCCESS;1228 }1229 }1230 1231 STAM_PROFILE_STOP(&pVM->pgm.s.Stats.StatR3ResolveConflict, a);1232 # ifdef DEBUG_bird1233 /*1234 * Ended up here frequently recently with an NT4.0 VM (using SMP kernel).1235 *1236 * The problem is when enabling large pages (i.e. updating CR4) using the1237 * _Ki386EnableCurrentLargePage@8 assembly routine (address 0x801c97ad-9).1238 * The routine loads a sparsely popuplated page tables with identiy mappings1239 * of its own code, most entries are whatever ExAllocatePool returned, which1240 * is documented as undefined but all 0xffffffff in this case. Once loaded,1241 * it jumps to the physical code address, disables paging, set CR4.PSE=1,1242 * re-enables paging, restore the original page table and returns successfully.1243 *1244 * Theory: if CSAM/PATM patches the pushf;cli;mov eax,cr3; sequence, at the1245 * start of that function we're apparently in trouble, if CSAM/PATM doesn't1246 * we're switching back to REM and doing disabling of paging there instead.1247 *1248 * Normal PD: CR3=00030000; Problematic identity mapped PD: CR3=0x5fa000.1249 */1250 DBGFSTOP(pVM);1251 # endif1252 AssertMsgFailed(("Failed to relocate page table mapping '%s' from %#x! 
(cPTs=%d)\n", pMapping->pszDesc, GCPtrOldMapping, cPTs));1253 return VERR_PGM_NO_HYPERVISOR_ADDRESS;1254 }1255 1256 1257 /**1258 * Resolves a conflict between a page table based GC mapping and1259 * the Guest OS page tables. (PAE bits version)1260 *1261 * @returns VBox status code.1262 * @param pVM The cross context VM structure.1263 * @param pMapping The mapping which conflicts.1264 * @param GCPtrOldMapping The address of the start of the current mapping.1265 */1266 int pgmR3SyncPTResolveConflictPAE(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping)1267 {1268 STAM_REL_COUNTER_INC(&pVM->pgm.s.cRelocations);1269 STAM_PROFILE_START(&pVM->pgm.s.StatR3ResolveConflict, a);1270 1271 /* Raw mode only which implies one VCPU. */1272 Assert(pVM->cCpus == 1);1273 PVMCPU pVCpu = VMMGetCpu(pVM);1274 1275 pMapping->aGCPtrConflicts[pMapping->cConflicts & (PGMMAPPING_CONFLICT_MAX-1)] = GCPtrOldMapping;1276 pMapping->cConflicts++;1277 1278 for (int iPDPTE = X86_PG_PAE_PDPE_ENTRIES - 1; iPDPTE >= 0; iPDPTE--)1279 {1280 unsigned iPDSrc;1281 PX86PDPAE pPDSrc = pgmGstGetPaePDPtr(pVCpu, (RTGCPTR32)iPDPTE << X86_PDPT_SHIFT, &iPDSrc, NULL);1282 1283 /*1284 * Scan for free page directory entries.1285 *1286 * Note that we do not support mappings at the very end of the1287 * address space since that will break our GCPtrEnd assumptions.1288 * Nor do we support mappings crossing page directories.1289 */1290 const unsigned cPTs = pMapping->cb >> X86_PD_PAE_SHIFT;1291 unsigned iPDNew = RT_ELEMENTS(pPDSrc->a) - cPTs; /* (+ 1 - 1) */1292 1293 while (iPDNew-- > 0)1294 {1295 /* Ugly assumption that mappings start on a 4 MB boundary. */1296 if (iPDNew & 1)1297 continue;1298 1299 if (pgmR3MapIsKnownConflictAddress(pMapping, ((RTGCPTR32)iPDPTE << X86_PDPT_SHIFT) + (iPDNew << X86_PD_PAE_SHIFT)))1300 continue;1301 1302 if (pPDSrc)1303 {1304 if (pPDSrc->a[iPDNew].n.u1Present)1305 continue;1306 if (cPTs > 1)1307 {1308 bool fOk = true;1309 for (unsigned i = 1; fOk && i < cPTs; i++)1310 if (pPDSrc->a[iPDNew + i].n.u1Present)1311 fOk = false;1312 if (!fOk)1313 continue;1314 }1315 }1316 /*1317 * Check that it's not conflicting with an intermediate page table mapping.1318 */1319 bool fOk = true;1320 unsigned i = cPTs;1321 while (fOk && i-- > 0)1322 fOk = !pVM->pgm.s.apInterPaePDs[iPDPTE]->a[iPDNew + i].n.u1Present;1323 if (!fOk)1324 continue;1325 1326 /*1327 * Ask for the mapping.1328 */1329 RTGCPTR GCPtrNewMapping = ((RTGCPTR32)iPDPTE << X86_PDPT_SHIFT) + ((RTGCPTR32)iPDNew << X86_PD_PAE_SHIFT);1330 1331 if (pMapping->pfnRelocate(pVM, GCPtrOldMapping, GCPtrNewMapping, PGMRELOCATECALL_SUGGEST, pMapping->pvUser))1332 {1333 pgmR3MapRelocate(pVM, pMapping, GCPtrOldMapping, GCPtrNewMapping);1334 STAM_PROFILE_STOP(&pVM->pgm.s.Stats.StatR3ResolveConflict, a);1335 return VINF_SUCCESS;1336 }1337 }1338 }1339 STAM_PROFILE_STOP(&pVM->pgm.s.Stats.StatR3ResolveConflict, a);1340 AssertMsgFailed(("Failed to relocate page table mapping '%s' from %#x! (cPTs=%d)\n", pMapping->pszDesc, GCPtrOldMapping, pMapping->cb >> X86_PD_PAE_SHIFT));1341 return VERR_PGM_NO_HYPERVISOR_ADDRESS;1342 }1343 1344 1345 /**1346 * Read memory from the guest mappings.1347 *1348 * This will use the page tables associated with the mappings to1349 * read the memory. 
This means that not all kind of memory is readable1350 * since we don't necessarily know how to convert that physical address1351 * to a HC virtual one.1352 *1353 * @returns VBox status code.1354 * @param pVM The cross context VM structure.1355 * @param pvDst The destination address (HC of course).1356 * @param GCPtrSrc The source address (GC virtual address).1357 * @param cb Number of bytes to read.1358 *1359 * @remarks The is indirectly for DBGF only.1360 * @todo Consider renaming it to indicate it's special usage, or just1361 * reimplement it in MMR3HyperReadGCVirt.1362 */1363 VMMR3DECL(int) PGMR3MapRead(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)1364 {1365 /*1366 * Simplicity over speed... Chop the request up into chunks1367 * which don't cross pages.1368 */1369 if (cb + (GCPtrSrc & PAGE_OFFSET_MASK) > PAGE_SIZE)1370 {1371 for (;;)1372 {1373 size_t cbRead = RT_MIN(cb, PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK));1374 int rc = PGMR3MapRead(pVM, pvDst, GCPtrSrc, cbRead);1375 if (RT_FAILURE(rc))1376 return rc;1377 cb -= cbRead;1378 if (!cb)1379 break;1380 pvDst = (char *)pvDst + cbRead;1381 GCPtrSrc += cbRead;1382 }1383 return VINF_SUCCESS;1384 }1385 1386 /*1387 * Find the mapping.1388 */1389 PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);1390 while (pCur)1391 {1392 RTGCPTR off = GCPtrSrc - pCur->GCPtr;1393 if (off < pCur->cb)1394 {1395 if (off + cb > pCur->cb)1396 {1397 AssertMsgFailed(("Invalid page range %RGv LB%#x. mapping '%s' %RGv to %RGv\n",1398 GCPtrSrc, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast));1399 return VERR_INVALID_PARAMETER;1400 }1401 1402 unsigned iPT = off >> X86_PD_SHIFT;1403 unsigned iPTE = (off >> PAGE_SHIFT) & X86_PT_MASK;1404 while (cb > 0 && iPTE < RT_ELEMENTS(CTXALLSUFF(pCur->aPTs[iPT].pPT)->a))1405 {1406 PCPGMSHWPTEPAE pPte = &pCur->aPTs[iPT].CTXALLSUFF(paPaePTs)[iPTE / 512].a[iPTE % 512];1407 if (!PGMSHWPTEPAE_IS_P(*pPte))1408 return VERR_PAGE_NOT_PRESENT;1409 RTHCPHYS HCPhys = PGMSHWPTEPAE_GET_HCPHYS(*pPte);1410 1411 /*1412 * Get the virtual page from the physical one.1413 */1414 void *pvPage;1415 int rc = MMR3HCPhys2HCVirt(pVM, HCPhys, &pvPage);1416 if (RT_FAILURE(rc))1417 return rc;1418 1419 memcpy(pvDst, (char *)pvPage + (GCPtrSrc & PAGE_OFFSET_MASK), cb);1420 return VINF_SUCCESS;1421 }1422 }1423 1424 /* next */1425 pCur = CTXALLSUFF(pCur->pNext);1426 }1427 1428 return VERR_INVALID_POINTER;1429 }1430 1431 1432 /**1433 * Info callback for 'pgmhandlers'.1434 *1435 * @param pVM The cross context VM structure.1436 * @param pHlp The output helpers.1437 * @param pszArgs The arguments. 
phys or virt.1438 */1439 DECLCALLBACK(void) pgmR3MapInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)1440 {1441 NOREF(pszArgs);1442 if (!pgmMapAreMappingsEnabled(pVM))1443 pHlp->pfnPrintf(pHlp, "\nThe mappings are DISABLED.\n");1444 else if (pVM->pgm.s.fMappingsFixed)1445 pHlp->pfnPrintf(pHlp, "\nThe mappings are FIXED: %RGv-%RGv\n",1446 pVM->pgm.s.GCPtrMappingFixed, pVM->pgm.s.GCPtrMappingFixed + pVM->pgm.s.cbMappingFixed - 1);1447 else if (pVM->pgm.s.fMappingsFixedRestored)1448 pHlp->pfnPrintf(pHlp, "\nThe mappings are FLOATING-RESTORED-FIXED: %RGv-%RGv\n",1449 pVM->pgm.s.GCPtrMappingFixed, pVM->pgm.s.GCPtrMappingFixed + pVM->pgm.s.cbMappingFixed - 1);1450 else1451 pHlp->pfnPrintf(pHlp, "\nThe mappings are FLOATING.\n");1452 1453 PPGMMAPPING pCur;1454 for (pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)1455 {1456 pHlp->pfnPrintf(pHlp, "%RGv - %RGv %s\n", pCur->GCPtr, pCur->GCPtrLast, pCur->pszDesc);1457 if (pCur->cConflicts > 0)1458 {1459 pHlp->pfnPrintf(pHlp, " %u conflict%s: ", pCur->cConflicts, pCur->cConflicts == 1 ? "" : "s");1460 uint32_t cLeft = RT_MIN(pCur->cConflicts, RT_ELEMENTS(pCur->aGCPtrConflicts));1461 uint32_t i = pCur->cConflicts;1462 while (cLeft-- > 0)1463 {1464 i = (i - 1) & (PGMMAPPING_CONFLICT_MAX - 1);1465 pHlp->pfnPrintf(pHlp, cLeft ? "%RGv, " : "%RGv\n", pCur->aGCPtrConflicts[i]);1466 }1467 }1468 }1469 }1470 1471 #endif /* !PGM_WITHOUT_MAPPINGS */1472 -
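The removed PGMR3MapRead above favours simplicity over speed: it chops the request into pieces that never cross a page boundary, so each piece can be resolved through a single page-table entry and one MMR3HCPhys2HCVirt conversion. Below is a minimal, self-contained sketch of that chunking pattern in plain C; resolve_page() is a hypothetical stand-in for the real PTE walk and is stubbed with a static buffer here.

    #include <stdint.h>
    #include <string.h>
    #include <stddef.h>

    #define PAGE_SIZE        4096u
    #define PAGE_OFFSET_MASK (PAGE_SIZE - 1u)

    /* Hypothetical page lookup: one fake guest page backed by a static buffer.
       Real code would walk the mapping's shadow page tables instead. */
    static uint8_t g_abFakePage[PAGE_SIZE];
    static void *resolve_page(uint64_t uGuestPage)
    {
        (void)uGuestPage;
        return g_abFakePage;
    }

    /* Copy cb bytes from GCPtrSrc, never letting one memcpy cross a page boundary. */
    static int read_guest(void *pvDst, uint64_t GCPtrSrc, size_t cb)
    {
        while (cb > 0)
        {
            size_t cbChunk = PAGE_SIZE - (size_t)(GCPtrSrc & PAGE_OFFSET_MASK); /* room left in page */
            if (cbChunk > cb)
                cbChunk = cb;

            void *pvPage = resolve_page(GCPtrSrc & ~(uint64_t)PAGE_OFFSET_MASK);
            if (!pvPage)
                return -1;                               /* page not present */

            memcpy(pvDst, (uint8_t *)pvPage + (GCPtrSrc & PAGE_OFFSET_MASK), cbChunk);
            pvDst     = (uint8_t *)pvDst + cbChunk;
            GCPtrSrc += cbChunk;
            cb       -= cbChunk;
        }
        return 0;
    }

The original went one step further and simply recursed on itself for the multi-page case; the loop above is the equivalent iterative form.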
trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp
r91853 r91854 1699 1699 * @param GCPhys The address of the RAM range. 1700 1700 * @param GCPhysLast The last address of the RAM range. 1701 * @param RCPtrNew The RC address if the range is floating. NIL_RTRCPTR1702 * if in HMA.1703 1701 * @param R0PtrNew Ditto for R0. 1704 1702 * @param pszDesc The description. … … 1706 1704 */ 1707 1705 static int pgmR3PhysInitAndLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, 1708 RTR CPTR RCPtrNew, RTR0PTR R0PtrNew, const char *pszDesc, PPGMRAMRANGE pPrev)1706 RTR0PTR R0PtrNew, const char *pszDesc, PPGMRAMRANGE pPrev) 1709 1707 { 1710 1708 /* … … 1719 1717 pNew->paLSPages = NULL; 1720 1718 pNew->fFlags = 0; 1721 #ifndef PGM_WITHOUT_MAPPINGS1722 if (RCPtrNew != NIL_RTRCPTR)1723 pNew->fFlags |= PGM_RAM_RANGE_FLAGS_FLOATING;1724 #else1725 NOREF(RCPtrNew);1726 #endif1727 1719 1728 1720 uint32_t const cPages = pNew->cb >> PAGE_SHIFT; … … 1776 1768 1777 1769 1778 #ifndef PGM_WITHOUT_MAPPINGS1779 /**1780 * @callback_method_impl{FNPGMRELOCATE, Relocate a floating RAM range.}1781 * @sa pgmR3PhysMMIO2ExRangeRelocate1782 */1783 static DECLCALLBACK(bool) pgmR3PhysRamRangeRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew,1784 PGMRELOCATECALL enmMode, void *pvUser)1785 {1786 PPGMRAMRANGE pRam = (PPGMRAMRANGE)pvUser;1787 Assert(pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING);1788 Assert(pRam->pSelfRC == GCPtrOld + PAGE_SIZE); RT_NOREF_PV(GCPtrOld);1789 1790 switch (enmMode)1791 {1792 case PGMRELOCATECALL_SUGGEST:1793 return true;1794 1795 case PGMRELOCATECALL_RELOCATE:1796 {1797 /*1798 * Update myself, then relink all the ranges and flush the RC TLB.1799 */1800 PGM_LOCK_VOID(pVM);1801 1802 pRam->pSelfRC = (RTRCPTR)(GCPtrNew + PAGE_SIZE);1803 1804 pgmR3PhysRelinkRamRanges(pVM);1805 for (unsigned i = 0; i < PGM_RAMRANGE_TLB_ENTRIES; i++)1806 pVM->pgm.s.apRamRangesTlbRC[i] = NIL_RTRCPTR;1807 1808 PGM_UNLOCK(pVM);1809 return true;1810 }1811 1812 default:1813 AssertFailedReturn(false);1814 }1815 }1816 #endif /* !PGM_WITHOUT_MAPPINGS */1817 1818 1819 1770 /** 1820 1771 * PGMR3PhysRegisterRam worker that registers a high chunk. … … 1824 1775 * @param GCPhys The address of the RAM. 1825 1776 * @param cRamPages The number of RAM pages to register. 1826 * @param cbChunk The size of the PGMRAMRANGE guest mapping.1827 1777 * @param iChunk The chunk number. 1828 1778 * @param pszDesc The RAM range description. 1829 1779 * @param ppPrev Previous RAM range pointer. In/Out. 1830 1780 */ 1831 static int pgmR3PhysRegisterHighRamChunk(PVM pVM, RTGCPHYS GCPhys, uint32_t cRamPages, 1832 uint32_t cbChunk, uint32_t iChunk, const char *pszDesc, 1833 PPGMRAMRANGE *ppPrev) 1781 static int pgmR3PhysRegisterHighRamChunk(PVM pVM, RTGCPHYS GCPhys, uint32_t cRamPages, uint32_t iChunk, 1782 const char *pszDesc, PPGMRAMRANGE *ppPrev) 1834 1783 { 1835 1784 const char *pszDescChunk = iChunk == 0 … … 1855 1804 1856 1805 /* 1857 * Create a mapping and map the pages into it. 1858 * We push these in below the HMA. 1806 * Ok, init and link the range. 
1859 1807 */ 1860 #ifdef PGM_WITHOUT_MAPPINGS 1861 RTGCPTR const GCPtrChunk = NIL_RTGCPTR; 1862 RT_NOREF(cbChunk); 1863 #else 1864 RTGCPTR const GCPtrChunkMap = pVM->pgm.s.GCPtrPrevRamRangeMapping - cbChunk; 1865 rc = PGMR3MapPT(pVM, GCPtrChunkMap, cbChunk, 0 /*fFlags*/, pgmR3PhysRamRangeRelocate, pNew, pszDescChunk); 1808 rc = pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhys + ((RTGCPHYS)cRamPages << PAGE_SHIFT) - 1, 1809 R0PtrChunk, pszDescChunk, *ppPrev); 1866 1810 if (RT_SUCCESS(rc)) 1867 { 1868 pVM->pgm.s.GCPtrPrevRamRangeMapping = GCPtrChunkMap; 1869 1870 RTGCPTR const GCPtrChunk = GCPtrChunkMap + PAGE_SIZE; 1871 RTGCPTR GCPtrPage = GCPtrChunk; 1872 for (uint32_t iPage = 0; iPage < cChunkPages && RT_SUCCESS(rc); iPage++, GCPtrPage += PAGE_SIZE) 1873 rc = PGMMap(pVM, GCPtrPage, paChunkPages[iPage].Phys, PAGE_SIZE, 0); 1874 if (RT_SUCCESS(rc)) 1875 #endif /* !PGM_WITHOUT_MAPPINGS */ 1876 { 1877 /* 1878 * Ok, init and link the range. 1879 */ 1880 rc = pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhys + ((RTGCPHYS)cRamPages << PAGE_SHIFT) - 1, 1881 (RTRCPTR)GCPtrChunk, R0PtrChunk, pszDescChunk, *ppPrev); 1882 if (RT_SUCCESS(rc)) 1883 *ppPrev = pNew; 1884 } 1885 #ifndef PGM_WITHOUT_MAPPINGS 1886 } 1887 #endif 1811 *ppPrev = pNew; 1888 1812 1889 1813 if (RT_FAILURE(rc)) … … 1985 1909 cPagesInChunk = cPagesPerChunk; 1986 1910 1987 rc = pgmR3PhysRegisterHighRamChunk(pVM, GCPhysChunk, cPagesInChunk, cbChunk,iChunk, pszDesc, &pPrev);1911 rc = pgmR3PhysRegisterHighRamChunk(pVM, GCPhysChunk, cPagesInChunk, iChunk, pszDesc, &pPrev); 1988 1912 AssertRCReturn(rc, rc); 1989 1913 … … 2004 1928 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc cbRamRange=%zu\n", rc, cbRamRange), rc); 2005 1929 2006 rc = pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhysLast, NIL_RTRCPTR,MMHyperCCToR0(pVM, pNew), pszDesc, pPrev);1930 rc = pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhysLast, MMHyperCCToR0(pVM, pNew), pszDesc, pPrev); 2007 1931 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc cbRamRange=%zu\n", rc, cbRamRange), rc); 2008 1932 } … … 2806 2730 2807 2731 2808 #ifndef PGM_WITHOUT_MAPPINGS2809 /**2810 * @callback_method_impl{FNPGMRELOCATE, Relocate a floating MMIO/MMIO2 range.}2811 * @sa pgmR3PhysRamRangeRelocate2812 */2813 static DECLCALLBACK(bool) pgmR3PhysMmio2RangeRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew,2814 PGMRELOCATECALL enmMode, void *pvUser)2815 {2816 PPGMREGMMIO2RANGE pMmio = (PPGMREGMMIO2RANGE)pvUser;2817 Assert(pMmio->RamRange.fFlags & PGM_RAM_RANGE_FLAGS_FLOATING);2818 Assert(pMmio->RamRange.pSelfRC == GCPtrOld + PAGE_SIZE + RT_UOFFSETOF(PGMREGMMIO2RANGE, RamRange)); RT_NOREF_PV(GCPtrOld);2819 2820 switch (enmMode)2821 {2822 case PGMRELOCATECALL_SUGGEST:2823 return true;2824 2825 case PGMRELOCATECALL_RELOCATE:2826 {2827 /*2828 * Update myself, then relink all the ranges and flush the RC TLB.2829 */2830 PGM_LOCK_VOID(pVM);2831 2832 pMmio->RamRange.pSelfRC = (RTRCPTR)(GCPtrNew + PAGE_SIZE + RT_UOFFSETOF(PGMREGMMIO2RANGE, RamRange));2833 2834 pgmR3PhysRelinkRamRanges(pVM);2835 for (unsigned i = 0; i < PGM_RAMRANGE_TLB_ENTRIES; i++)2836 pVM->pgm.s.apRamRangesTlbRC[i] = NIL_RTRCPTR;2837 2838 PGM_UNLOCK(pVM);2839 return true;2840 }2841 2842 default:2843 AssertFailedReturn(false);2844 }2845 }2846 #endif /* !PGM_WITHOUT_MAPPINGS */2847 2848 2849 2732 /** 2850 2733 * Calculates the number of chunks … … 4024 3907 return VINF_SUCCESS; 4025 3908 } 4026 4027 4028 #ifndef PGM_WITHOUT_MAPPINGS4029 /**4030 * Gets the HC physical address of a page in the MMIO2 region.4031 *4032 * This is API is 
intended for MMHyper and shouldn't be called4033 * by anyone else...4034 *4035 * @returns VBox status code.4036 * @param pVM The cross context VM structure.4037 * @param pDevIns The owner of the memory, optional.4038 * @param iSubDev Sub-device number.4039 * @param iRegion The region.4040 * @param off The page expressed an offset into the MMIO2 region.4041 * @param pHCPhys Where to store the result.4042 */4043 VMMR3_INT_DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion,4044 RTGCPHYS off, PRTHCPHYS pHCPhys)4045 {4046 /*4047 * Validate input4048 */4049 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);4050 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);4051 AssertReturn(iSubDev <= UINT8_MAX, VERR_INVALID_PARAMETER);4052 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);4053 4054 PGM_LOCK_VOID(pVM);4055 PPGMREGMMIO2RANGE pCurMmio = pgmR3PhysMmio2Find(pVM, pDevIns, iSubDev, iRegion, NIL_PGMMMIO2HANDLE);4056 AssertReturn(pCurMmio, VERR_NOT_FOUND);4057 AssertReturn(pCurMmio->fFlags & (PGMREGMMIO2RANGE_F_MMIO2 | PGMREGMMIO2RANGE_F_FIRST_CHUNK), VERR_WRONG_TYPE);4058 4059 while ( off >= pCurMmio->RamRange.cb4060 && !(pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK))4061 {4062 off -= pCurMmio->RamRange.cb;4063 pCurMmio = pCurMmio->pNextR3;4064 }4065 AssertReturn(off < pCurMmio->RamRange.cb, VERR_INVALID_PARAMETER);4066 4067 PCPGMPAGE pPage = &pCurMmio->RamRange.aPages[off >> PAGE_SHIFT];4068 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage);4069 PGM_UNLOCK(pVM);4070 return VINF_SUCCESS;4071 }4072 #endif /* !PGM_WITHOUT_MAPPINGS */4073 3909 4074 3910 -
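The deleted PGMR3PhysMMIO2GetHCPhys walks the chunked MMIO2 range list, subtracting each chunk's size from the offset until the offset falls inside the current chunk (or the last chunk is reached). A hedged, self-contained sketch of that lookup over a generic chunk list follows; the Chunk type and find_chunk() name are illustrative, not VBox APIs.

    #include <stdint.h>
    #include <stddef.h>
    #include <stdbool.h>

    typedef struct Chunk
    {
        uint64_t      cb;       /* size of this chunk in bytes */
        bool          fLast;    /* set on the last chunk of the region */
        struct Chunk *pNext;    /* next chunk in the region */
    } Chunk;

    /* Return the chunk containing *poff, rewriting *poff to be chunk-relative;
       NULL if the offset lies beyond the end of the region. */
    static Chunk *find_chunk(Chunk *pFirst, uint64_t *poff)
    {
        Chunk   *pCur = pFirst;
        uint64_t off  = *poff;

        while (off >= pCur->cb && !pCur->fLast)
        {
            off -= pCur->cb;        /* skip this chunk */
            pCur = pCur->pNext;
        }
        if (off >= pCur->cb)
            return NULL;            /* offset past the last chunk */

        *poff = off;
        return pCur;
    }

Once the chunk is found, the page is simply indexed with off >> PAGE_SHIFT, which is exactly what the removed function did before reading the HC physical address out of the PGMPAGE entry.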
trunk/src/VBox/VMM/VMMR3/PGMPool.cpp
r90439 r91854
544 544     if ((pShwPD->a[i].u & (X86_PDE_P | X86_PDE_PS)) == (X86_PDE_P | X86_PDE_PS))
545 545     {
546     # ifndef PGM_WITHOUT_MAPPINGS
547         Assert(!(pShwPD->a[i].u & PGM_PDFLAGS_MAPPING));
548     # endif
549 546     pShwPD->a[i].u = 0;
550 547     Assert(pPage->cPresent);
…
565 562     if ((pShwPD->a[i].u & (EPT_E_READ | EPT_E_LEAF)) == (EPT_E_READ | EPT_E_LEAF))
566 563     {
567     # ifndef PGM_WITHOUT_MAPPINGS
568         Assert(!(pShwPD->a[i].u & PGM_PDFLAGS_MAPPING));
569     # endif
570 564     pShwPD->a[i].u = 0;
571 565     Assert(pPage->cPresent);
-
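The pool-flush loops above only clear a shadow PDE when both the present bit and the large-page bit are set (X86_PDE_P | X86_PDE_PS, or EPT_E_READ | EPT_E_LEAF for EPT), i.e. when the entry maps a 2/4 MB page directly rather than pointing to a page table. A small sketch of that mask-and-compare test; the constants below use the standard x86 PDE bit positions and are not taken from the headers in this change.

    #include <stdint.h>
    #include <stdbool.h>

    #define MY_PDE_P  UINT64_C(0x01)   /* bit 0: present */
    #define MY_PDE_PS UINT64_C(0x80)   /* bit 7: page size (large page) */

    /* True only when the PDE is present AND maps a large page: both bits must be set,
       which is why the test compares against the combined mask rather than != 0. */
    static bool is_present_large_page(uint64_t uPde)
    {
        return (uPde & (MY_PDE_P | MY_PDE_PS)) == (MY_PDE_P | MY_PDE_PS);
    }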
trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp
r91848 r91854 157 157 static const SSMFIELD s_aPGMFields[] = 158 158 { 159 SSMFIELD_ENTRY ( PGM, fMappingsFixed),160 SSMFIELD_ENTRY_ GCPTR( PGM,GCPtrMappingFixed),161 SSMFIELD_ENTRY ( PGM, cbMappingFixed),159 SSMFIELD_ENTRY_OLD( fMappingsFixed, sizeof(bool)), 160 SSMFIELD_ENTRY_OLD_GCPTR( GCPtrMappingFixed), 161 SSMFIELD_ENTRY_OLD( cbMappingFixed, sizeof(uint32_t)), 162 162 SSMFIELD_ENTRY( PGM, cBalloonedPages), 163 163 SSMFIELD_ENTRY_TERM() … … 166 166 static const SSMFIELD s_aPGMFieldsPreBalloon[] = 167 167 { 168 SSMFIELD_ENTRY ( PGM, fMappingsFixed),169 SSMFIELD_ENTRY_ GCPTR( PGM,GCPtrMappingFixed),170 SSMFIELD_ENTRY ( PGM, cbMappingFixed),168 SSMFIELD_ENTRY_OLD( fMappingsFixed, sizeof(bool)), 169 SSMFIELD_ENTRY_OLD_GCPTR( GCPtrMappingFixed), 170 SSMFIELD_ENTRY_OLD( cbMappingFixed, sizeof(uint32_t)), 171 171 SSMFIELD_ENTRY_TERM() 172 172 }; … … 2050 2050 * Save basic data (required / unaffected by relocation). 2051 2051 */ 2052 bool const fMappingsFixed = pVM->pgm.s.fMappingsFixed;2053 pVM->pgm.s.fMappingsFixed |= pVM->pgm.s.fMappingsFixedRestored;2054 2052 SSMR3PutStruct(pSSM, pPGM, &s_aPGMFields[0]); 2055 pVM->pgm.s.fMappingsFixed = fMappingsFixed;2056 2053 2057 2054 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++) … … 2998 2995 AssertLogRelRCReturn(rc, rc); 2999 2996 3000 pPGM->fMappingsFixed = pgmOld.fMappingsFixed;3001 pPGM->GCPtrMappingFixed = pgmOld.GCPtrMappingFixed;3002 pPGM->cbMappingFixed = pgmOld.cbMappingFixed;3003 3004 2997 PVMCPU pVCpu0 = pVM->apCpusR3[0]; 3005 2998 pVCpu0->pgm.s.fA20Enabled = pgmOld.fA20Enabled; … … 3011 3004 AssertRelease(pVM->cCpus == 1); 3012 3005 3013 SSMR3GetBool(pSSM, &pPGM->fMappingsFixed); 3014 SSMR3GetGCPtr(pSSM, &pPGM->GCPtrMappingFixed); 3015 SSMR3GetU32(pSSM, &pPGM->cbMappingFixed); 3006 SSMR3Skip(pSSM, sizeof(bool)); 3007 RTGCPTR GCPtrIgn; 3008 SSMR3GetGCPtr(pSSM, &GCPtrIgn); 3009 SSMR3Skip(pSSM, sizeof(uint32_t)); 3016 3010 3017 3011 uint32_t cbRamSizeIgnored; … … 3214 3208 PGMNotifyNxeChanged(pVCpu, CPUMIsGuestNXEnabled(pVCpu)); 3215 3209 } 3216 3217 /*3218 * Try re-fixate the guest mappings.3219 */3220 pVM->pgm.s.fMappingsFixedRestored = false;3221 if ( pVM->pgm.s.fMappingsFixed3222 && pgmMapAreMappingsEnabled(pVM))3223 {3224 #ifndef PGM_WITHOUT_MAPPINGS3225 RTGCPTR GCPtrFixed = pVM->pgm.s.GCPtrMappingFixed;3226 uint32_t cbFixed = pVM->pgm.s.cbMappingFixed;3227 pVM->pgm.s.fMappingsFixed = false;3228 3229 uint32_t cbRequired;3230 int rc2 = PGMR3MappingsSize(pVM, &cbRequired); AssertRC(rc2);3231 if ( RT_SUCCESS(rc2)3232 && cbRequired > cbFixed)3233 rc2 = VERR_OUT_OF_RANGE;3234 if (RT_SUCCESS(rc2))3235 rc2 = pgmR3MappingsFixInternal(pVM, GCPtrFixed, cbFixed);3236 if (RT_FAILURE(rc2))3237 {3238 LogRel(("PGM: Unable to re-fixate the guest mappings at %RGv-%RGv: rc=%Rrc (cbRequired=%#x)\n",3239 GCPtrFixed, GCPtrFixed + cbFixed, rc2, cbRequired));3240 pVM->pgm.s.fMappingsFixed = false;3241 pVM->pgm.s.fMappingsFixedRestored = true;3242 pVM->pgm.s.GCPtrMappingFixed = GCPtrFixed;3243 pVM->pgm.s.cbMappingFixed = cbFixed;3244 }3245 #else3246 AssertFailed();3247 #endif3248 }3249 else3250 {3251 /* We used to set fixed + disabled while we only use disabled now,3252 so wipe the state to avoid any confusion. 
*/3253 pVM->pgm.s.fMappingsFixed = false;3254 pVM->pgm.s.GCPtrMappingFixed = NIL_RTGCPTR;3255 pVM->pgm.s.cbMappingFixed = 0;3256 }3257 3258 /*3259 * If we have floating mappings, do a CR3 sync now to make sure the HMA3260 * doesn't conflict with guest code / data and thereby cause trouble3261 * when restoring other components like PATM.3262 */3263 if (pgmMapAreMappingsFloating(pVM))3264 {3265 PVMCPU pVCpu = pVM->apCpusR3[0];3266 rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), true);3267 if (RT_FAILURE(rc))3268 return SSMR3SetLoadError(pSSM, VERR_WRONG_ORDER, RT_SRC_POS,3269 N_("PGMSyncCR3 failed unexpectedly with rc=%Rrc"), rc);3270 3271 /* Make sure to re-sync before executing code. */3272 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);3273 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);3274 }3275 3210 } 3276 3211 } -
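The PGMSavedState.cpp change keeps the obsolete mapping fields in the on-disk layout (SSMFIELD_ENTRY_OLD / SSMFIELD_ENTRY_OLD_GCPTR) and, on the legacy load path, reads and discards them (SSMR3Skip plus a throwaway SSMR3GetGCPtr), so old saved states still restore after the state members were deleted. Here is a generic sketch of the same idea, loading a record while skipping a field that no longer exists in memory; read_u32/skip_bytes are hypothetical stream helpers, not the SSM API.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stream helpers standing in for SSMR3GetU32 / SSMR3Skip. */
    static int read_u32(FILE *pStream, uint32_t *pu32)
    {
        return fread(pu32, sizeof(*pu32), 1, pStream) == 1 ? 0 : -1;
    }

    static int skip_bytes(FILE *pStream, long cb)
    {
        return fseek(pStream, cb, SEEK_CUR);
    }

    /* Current in-memory state: the old 'fMappingsFixed' style fields are gone. */
    typedef struct SavedState
    {
        uint32_t cBalloonedPages;
    } SavedState;

    /* Load a record written by an older version that still contained the flag. */
    static int load_old_record(FILE *pStream, SavedState *pState)
    {
        if (skip_bytes(pStream, (long)sizeof(uint32_t)) != 0)   /* obsolete field: read past it */
            return -1;
        return read_u32(pStream, &pState->cBalloonedPages);     /* field we still need */
    }

Keeping the old fields in the stream description is what preserves backward compatibility: the unit version stays readable even though the live structure shrank.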
trunk/src/VBox/VMM/VMMR3/TM.cpp
r90347 r91854
260 260     rc = SUPR3GipGetPhys(&HCPhysGIP);
261 261     AssertMsgRCReturn(rc, ("Failed to get GIP physical address!\n"), rc);
262
263     #ifndef PGM_WITHOUT_MAPPINGS
264         RTGCPTR GCPtr;
265     # ifdef SUP_WITH_LOTS_OF_CPUS
266         rc = MMR3HyperMapHCPhys(pVM, pVM->tm.s.pvGIPR3, NIL_RTR0PTR, HCPhysGIP, (size_t)pGip->cPages * PAGE_SIZE,
267                                 "GIP", &GCPtr);
268     # else
269         rc = MMR3HyperMapHCPhys(pVM, pVM->tm.s.pvGIPR3, NIL_RTR0PTR, HCPhysGIP, PAGE_SIZE, "GIP", &GCPtr);
270     # endif
271         if (RT_FAILURE(rc))
272         {
273             AssertMsgFailed(("Failed to map GIP into GC, rc=%Rrc!\n", rc));
274             return rc;
275         }
276         pVM->tm.s.pvGIPRC = GCPtr;
277         LogFlow(("TMR3Init: HCPhysGIP=%RHp at %RRv\n", HCPhysGIP, pVM->tm.s.pvGIPRC));
278         MMR3HyperReserveFence(pVM);
279     #endif
280
281 262
282 263     /* Check assumptions made in TMAllVirtual.cpp about the GIP update interval. */
trunk/src/VBox/VMM/VMMR3/VM.cpp
r91807 r91854 632 632 if (RT_SUCCESS(rc)) 633 633 { 634 #ifndef PGM_WITHOUT_MAPPINGS 635 rc = PGMR3FinalizeMappings(pVM); 634 LogFlow(("Ring-3 init succeeded\n")); 635 636 /* 637 * Init the Ring-0 components. 638 */ 639 rc = vmR3InitRing0(pVM); 636 640 if (RT_SUCCESS(rc)) 641 { 642 /* Relocate again, because some switcher fixups depends on R0 init results. */ 643 VMR3Relocate(pVM, 0 /* offDelta */); 644 645 #ifdef VBOX_WITH_DEBUGGER 646 /* 647 * Init the tcp debugger console if we're building 648 * with debugger support. 649 */ 650 void *pvUser = NULL; 651 rc = DBGCIoCreate(pUVM, &pvUser); 652 if ( RT_SUCCESS(rc) 653 || rc == VERR_NET_ADDRESS_IN_USE) 654 { 655 pUVM->vm.s.pvDBGC = pvUser; 637 656 #endif 638 { 639 640 LogFlow(("Ring-3 init succeeded\n")); 641 642 /* 643 * Init the Ring-0 components. 644 */ 645 rc = vmR3InitRing0(pVM); 646 if (RT_SUCCESS(rc)) 647 { 648 /* Relocate again, because some switcher fixups depends on R0 init results. */ 649 VMR3Relocate(pVM, 0 /* offDelta */); 650 657 /* 658 * Now we can safely set the VM halt method to default. 659 */ 660 rc = vmR3SetHaltMethodU(pUVM, VMHALTMETHOD_DEFAULT); 661 if (RT_SUCCESS(rc)) 662 { 663 /* 664 * Set the state and we're done. 665 */ 666 vmR3SetState(pVM, VMSTATE_CREATED, VMSTATE_CREATING); 667 return VINF_SUCCESS; 668 } 651 669 #ifdef VBOX_WITH_DEBUGGER 652 /* 653 * Init the tcp debugger console if we're building 654 * with debugger support. 655 */ 656 void *pvUser = NULL; 657 rc = DBGCIoCreate(pUVM, &pvUser); 658 if ( RT_SUCCESS(rc) 659 || rc == VERR_NET_ADDRESS_IN_USE) 660 { 661 pUVM->vm.s.pvDBGC = pvUser; 670 DBGCIoTerminate(pUVM, pUVM->vm.s.pvDBGC); 671 pUVM->vm.s.pvDBGC = NULL; 672 } 662 673 #endif 663 /* 664 * Now we can safely set the VM halt method to default. 665 */ 666 rc = vmR3SetHaltMethodU(pUVM, VMHALTMETHOD_DEFAULT); 667 if (RT_SUCCESS(rc)) 668 { 669 /* 670 * Set the state and we're done. 671 */ 672 vmR3SetState(pVM, VMSTATE_CREATED, VMSTATE_CREATING); 673 return VINF_SUCCESS; 674 } 675 #ifdef VBOX_WITH_DEBUGGER 676 DBGCIoTerminate(pUVM, pUVM->vm.s.pvDBGC); 677 pUVM->vm.s.pvDBGC = NULL; 678 } 679 #endif 680 //.. 681 } 674 //.. 682 675 } 683 676 vmR3Destroy(pVM); … … 913 906 if (RT_SUCCESS(rc)) 914 907 { 915 rc = PGMR3InitDynMap(pVM); 916 if (RT_SUCCESS(rc)) 917 rc = MMR3HyperInitFinalize(pVM); 908 rc = MMR3HyperInitFinalize(pVM); 918 909 if (RT_SUCCESS(rc)) 919 910 rc = PGMR3InitFinalize(pVM); -
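The reworked VMR3Create path above is the usual nested init-and-unwind pattern: each stage (ring-3 init, ring-0 init, debugger console via DBGCIoCreate, halt method) only runs if the previous one succeeded, and a failure after the debugger console exists tears it down with DBGCIoTerminate before the error propagates. A compact, generic illustration of that shape; the stage names are made up for the example.

    #include <stdbool.h>
    #include <stdio.h>

    static bool init_core(void)        { return true;  }
    static bool init_debugger(void)    { return true;  }
    static bool set_halt_method(void)  { return false; } /* pretend this final stage fails */
    static void term_debugger(void)    { puts("debugger torn down"); }
    static void term_core(void)        { puts("core torn down"); }

    static int create_vm(void)
    {
        if (init_core())
        {
            if (init_debugger())
            {
                if (set_halt_method())
                    return 0;            /* fully created */
                term_debugger();         /* unwind only the stages that did succeed */
            }
            term_core();
        }
        return -1;
    }

    int main(void)
    {
        return create_vm() == 0 ? 0 : 1;
    }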
trunk/src/VBox/VMM/include/PGMInline.h
r91848 r91854
1076 1076
1077 1077
1078     /**
1079      * Tells if mappings are to be put into the shadow page table or not.
1080      *
1081      * @returns boolean result
1082      * @param   pVM     The cross context VM structure.
1083      */
1084     DECL_FORCE_INLINE(bool) pgmMapAreMappingsEnabled(PVMCC pVM)
1085     {
1086     #ifdef PGM_WITHOUT_MAPPINGS
1087         /* Only raw-mode has mappings. */
1088         Assert(!VM_IS_RAW_MODE_ENABLED(pVM)); NOREF(pVM);
1089         return false;
1090     #else
1091         Assert(pVM->cCpus == 1 || !VM_IS_RAW_MODE_ENABLED(pVM));
1092         return VM_IS_RAW_MODE_ENABLED(pVM);
1093     #endif
1094     }
1095
1096
1097     /**
1098      * Checks if the mappings are floating and enabled.
1099      *
1100      * @returns true / false.
1101      * @param   pVM     The cross context VM structure.
1102      */
1103     DECL_FORCE_INLINE(bool) pgmMapAreMappingsFloating(PVMCC pVM)
1104     {
1105     #ifdef PGM_WITHOUT_MAPPINGS
1106         /* Only raw-mode has mappings. */
1107         Assert(!VM_IS_RAW_MODE_ENABLED(pVM)); NOREF(pVM);
1108         return false;
1109     #else
1110         return !pVM->pgm.s.fMappingsFixed
1111             && pgmMapAreMappingsEnabled(pVM);
1112     #endif
1113     }
1114
1115 1078 /** @} */
1116 1079
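With raw mode gone, the two inline predicates removed above could only ever return false in supported configurations, which is why their call sites can simply drop the mapping branches. A hedged sketch of how such a compile-time feature predicate typically folds away; the names below are illustrative, not the remaining VBox code.

    #include <stdbool.h>

    /* Before: the answer depended on a build option and the VM execution mode. */
    #ifdef WITH_LEGACY_MAPPINGS
    static inline bool mappings_enabled(bool fRawMode) { return fRawMode; }
    #else
    /* After: the feature is compiled out, so the predicate is a constant... */
    static inline bool mappings_enabled(bool fRawMode) { (void)fRawMode; return false; }
    #endif

    /* ...and callers like this reduce to the 'else' branch, which the compiler
       (or a later cleanup such as this changeset) can remove entirely. */
    static int sync_page_directory(bool fRawMode)
    {
        if (mappings_enabled(fRawMode))
            return 1;   /* would have injected hypervisor mappings into the shadow tables */
        return 0;       /* nothing to do: guest-visible mappings are never injected */
    }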
trunk/src/VBox/VMM/include/PGMInternal.h
r91848 r91854 59 59 60 60 /** 61 * Indicates that there are no guest mappings in the shadow tables.62 *63 * Note! In ring-3 the macro is also used to exclude the managment of the64 * intermediate context page tables. On 32-bit systems we use the intermediate65 * context to support 64-bit guest execution. Thus, we cannot fully make it66 * without mappings there even when VBOX_WITH_RAW_MODE is not defined.67 *68 * In raw-mode context there are by design always guest mappings (the code is69 * executed from one), while in ring-0 there are none at all. Neither context70 * manages the page tables for intermediate switcher context, that's all done in71 * ring-3.72 *73 * Update 6.1: It is always defined now, in pgm.h74 */75 #if defined(IN_RING0) \76 || ( !defined(VBOX_WITH_RAW_MODE) \77 && ( HC_ARCH_BITS != 32 \78 || !defined(VBOX_WITH_64_BITS_GUESTS) \79 ) \80 )81 # undef PGM_WITHOUT_MAPPINGS82 # define PGM_WITHOUT_MAPPINGS83 #endif84 85 /**86 61 * Check and skip global PDEs for non-global flushes 87 62 */ … … 177 152 * Never free such an entry. */ 178 153 #define PGM_PLXFLAGS_PERMANENT RT_BIT_64(10) 179 #ifndef PGM_WITHOUT_MAPPINGS180 /** Mapping (hypervisor allocated pagetable). */181 # define PGM_PLXFLAGS_MAPPING RT_BIT_64(11)182 # define PGM_PML4_FLAGS RT_BIT_64(11)183 #endif184 154 /** PGM specific bits in PML4 entries. */ 185 #define PGM_PML4_FLAGS 0155 #define PGM_PML4_FLAGS 0 186 156 /** PGM specific bits in PDPT entries. */ 187 #ifndef PGM_WITHOUT_MAPPINGS 188 # define PGM_PDPT_FLAGS (PGM_PLXFLAGS_PERMANENT | PGM_PLXFLAGS_MAPPING) 189 #else 190 # define PGM_PDPT_FLAGS (PGM_PLXFLAGS_PERMANENT) 191 #endif 157 #define PGM_PDPT_FLAGS (PGM_PLXFLAGS_PERMANENT) 192 158 /** @} */ 193 159 … … 199 165 * @remarks This is currently only used for statistics and can be recycled. */ 200 166 #define PGM_PDFLAGS_BIG_PAGE RT_BIT_64(9) 201 #ifndef PGM_WITHOUT_MAPPINGS202 /** Mapping (hypervisor allocated pagetable). */203 # define PGM_PDFLAGS_MAPPING RT_BIT_64(10)204 #endif205 167 /** Made read-only to facilitate dirty bit tracking. */ 206 168 #define PGM_PDFLAGS_TRACK_DIRTY RT_BIT_64(11) … … 511 473 typedef PGMSHWPTPAE const *PCPGMSHWPTPAE; 512 474 /** @} */ 513 514 #ifndef PGM_WITHOUT_MAPPINGS515 516 /** Size of the GCPtrConflict array in PGMMAPPING.517 * @remarks Must be a power of two. */518 # define PGMMAPPING_CONFLICT_MAX 8519 520 /**521 * Structure for tracking GC Mappings.522 *523 * This structure is used by linked list in both GC and HC.524 */525 typedef struct PGMMAPPING526 {527 /** Pointer to next entry. */528 R3PTRTYPE(struct PGMMAPPING *) pNextR3;529 /** Pointer to next entry. */530 R0PTRTYPE(struct PGMMAPPING *) pNextR0;531 /** Indicate whether this entry is finalized. */532 bool fFinalized;533 bool afPadding[7];534 /** Start Virtual address. */535 RTGCPTR GCPtr;536 /** Last Virtual address (inclusive). */537 RTGCPTR GCPtrLast;538 /** Range size (bytes). */539 RTGCPTR cb;540 /** Pointer to relocation callback function. */541 R3PTRTYPE(PFNPGMRELOCATE) pfnRelocate;542 /** User argument to the callback. */543 R3PTRTYPE(void *) pvUser;544 /** Mapping description / name. For easing debugging. */545 R3PTRTYPE(const char *) pszDesc;546 /** Last 8 addresses that caused conflicts. */547 RTGCPTR aGCPtrConflicts[PGMMAPPING_CONFLICT_MAX];548 /** Number of conflicts for this hypervisor mapping. */549 uint32_t cConflicts;550 /** Number of page tables. */551 uint32_t cPTs;552 553 /** Array of page table mapping data. Each entry554 * describes one page table. 
The array can be longer555 * than the declared length.556 */557 struct558 {559 /** The HC physical address of the page table. */560 RTHCPHYS HCPhysPT;561 /** The HC physical address of the first PAE page table. */562 RTHCPHYS HCPhysPaePT0;563 /** The HC physical address of the second PAE page table. */564 RTHCPHYS HCPhysPaePT1;565 /** The HC virtual address of the 32-bit page table. */566 R3PTRTYPE(PX86PT) pPTR3;567 /** The HC virtual address of the two PAE page table. (i.e 1024 entries instead of 512) */568 R3PTRTYPE(PPGMSHWPTPAE) paPaePTsR3;569 /** The R0 virtual address of the 32-bit page table. */570 R0PTRTYPE(PX86PT) pPTR0;571 /** The R0 virtual address of the two PAE page table. */572 R0PTRTYPE(PPGMSHWPTPAE) paPaePTsR0;573 } aPTs[1];574 } PGMMAPPING;575 /** Pointer to structure for tracking GC Mappings. */576 typedef struct PGMMAPPING *PPGMMAPPING;577 578 #endif /* !PGM_WITHOUT_MAPPINGS */579 475 580 476 … … 2991 2887 /** The host paging mode. (This is what SUPLib reports.) */ 2992 2888 SUPPAGINGMODE enmHostMode; 2993 bool fAlignment3b; 2994 2995 /** Indicates that PGMR3FinalizeMappings has been called and that further 2996 * PGMR3MapIntermediate calls will be rejected. */ 2997 bool fFinalizedMappings; 2998 /** If set no conflict checks are required. */ 2999 bool fMappingsFixed; 3000 /** If set if restored as fixed but we were unable to re-fixate at the old 3001 * location because of room or address incompatibilities. */ 3002 bool fMappingsFixedRestored; 3003 /** Size of fixed mapping. 3004 * This is valid if either fMappingsFixed or fMappingsFixedRestored is set. */ 3005 uint32_t cbMappingFixed; 2889 bool afAlignment3b[2]; 2890 3006 2891 /** Generation ID for the RAM ranges. This member is incremented everytime 3007 2892 * a RAM range is linked or unlinked. */ 3008 2893 uint32_t volatile idRamRangesGen; 3009 3010 /** Base address (GC) of fixed mapping.3011 * This is valid if either fMappingsFixed or fMappingsFixedRestored is set. */3012 RTGCPTR GCPtrMappingFixed;3013 #ifndef PGM_WITHOUT_MAPPINGS3014 /** The address of the previous RAM range mapping. */3015 RTGCPTR GCPtrPrevRamRangeMapping;3016 #else3017 RTGCPTR Unused0;3018 #endif3019 2894 3020 2895 /** Physical access handler type for ROM protection. */ … … 3043 2918 /** Shadow Page Pool - R3 Ptr. */ 3044 2919 R3PTRTYPE(PPGMPOOL) pPoolR3; 3045 #ifndef PGM_WITHOUT_MAPPINGS3046 /** Linked list of GC mappings - for HC.3047 * The list is sorted ascending on address. */3048 R3PTRTYPE(PPGMMAPPING) pMappingsR3;3049 #endif3050 2920 /** Pointer to the list of ROM ranges - for R3. 3051 2921 * This is sorted by physical address and contains no overlapping ranges. */ … … 3069 2939 /** Shadow Page Pool - R0 Ptr. */ 3070 2940 R0PTRTYPE(PPGMPOOL) pPoolR0; 3071 #ifndef PGM_WITHOUT_MAPPINGS3072 /** Linked list of GC mappings - for R0.3073 * The list is sorted ascending on address. */3074 R0PTRTYPE(PPGMMAPPING) pMappingsR0;3075 RTR0PTR R0PtrAlignment0;3076 #endif3077 2941 /** R0 pointer corresponding to PGM::pRomRangesR3. */ 3078 2942 R0PTRTYPE(PPGMROMRANGE) pRomRangesR0; … … 3080 2944 R0PTRTYPE(PPGMREGMMIO2RANGE) apMmio2RangesR0[PGM_MMIO2_MAX_RANGES]; 3081 2945 3082 #ifndef PGM_WITHOUT_MAPPINGS3083 /** Pointer to the 5 page CR3 content mapping.3084 * The first page is always the CR3 (in some form) while the 4 other pages3085 * are used for the PDs in PAE mode. */3086 RTGCPTR GCPtrCR3Mapping;3087 3088 /** @name Intermediate Context3089 * @{ */3090 /** Pointer to the intermediate page directory - Normal. 
*/3091 R3PTRTYPE(PX86PD) pInterPD;3092 /** Pointer to the intermediate page tables - Normal.3093 * There are two page tables, one for the identity mapping and one for3094 * the host context mapping (of the core code). */3095 R3PTRTYPE(PX86PT) apInterPTs[2];3096 /** Pointer to the intermediate page tables - PAE. */3097 R3PTRTYPE(PX86PTPAE) apInterPaePTs[2];3098 /** Pointer to the intermediate page directory - PAE. */3099 R3PTRTYPE(PX86PDPAE) apInterPaePDs[4];3100 /** Pointer to the intermediate page directory - PAE. */3101 R3PTRTYPE(PX86PDPT) pInterPaePDPT;3102 /** Pointer to the intermediate page-map level 4 - AMD64. */3103 R3PTRTYPE(PX86PML4) pInterPaePML4;3104 /** Pointer to the intermediate page directory - AMD64. */3105 R3PTRTYPE(PX86PDPT) pInterPaePDPT64;3106 /** The Physical Address (HC) of the intermediate Page Directory - Normal. */3107 RTHCPHYS HCPhysInterPD;3108 /** The Physical Address (HC) of the intermediate Page Directory Pointer Table - PAE. */3109 RTHCPHYS HCPhysInterPaePDPT;3110 /** The Physical Address (HC) of the intermediate Page Map Level 4 table - AMD64. */3111 RTHCPHYS HCPhysInterPaePML4;3112 /** @} */3113 #endif3114 3115 #ifndef PGM_WITHOUT_MAPPINGS3116 /** Base address of the dynamic page mapping area.3117 * The array is MM_HYPER_DYNAMIC_SIZE bytes big.3118 *3119 * @todo The plan of keeping PGMRCDYNMAP private to PGMRZDynMap.cpp didn't3120 * work out. Some cleaning up of the initialization that would3121 * remove this memory is yet to be done...3122 */3123 RCPTRTYPE(uint8_t *) pbDynPageMapBaseGC;3124 /** The address of the raw-mode context mapping cache. */3125 RCPTRTYPE(PPGMRCDYNMAP) pRCDynMap;3126 /** The address of the ring-0 mapping cache if we're making use of it. */3127 RTR0PTR pvR0DynMapUsed;3128 #endif3129 3130 2946 /** Hack: Number of deprecated page mapping locks taken by the current lock 3131 2947 * owner via pgmPhysGCPhys2CCPtrInternalDepr. */ 3132 2948 uint32_t cDeprecatedPageLocks; 3133 2949 /** Alignment padding. */ 3134 uint32_t au32Alignment2[1+2]; 3135 2950 uint32_t au32Alignment2[1]; 3136 2951 3137 2952 /** PGM critical section. … … 3316 3131 } PGM; 3317 3132 #ifndef IN_TSTVMSTRUCTGC /* HACK */ 3318 # ifndef PGM_WITHOUT_MAPPINGS3319 AssertCompileMemberAlignment(PGM, paDynPageMap32BitPTEsGC, 8);3320 # endif3321 AssertCompileMemberAlignment(PGM, GCPtrMappingFixed, sizeof(RTGCPTR));3322 # ifndef PGM_WITHOUT_MAPPINGS3323 AssertCompileMemberAlignment(PGM, HCPhysInterPD, 8);3324 # endif3325 3133 AssertCompileMemberAlignment(PGM, CritSectX, 8); 3326 3134 AssertCompileMemberAlignment(PGM, ChunkR3Map, 16); … … 3359 3167 STAMPROFILE StatRZTrap0eTime2InvalidPhys; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is access to an invalid physical guest address. */ 3360 3168 STAMPROFILE StatRZTrap0eTime2MakeWritable; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a page that needed to be made writable. */ 3361 STAMPROFILE StatRZTrap0eTime2Mapping; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is the guest mappings. */3362 3169 STAMPROFILE StatRZTrap0eTime2Misc; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is not known. */ 3363 3170 STAMPROFILE StatRZTrap0eTime2OutOfSync; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync page. */ … … 3369 3176 STAMPROFILE StatRZTrap0eTime2Wp0RoUsUnhack; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is CR0.WP and netware hack to be disabled. 
*/ 3370 3177 STAMCOUNTER StatRZTrap0eConflicts; /**< RC/R0: The number of times \#PF was caused by an undetected conflict. */ 3371 STAMCOUNTER StatRZTrap0eHandlersMapping; /**< RC/R0: Number of traps due to access handlers in mappings. */3372 3178 STAMCOUNTER StatRZTrap0eHandlersOutOfSync; /**< RC/R0: Number of out-of-sync handled pages. */ 3373 3179 STAMCOUNTER StatRZTrap0eHandlersPhysAll; /**< RC/R0: Number of traps due to physical all-access handlers. */ … … 3388 3194 STAMCOUNTER StatRZTrap0eSNXE; /**< RC/R0: \#PF err kind */ 3389 3195 STAMCOUNTER StatRZTrap0eGuestPF; /**< RC/R0: Real guest \#PFs. */ 3390 STAMCOUNTER StatRZTrap0eGuestPFMapping; /**< RC/R0: Real guest \#PF to HMA or other mapping. */3391 3196 STAMCOUNTER StatRZTrap0eWPEmulInRZ; /**< RC/R0: WP=0 virtualization trap, handled. */ 3392 3197 STAMCOUNTER StatRZTrap0eWPEmulToR3; /**< RC/R0: WP=0 virtualization trap, chickened out. */ … … 3457 3262 STAMCOUNTER StatRZInvalidatePage4MBPages; /**< RC/R0: The number of times PGMInvalidatePage() was called for a 4MB page. */ 3458 3263 STAMCOUNTER StatRZInvalidatePage4MBPagesSkip; /**< RC/R0: The number of times PGMInvalidatePage() skipped a 4MB page. */ 3459 STAMCOUNTER StatRZInvalidatePagePDMappings; /**< RC/R0: The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict). */3460 3264 STAMCOUNTER StatRZInvalidatePagePDNAs; /**< RC/R0: The number of times PGMInvalidatePage() was called for a not accessed page directory. */ 3461 3265 STAMCOUNTER StatRZInvalidatePagePDNPs; /**< RC/R0: The number of times PGMInvalidatePage() was called for a not present page directory. */ … … 3507 3311 STAMCOUNTER StatR3InvalidatePagePDNAs; /**< R3: The number of times PGMInvalidatePage() was called for a not accessed page directory. */ 3508 3312 STAMCOUNTER StatR3InvalidatePagePDNPs; /**< R3: The number of times PGMInvalidatePage() was called for a not present page directory. */ 3509 STAMCOUNTER StatR3InvalidatePagePDMappings; /**< R3: The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict). */3510 3313 STAMCOUNTER StatR3InvalidatePagePDOutOfSync; /**< R3: The number of times PGMInvalidatePage() was called for an out of sync page directory. */ 3511 3314 STAMCOUNTER StatR3InvalidatePageSizeChanges ; /**< R3: The number of times PGMInvalidatePage() was called on a page size change (4KB <-> 2/4MB). */ … … 3682 3485 /** Always sync CR3. */ 3683 3486 #define PGM_SYNC_ALWAYS RT_BIT(1) 3684 /** Check monitoring on next CR3 (re)load and invalidate page.3685 * @todo This is obsolete now. Remove after 2.2.0 is branched off. */3686 #define PGM_SYNC_MONITOR_CR3 RT_BIT(2)3687 3487 /** Check guest mapping in SyncCR3. 
*/ 3688 3488 #define PGM_SYNC_MAP_CR3 RT_BIT(3) … … 3748 3548 */ 3749 3549 #define PGM_LOCK_ASSERT_OWNER_EX(a_pVM, a_pVCpu) Assert(PDMCritSectIsOwnerEx((a_pVCpu), &(a_pVM)->pgm.s.CritSectX)) 3750 3751 #ifndef PGM_WITHOUT_MAPPINGS3752 int pgmR3MappingsFixInternal(PVM pVM, RTGCPTR GCPtrBase, uint32_t cb);3753 int pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PX86PD pPDSrc, RTGCPTR GCPtrOldMapping);3754 int pgmR3SyncPTResolveConflictPAE(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping);3755 int pgmMapResolveConflicts(PVM pVM);3756 PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr);3757 DECLCALLBACK(void) pgmR3MapInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);3758 #endif /* !PGM_WITHOUT_MAPPINGS */3759 3550 3760 3551 int pgmHandlerPhysicalExCreate(PVMCC pVM, PGMPHYSHANDLERTYPE hType, RTR3PTR pvUserR3, RTR0PTR pvUserR0, … … 3858 3649 void pgmR3RefreshShadowModeAfterA20Change(PVMCPU pVCpu); 3859 3650 3860 #ifndef PGM_WITHOUT_MAPPINGS3861 void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE);3862 void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE, bool fDeactivateCR3);3863 int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);3864 int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);3865 #endif3866 3867 3651 int pgmShwMakePageSupervisorAndWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags); 3868 3652 int pgmShwSyncPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
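PGMInternal.h keeps PGM-private markers such as PGM_PDFLAGS_BIG_PAGE (RT_BIT_64(9)), PGM_PLXFLAGS_PERMANENT (RT_BIT_64(10)) and PGM_PDFLAGS_TRACK_DIRTY (RT_BIT_64(11)) in bits of the paging entries that the x86 MMU generally ignores, so extra bookkeeping rides along in the shadow tables for free. A small self-contained sketch of stashing and reading such software flags in a 64-bit entry; the constants mirror the bit positions quoted above, while the helper names are made up.

    #include <stdint.h>
    #include <stdbool.h>

    /* Bits 9-11 of a PDE/PDPTE/PML4E are available to software on x86. */
    #define MY_FLAG_BIG_PAGE     (UINT64_C(1) << 9)
    #define MY_FLAG_PERMANENT    (UINT64_C(1) << 10)
    #define MY_FLAG_TRACK_DIRTY  (UINT64_C(1) << 11)
    #define MY_SW_FLAGS_MASK     (MY_FLAG_BIG_PAGE | MY_FLAG_PERMANENT | MY_FLAG_TRACK_DIRTY)

    static inline uint64_t entry_set_flag(uint64_t uEntry, uint64_t fFlag)
    {
        return uEntry | fFlag;                  /* tag the entry for PGM's own bookkeeping */
    }

    static inline bool entry_has_flag(uint64_t uEntry, uint64_t fFlag)
    {
        return (uEntry & fFlag) != 0;
    }

    static inline uint64_t entry_hw_bits(uint64_t uEntry)
    {
        return uEntry & ~MY_SW_FLAGS_MASK;      /* strip the software tags, e.g. when comparing entries */
    }

This changeset shrinks that flag set: the PGM_PLXFLAGS_MAPPING / PGM_PDFLAGS_MAPPING bits disappear together with the guest-mapping code, leaving only the permanent, big-page and dirty-tracking markers.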