VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp@ 76553

Last change on this file since 76553 was 76553, checked in by vboxsync, 5 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 88.2 KB
Rev  Line 
[23]1/* $Id: PGMAllHandler.cpp 76553 2019-01-01 01:45:53Z vboxsync $ */
[1]2/** @file
3 * PGM - Page Manager / Monitor, Access Handlers.
4 */
5
6/*
[76553]7 * Copyright (C) 2006-2019 Oracle Corporation
[1]8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
[5999]12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
[1]16 */
17
18
[57358]19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
[1]22#define LOG_GROUP LOG_GROUP_PGM
[35346]23#include <VBox/vmm/dbgf.h>
24#include <VBox/vmm/pgm.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/mm.h>
27#include <VBox/vmm/em.h>
[70954]28#include <VBox/vmm/nem.h>
[35346]29#include <VBox/vmm/stam.h>
[40274]30#ifdef VBOX_WITH_REM
31# include <VBox/vmm/rem.h>
32#endif
[35346]33#include <VBox/vmm/dbgf.h>
[35333]34#include "PGMInternal.h"
[35346]35#include <VBox/vmm/vm.h>
[35333]36#include "PGMInline.h"
[1]37
38#include <VBox/log.h>
39#include <iprt/assert.h>
[29250]40#include <iprt/asm-amd64-x86.h>
[1]41#include <iprt/string.h>
42#include <VBox/param.h>
43#include <VBox/err.h>
[35346]44#include <VBox/vmm/selm.h>
[1]45
46
[57358]47/*********************************************************************************************************************************
48* Internal Functions *
49*********************************************************************************************************************************/
[6861]50static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVM pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam);
[70954]51static void pgmHandlerPhysicalDeregisterNotifyREMAndNEM(PVM pVM, PPGMPHYSHANDLER pCur, int fRestoreRAM);
[1]52static void pgmHandlerPhysicalResetRamFlags(PVM pVM, PPGMPHYSHANDLER pCur);
53
54
[55493]55/**
56 * Internal worker for releasing a physical handler type registration reference.
57 *
58 * @returns New reference count. UINT32_MAX if invalid input (asserted).
[58122]59 * @param pVM The cross context VM structure.
[55493]60 * @param pType Pointer to the type registration.
61 */
62DECLINLINE(uint32_t) pgmHandlerPhysicalTypeRelease(PVM pVM, PPGMPHYSHANDLERTYPEINT pType)
63{
64 AssertMsgReturn(pType->u32Magic == PGMPHYSHANDLERTYPEINT_MAGIC, ("%#x\n", pType->u32Magic), UINT32_MAX);
65 uint32_t cRefs = ASMAtomicDecU32(&pType->cRefs);
66 if (cRefs == 0)
67 {
68 pgmLock(pVM);
69 pType->u32Magic = PGMPHYSHANDLERTYPEINT_MAGIC_DEAD;
70 RTListOff32NodeRemove(&pType->ListNode);
71 pgmUnlock(pVM);
72 MMHyperFree(pVM, pType);
73 }
74 return cRefs;
75}
[1]76
[55493]77
[1]78/**
[55493]79 * Internal worker for retaining a physical handler type registration reference.
80 *
81 * @returns New reference count. UINT32_MAX if invalid input (asserted).
[58122]82 * @param pVM The cross context VM structure.
[55493]83 * @param pType Pointer to the type registration.
84 */
85DECLINLINE(uint32_t) pgmHandlerPhysicalTypeRetain(PVM pVM, PPGMPHYSHANDLERTYPEINT pType)
86{
[57851]87 NOREF(pVM);
[55493]88 AssertMsgReturn(pType->u32Magic == PGMPHYSHANDLERTYPEINT_MAGIC, ("%#x\n", pType->u32Magic), UINT32_MAX);
89 uint32_t cRefs = ASMAtomicIncU32(&pType->cRefs);
90 Assert(cRefs < _1M && cRefs > 0);
91 return cRefs;
92}
93
94
95/**
96 * Releases a reference to a physical handler type registration.
97 *
98 * @returns New reference count. UINT32_MAX if invalid input (asserted).
[58122]99 * @param pVM The cross context VM structure.
[55493]100 * @param hType The type registration handle.
101 */
102VMMDECL(uint32_t) PGMHandlerPhysicalTypeRelease(PVM pVM, PGMPHYSHANDLERTYPE hType)
103{
104 if (hType != NIL_PGMPHYSHANDLERTYPE)
105 return pgmHandlerPhysicalTypeRelease(pVM, PGMPHYSHANDLERTYPEINT_FROM_HANDLE(pVM, hType));
106 return 0;
107}
108
109
110/**
111 * Retains a reference to a physical handler type registration.
112 *
113 * @returns New reference count. UINT32_MAX if invalid input (asserted).
[58122]114 * @param pVM The cross context VM structure.
[55493]115 * @param hType The type registration handle.
116 */
117VMMDECL(uint32_t) PGMHandlerPhysicalTypeRetain(PVM pVM, PGMPHYSHANDLERTYPE hType)
118{
119 return pgmHandlerPhysicalTypeRetain(pVM, PGMPHYSHANDLERTYPEINT_FROM_HANDLE(pVM, hType));
120}
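
/*
 * Illustrative sketch (not part of the original source): the typical
 * retain/release pairing when a component caches a type registration handle.
 * The names pMyDevice and hMyType are hypothetical.
 *
 * @code
 *      // Take an extra reference while we keep a copy of the handle around.
 *      PGMHandlerPhysicalTypeRetain(pVM, pMyDevice->hMyType);
 *      ...
 *      // Drop it again; the registration is freed when the count reaches zero.
 *      PGMHandlerPhysicalTypeRelease(pVM, pMyDevice->hMyType);
 *      pMyDevice->hMyType = NIL_PGMPHYSHANDLERTYPE;
 * @endcode
 */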
121
122
123/**
[64327]124 * Creates a physical access handler.
[1]125 *
126 * @returns VBox status code.
127 * @retval VINF_SUCCESS when successfully installed.
128 * @retval VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not be updated because
129 * the guest page is aliased and/or mapped by multiple PTs. A CR3 sync has been
130 * flagged together with a pool clearing.
131 * @retval VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
132 * one. A debug assertion is raised.
133 *
[58122]134 * @param pVM The cross context VM structure.
[55493]135 * @param hType The handler type registration handle.
[1]136 * @param pvUserR3 User argument to the R3 handler.
137 * @param pvUserR0 User argument to the R0 handler.
[13042]138 * @param pvUserRC User argument to the RC handler. This can be a value
139 * less than 0x10000 or a (non-null) pointer that is
[33540]140 * automatically relocated.
[55493]141 * @param pszDesc Description of this handler. If NULL, the type
142 * description will be used instead.
[64115]143 * @param ppPhysHandler Where to return the access handler structure on
144 * success.
[1]145 */
[64115]146int pgmHandlerPhysicalExCreate(PVM pVM, PGMPHYSHANDLERTYPE hType, RTR3PTR pvUserR3, RTR0PTR pvUserR0, RTRCPTR pvUserRC,
147 R3PTRTYPE(const char *) pszDesc, PPGMPHYSHANDLER *ppPhysHandler)
[1]148{
[55493]149 PPGMPHYSHANDLERTYPEINT pType = PGMPHYSHANDLERTYPEINT_FROM_HANDLE(pVM, hType);
[64115]150 Log(("pgmHandlerPhysicalExCreate: pvUserR3=%RHv pvUserR0=%RHv pvUserGC=%RRv hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
151 pvUserR3, pvUserR0, pvUserRC, hType, pType->enmKind, R3STRING(pType->pszDesc), pszDesc, R3STRING(pszDesc)));
[1]152
153 /*
154 * Validate input.
155 */
[64115]156 AssertPtr(ppPhysHandler);
[55493]157 AssertReturn(pType->u32Magic == PGMPHYSHANDLERTYPEINT_MAGIC, VERR_INVALID_HANDLE);
[64115]158 AssertMsgReturn( (RTRCUINTPTR)pvUserRC < 0x10000
159 || MMHyperR3ToRC(pVM, MMHyperRCToR3(pVM, pvUserRC)) == pvUserRC,
160 ("Not RC pointer! pvUserRC=%RRv\n", pvUserRC),
161 VERR_INVALID_PARAMETER);
162 AssertMsgReturn( (RTR0UINTPTR)pvUserR0 < 0x10000
163 || MMHyperR3ToR0(pVM, MMHyperR0ToR3(pVM, pvUserR0)) == pvUserR0,
164 ("Not R0 pointer! pvUserR0=%RHv\n", pvUserR0),
165 VERR_INVALID_PARAMETER);
166
167 /*
168 * Allocate and initialize the new entry.
169 */
170 PPGMPHYSHANDLER pNew;
171 int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
172 if (RT_SUCCESS(rc))
173 {
174 pNew->Core.Key = NIL_RTGCPHYS;
175 pNew->Core.KeyLast = NIL_RTGCPHYS;
176 pNew->cPages = 0;
177 pNew->cAliasedPages = 0;
178 pNew->cTmpOffPages = 0;
179 pNew->pvUserR3 = pvUserR3;
180 pNew->pvUserR0 = pvUserR0;
181 pNew->pvUserRC = pvUserRC;
182 pNew->hType = hType;
183 pNew->pszDesc = pszDesc != NIL_RTR3PTR ? pszDesc : pType->pszDesc;
184 pgmHandlerPhysicalTypeRetain(pVM, pType);
185 *ppPhysHandler = pNew;
186 return VINF_SUCCESS;
187 }
188
189 return rc;
190}
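
/*
 * Usage sketch (an assumption, not from the original source): the Ex API splits
 * allocation from insertion so callers can pre-allocate the handler structure
 * and register it later.  hMyType, pvUserR3/R0/RC and cb are hypothetical.
 *
 * @code
 *      PPGMPHYSHANDLER pHandler;
 *      int rc = pgmHandlerPhysicalExCreate(pVM, hMyType, pvUserR3, pvUserR0, pvUserRC,
 *                                          "My device", &pHandler);
 *      if (RT_SUCCESS(rc))
 *      {
 *          rc = pgmHandlerPhysicalExRegister(pVM, pHandler, GCPhys, GCPhys + cb - 1);
 *          if (RT_FAILURE(rc))
 *              pgmHandlerPhysicalExDestroy(pVM, pHandler);
 *      }
 * @endcode
 */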
191
192
193/**
[64327]194 * Duplicates a physical access handler.
195 *
196 * @returns VBox status code.
197 * @retval VINF_SUCCESS when successfully installed.
198 *
199 * @param pVM The cross context VM structure.
200 * @param pPhysHandlerSrc The source handler to duplicate.
201 * @param ppPhysHandler Where to return the access handler structure on
202 * success.
203 */
204int pgmHandlerPhysicalExDup(PVM pVM, PPGMPHYSHANDLER pPhysHandlerSrc, PPGMPHYSHANDLER *ppPhysHandler)
205{
206 return pgmHandlerPhysicalExCreate(pVM,
207 pPhysHandlerSrc->hType,
208 pPhysHandlerSrc->pvUserR3,
209 pPhysHandlerSrc->pvUserR0,
210 pPhysHandlerSrc->pvUserRC,
211 pPhysHandlerSrc->pszDesc,
212 ppPhysHandler);
213}
214
215
216/**
[64115]217 * Register an access handler for a physical range.
218 *
219 * @returns VBox status code.
220 * @retval VINF_SUCCESS when successfully installed.
221 *
222 * @param pVM The cross context VM structure.
223 * @param pPhysHandler The physical handler.
224 * @param GCPhys Start physical address.
225 * @param GCPhysLast Last physical address. (inclusive)
226 */
227int pgmHandlerPhysicalExRegister(PVM pVM, PPGMPHYSHANDLER pPhysHandler, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
228{
229 /*
230 * Validate input.
231 */
232 AssertPtr(pPhysHandler);
233 PPGMPHYSHANDLERTYPEINT pType = PGMPHYSHANDLERTYPEINT_FROM_HANDLE(pVM, pPhysHandler->hType);
234 Assert(pType->u32Magic == PGMPHYSHANDLERTYPEINT_MAGIC);
235 Log(("pgmHandlerPhysicalExRegister: GCPhys=%RGp GCPhysLast=%RGp hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
236 GCPhys, GCPhysLast, pPhysHandler->hType, pType->enmKind, R3STRING(pType->pszDesc), pPhysHandler->pszDesc, R3STRING(pPhysHandler->pszDesc)));
237 AssertReturn(pPhysHandler->Core.Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);
238
[13232]239 AssertMsgReturn(GCPhys < GCPhysLast, ("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast), VERR_INVALID_PARAMETER);
[55493]240 switch (pType->enmKind)
[1]241 {
[55493]242 case PGMPHYSHANDLERKIND_WRITE:
[17432]243 break;
[55493]244 case PGMPHYSHANDLERKIND_MMIO:
245 case PGMPHYSHANDLERKIND_ALL:
[55966]246 /* Simplification for PGMPhysRead, PGMR0Trap0eHandlerNPMisconfig and others: Full pages. */
[17432]247 AssertMsgReturn(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_INVALID_PARAMETER);
248 AssertMsgReturn((GCPhysLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK, ("%RGp\n", GCPhysLast), VERR_INVALID_PARAMETER);
[1]249 break;
250 default:
[55493]251 AssertMsgFailed(("Invalid input enmKind=%d!\n", pType->enmKind));
[1]252 return VERR_INVALID_PARAMETER;
253 }
254
255 /*
256 * We require the range to be within registered ram.
257 * There is no apparent need to support ranges which cover more than one ram range.
258 */
[36891]259 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
260 if ( !pRam
[64323]261 || GCPhysLast > pRam->GCPhysLast)
[1]262 {
263#ifdef IN_RING3
[44399]264 DBGFR3Info(pVM->pUVM, "phys", NULL, NULL);
[1]265#endif
[13824]266 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
[7753]267 return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
[1]268 }
[64323]269 Assert(GCPhys >= pRam->GCPhys && GCPhys < pRam->GCPhysLast);
270 Assert(GCPhysLast <= pRam->GCPhysLast && GCPhysLast >= pRam->GCPhys);
[1]271
272 /*
[64115]273 * Try to insert into the tree.
[1]274 */
[64115]275 pPhysHandler->Core.Key = GCPhys;
276 pPhysHandler->Core.KeyLast = GCPhysLast;
277 pPhysHandler->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
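    /* Worked example (illustrative): with 4 KB pages, GCPhys=0x1800 and
       GCPhysLast=0x2fff give (0x2fff - 0x1000 + 0x1000) >> 12 = 2, i.e. the
       two pages the range touches. */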
[1]278
279 pgmLock(pVM);
[64115]280 if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pPhysHandler->Core))
[1]281 {
[64115]282 int rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pPhysHandler, pRam);
[18266]283 if (rc == VINF_PGM_SYNC_CR3)
284 rc = VINF_PGM_GCPHYS_ALIASED;
[70954]285
286#if defined(IN_RING3) || defined(IN_RING0)
287 NEMHCNotifyHandlerPhysicalRegister(pVM, pType->enmKind, GCPhys, GCPhysLast - GCPhys + 1);
288#endif
[19806]289 pgmUnlock(pVM);
[64115]290
[40274]291#ifdef VBOX_WITH_REM
292# ifndef IN_RING3
[55493]293 REMNotifyHandlerPhysicalRegister(pVM, pType->enmKind, GCPhys, GCPhysLast - GCPhys + 1, !!pType->pfnHandlerR3);
[40274]294# else
[55493]295 REMR3NotifyHandlerPhysicalRegister(pVM, pType->enmKind, GCPhys, GCPhysLast - GCPhys + 1, !!pType->pfnHandlerR3);
[40274]296# endif
[1]297#endif
298 if (rc != VINF_SUCCESS)
[13824]299 Log(("pgmHandlerPhysicalExRegister: returns %Rrc (%RGp-%RGp)\n", rc, GCPhys, GCPhysLast));
[1]300 return rc;
301 }
302 pgmUnlock(pVM);
303
[64115]304 pPhysHandler->Core.Key = NIL_RTGCPHYS;
305 pPhysHandler->Core.KeyLast = NIL_RTGCPHYS;
306
[1]307#if defined(IN_RING3) && defined(VBOX_STRICT)
[44399]308 DBGFR3Info(pVM->pUVM, "handlers", "phys nostats", NULL);
[1]309#endif
[55493]310 AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp pszDesc=%s/%s\n",
[64115]311 GCPhys, GCPhysLast, R3STRING(pPhysHandler->pszDesc), R3STRING(pType->pszDesc)));
[1]312 return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
313}
314
315
316/**
[64115]317 * Register an access handler for a physical range.
318 *
319 * @returns VBox status code.
320 * @retval VINF_SUCCESS when successfully installed.
321 * @retval VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not be updated because
322 * the guest page is aliased and/or mapped by multiple PTs. A CR3 sync has been
323 * flagged together with a pool clearing.
324 * @retval VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
325 * one. A debug assertion is raised.
326 *
327 * @param pVM The cross context VM structure.
328 * @param GCPhys Start physical address.
329 * @param GCPhysLast Last physical address. (inclusive)
330 * @param hType The handler type registration handle.
331 * @param pvUserR3 User argument to the R3 handler.
332 * @param pvUserR0 User argument to the R0 handler.
333 * @param pvUserRC User argument to the RC handler. This can be a value
334 * less than 0x10000 or a (non-null) pointer that is
335 * automatically relocated.
336 * @param pszDesc Description of this handler. If NULL, the type
337 * description will be used instead.
338 */
339VMMDECL(int) PGMHandlerPhysicalRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, PGMPHYSHANDLERTYPE hType,
340 RTR3PTR pvUserR3, RTR0PTR pvUserR0, RTRCPTR pvUserRC, R3PTRTYPE(const char *) pszDesc)
341{
[64117]342#ifdef LOG_ENABLED
[64115]343 PPGMPHYSHANDLERTYPEINT pType = PGMPHYSHANDLERTYPEINT_FROM_HANDLE(pVM, hType);
344 Log(("PGMHandlerPhysicalRegister: GCPhys=%RGp GCPhysLast=%RGp pvUserR3=%RHv pvUserR0=%RHv pvUserGC=%RRv hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
345 GCPhys, GCPhysLast, pvUserR3, pvUserR0, pvUserRC, hType, pType->enmKind, R3STRING(pType->pszDesc), pszDesc, R3STRING(pszDesc)));
[64117]346#endif
[64115]347
348 PPGMPHYSHANDLER pNew;
349 int rc = pgmHandlerPhysicalExCreate(pVM, hType, pvUserR3, pvUserR0, pvUserRC, pszDesc, &pNew);
350 if (RT_SUCCESS(rc))
351 {
352 rc = pgmHandlerPhysicalExRegister(pVM, pNew, GCPhys, GCPhysLast);
353 if (RT_SUCCESS(rc))
354 return rc;
355 pgmHandlerPhysicalExDestroy(pVM, pNew);
356 }
357 return rc;
358}
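
/*
 * Usage sketch (an assumption, not from the original source): registering a
 * write handler over a hypothetical device frame buffer so guest writes can be
 * tracked.  g_hMyWriteType, GCPhysFb, MY_FB_SIZE and pThis are made-up names.
 *
 * @code
 *      rc = PGMHandlerPhysicalRegister(pVM, GCPhysFb, GCPhysFb + MY_FB_SIZE - 1,
 *                                      g_hMyWriteType, pThis, NIL_RTR0PTR, NIL_RTRCPTR,
 *                                      "Frame buffer");
 *      AssertRCReturn(rc, rc);
 * @endcode
 */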
359
360
361/**
[1]362 * Sets ram range flags and attempts updating shadow PTs.
363 *
364 * @returns VBox status code.
365 * @retval VINF_SUCCESS when the shadow PTs were successfully updated.
[18266]366 * @retval VINF_PGM_SYNC_CR3 when the shadow PTs could not be updated because
367 * the guest page is aliased and/or mapped by multiple PTs. FFs set.
[58122]368 * @param pVM The cross context VM structure.
[1]369 * @param pCur The physical handler.
[6902]370 * @param pRam The RAM range.
[1]371 */
372static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVM pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam)
373{
374 /*
375 * Iterate the guest ram pages updating the flags and flushing PT entries
376 * mapping the page.
377 */
[55493]378 bool fFlushTLBs = false;
379 int rc = VINF_SUCCESS;
380 PPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
381 const unsigned uState = pCurType->uState;
382 uint32_t cPages = pCur->cPages;
383 uint32_t i = (pCur->Core.Key - pRam->GCPhys) >> PAGE_SHIFT;
[1]384 for (;;)
385 {
[18230]386 PPGMPAGE pPage = &pRam->aPages[i];
[55493]387 AssertMsg(pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO || PGM_PAGE_IS_MMIO(pPage),
[18230]388 ("%RGp %R[pgmpage]\n", pRam->GCPhys + (i << PAGE_SHIFT), pPage));
[1]389
[6902]390 /* Only do upgrades. */
391 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
[1]392 {
[6902]393 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
[1]394
[70977]395 const RTGCPHYS GCPhysPage = pRam->GCPhys + (i << PAGE_SHIFT);
396 int rc2 = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage,
[32087]397 false /* allow updates of PTEs (instead of flushing) */, &fFlushTLBs);
[17509]398 if (rc2 != VINF_SUCCESS && rc == VINF_SUCCESS)
399 rc = rc2;
[70977]400
401#ifndef IN_RC
402 /* Tell NEM about the protection update. */
403 if (VM_IS_NEM_ENABLED(pVM))
404 {
405 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
406 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
407 NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
408 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
409 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
410 }
411#endif
[1]412 }
413
414 /* next */
415 if (--cPages == 0)
416 break;
417 i++;
418 }
419
[28936]420 if (fFlushTLBs)
[1]421 {
[20492]422 PGM_INVL_ALL_VCPU_TLBS(pVM);
[27402]423 Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: flushing guest TLBs; rc=%d\n", rc));
[1]424 }
425 else
[46420]426 Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: doesn't flush guest TLBs. rc=%Rrc; sync flags=%x VMCPU_FF_PGM_SYNC_CR3=%d\n", rc, VMMGetCpu(pVM)->pgm.s.fSyncFlags, VMCPU_FF_IS_SET(VMMGetCpu(pVM), VMCPU_FF_PGM_SYNC_CR3)));
[28936]427
[1]428 return rc;
429}
430
431
432/**
[64115]433 * Deregister a physical page access handler.
[1]434 *
435 * @returns VBox status code.
[64115]436 * @param pVM The cross context VM structure.
437 * @param pPhysHandler The handler to deregister (but not free).
[70954]438 * @param fRestoreAsRAM How the range will likely be restored: true, false,
439 * or -1 if we don't know.
[64115]440 */
[70954]441int pgmHandlerPhysicalExDeregister(PVM pVM, PPGMPHYSHANDLER pPhysHandler, int fRestoreAsRAM)
[64115]442{
[70954]443 LogFlow(("pgmHandlerPhysicalExDeregister: Removing Range %RGp-%RGp %s fRestoreAsRAM=%d\n",
444 pPhysHandler->Core.Key, pPhysHandler->Core.KeyLast, R3STRING(pPhysHandler->pszDesc), fRestoreAsRAM));
[64115]445 AssertReturn(pPhysHandler->Core.Key != NIL_RTGCPHYS, VERR_PGM_HANDLER_NOT_FOUND);
446
447 /*
448 * Remove the handler from the tree.
449 */
450 pgmLock(pVM);
451 PPGMPHYSHANDLER pRemoved = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
452 pPhysHandler->Core.Key);
453 if (pRemoved == pPhysHandler)
454 {
455 /*
456 * Clear the page bits, notify the REM about this change and clear
457 * the cache.
458 */
459 pgmHandlerPhysicalResetRamFlags(pVM, pPhysHandler);
[70954]460 pgmHandlerPhysicalDeregisterNotifyREMAndNEM(pVM, pPhysHandler, fRestoreAsRAM);
[64115]461 pVM->pgm.s.pLastPhysHandlerR0 = 0;
462 pVM->pgm.s.pLastPhysHandlerR3 = 0;
463 pVM->pgm.s.pLastPhysHandlerRC = 0;
464
465 pPhysHandler->Core.Key = NIL_RTGCPHYS;
466 pPhysHandler->Core.KeyLast = NIL_RTGCPHYS;
467
468 pgmUnlock(pVM);
469
470 return VINF_SUCCESS;
471 }
472
473 /*
474 * Both of the failure conditions here are considered internal processing
475 * errors because they can only be caused by race conditions or corruption.
476 * If we ever need to handle concurrent deregistration, we have to move
477 * the NIL_RTGCPHYS check inside the PGM lock.
478 */
479 if (pRemoved)
480 RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pRemoved->Core);
481
482 pgmUnlock(pVM);
483
484 if (!pRemoved)
485 AssertMsgFailed(("Didn't find range starting at %RGp in the tree!\n", pPhysHandler->Core.Key));
486 else
487 AssertMsgFailed(("Found different handle at %RGp in the tree: got %p insteaded of %p\n",
488 pPhysHandler->Core.Key, pRemoved, pPhysHandler));
489 return VERR_PGM_HANDLER_IPE_1;
490}
491
492
493/**
494 * Destroys (frees) a physical handler.
495 *
496 * The caller must deregister it before destroying it!
497 *
498 * @returns VBox status code.
[58122]499 * @param pVM The cross context VM structure.
[64115]500 * @param pHandler The handler to free. NULL if ignored.
501 */
502int pgmHandlerPhysicalExDestroy(PVM pVM, PPGMPHYSHANDLER pHandler)
503{
504 if (pHandler)
505 {
506 AssertPtr(pHandler);
507 AssertReturn(pHandler->Core.Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);
508 PGMHandlerPhysicalTypeRelease(pVM, pHandler->hType);
509 MMHyperFree(pVM, pHandler);
510 }
511 return VINF_SUCCESS;
512}
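
/*
 * Illustrative sketch (not from the original source): Ex teardown mirrors the
 * two-step creation -- deregister first, then destroy to free the structure.
 *
 * @code
 *      int rc = pgmHandlerPhysicalExDeregister(pVM, pHandler, -1);  // -1 = restore-as-RAM unknown
 *      AssertRC(rc);
 *      pgmHandlerPhysicalExDestroy(pVM, pHandler);                  // NULL would also be accepted
 * @endcode
 */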
513
514
515/**
516 * Deregister a physical page access handler.
517 *
518 * @returns VBox status code.
519 * @param pVM The cross context VM structure.
[1]520 * @param GCPhys Start physical address.
521 */
[12989]522VMMDECL(int) PGMHandlerPhysicalDeregister(PVM pVM, RTGCPHYS GCPhys)
[1]523{
524 /*
525 * Find the handler.
526 */
527 pgmLock(pVM);
[64115]528 PPGMPHYSHANDLER pRemoved = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
529 if (pRemoved)
[1]530 {
[64115]531 LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %RGp-%RGp %s\n",
532 pRemoved->Core.Key, pRemoved->Core.KeyLast, R3STRING(pRemoved->pszDesc)));
[1]533
534 /*
[31136]535 * Clear the page bits, notify the REM about this change and clear
536 * the cache.
[1]537 */
[64115]538 pgmHandlerPhysicalResetRamFlags(pVM, pRemoved);
[70954]539 pgmHandlerPhysicalDeregisterNotifyREMAndNEM(pVM, pRemoved, -1);
[31136]540 pVM->pgm.s.pLastPhysHandlerR0 = 0;
541 pVM->pgm.s.pLastPhysHandlerR3 = 0;
542 pVM->pgm.s.pLastPhysHandlerRC = 0;
[64115]543
[1]544 pgmUnlock(pVM);
[64115]545
546 pRemoved->Core.Key = NIL_RTGCPHYS;
547 pgmHandlerPhysicalExDestroy(pVM, pRemoved);
[1]548 return VINF_SUCCESS;
549 }
[64115]550
[1]551 pgmUnlock(pVM);
552
[13824]553 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
[1]554 return VERR_PGM_HANDLER_NOT_FOUND;
555}
556
557
558/**
559 * Shared code with PGMHandlerPhysicalModify.
560 */
[70954]561static void pgmHandlerPhysicalDeregisterNotifyREMAndNEM(PVM pVM, PPGMPHYSHANDLER pCur, int fRestoreAsRAM)
[1]562{
[55493]563 PPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
564 RTGCPHYS GCPhysStart = pCur->Core.Key;
565 RTGCPHYS GCPhysLast = pCur->Core.KeyLast;
[1]566
567 /*
568 * Page align the range.
[6902]569 *
570 * Since we've reset (recalculated) the physical handler state of all pages
571 * we can make use of the page states to figure out whether a page should be
572 * included in the REM notification or not.
[1]573 */
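    /* Illustration (assuming the partial edge pages are still claimed by other
       handlers): for a handler at 0x10c00..0x13bff the start rounds up to
       0x11000 and the end down to 0x12fff, so only 0x11000..0x12fff is
       reported to REM. */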
[70954]574 if ( (pCur->Core.Key & PAGE_OFFSET_MASK)
575 || ((pCur->Core.KeyLast + 1) & PAGE_OFFSET_MASK))
[1]576 {
[55493]577 Assert(pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO);
[6902]578
[1]579 if (GCPhysStart & PAGE_OFFSET_MASK)
580 {
[36891]581 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysStart);
[6902]582 if ( pPage
583 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
[1]584 {
[32036]585 RTGCPHYS GCPhys = (GCPhysStart + (PAGE_SIZE - 1)) & X86_PTE_PAE_PG_MASK;
[1]586 if ( GCPhys > GCPhysLast
587 || GCPhys < GCPhysStart)
588 return;
589 GCPhysStart = GCPhys;
590 }
591 else
[32036]592 GCPhysStart &= X86_PTE_PAE_PG_MASK;
[6902]593 Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
[1]594 }
[6902]595
[1]596 if (GCPhysLast & PAGE_OFFSET_MASK)
597 {
[36891]598 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysLast);
[6902]599 if ( pPage
600 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
[1]601 {
[32036]602 RTGCPHYS GCPhys = (GCPhysLast & X86_PTE_PAE_PG_MASK) - 1;
[1]603 if ( GCPhys < GCPhysStart
604 || GCPhys > GCPhysLast)
605 return;
606 GCPhysLast = GCPhys;
607 }
608 else
[6902]609 GCPhysLast |= PAGE_OFFSET_MASK;
610 Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
[1]611 }
612 }
613
614 /*
[70954]615 * Tell REM and NEM.
[1]616 */
[70954]617 const bool fRestoreAsRAM2 = pCurType->pfnHandlerR3
618 && pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO; /** @todo this isn't entirely correct. */
[70960]619#ifdef VBOX_WITH_REM
[40274]620# ifndef IN_RING3
[55493]621 REMNotifyHandlerPhysicalDeregister(pVM, pCurType->enmKind, GCPhysStart, GCPhysLast - GCPhysStart + 1,
[70954]622 !!pCurType->pfnHandlerR3, fRestoreAsRAM2);
[40274]623# else
[55493]624 REMR3NotifyHandlerPhysicalDeregister(pVM, pCurType->enmKind, GCPhysStart, GCPhysLast - GCPhysStart + 1,
[70954]625 !!pCurType->pfnHandlerR3, fRestoreAsRAM2);
[40274]626# endif
[70954]627#endif
[70977]628 /** @todo do we need this notification? */
[70954]629#if defined(IN_RING3) || defined(IN_RING0)
630 NEMHCNotifyHandlerPhysicalDeregister(pVM, pCurType->enmKind, GCPhysStart, GCPhysLast - GCPhysStart + 1,
631 fRestoreAsRAM, fRestoreAsRAM2);
[63465]632#else
[70954]633 RT_NOREF_PV(fRestoreAsRAM); /** @todo this needs more work for REM! */
[74341]634 RT_NOREF_PV(fRestoreAsRAM2);
[1]635#endif
636}
637
638
639/**
[32087]640 * pgmHandlerPhysicalResetRamFlags helper that checks for other handlers on
641 * edge pages.
[6902]642 */
[32087]643DECLINLINE(void) pgmHandlerPhysicalRecalcPageState(PVM pVM, RTGCPHYS GCPhys, bool fAbove, PPGMRAMRANGE *ppRamHint)
[6902]644{
645 /*
646 * Look for other handlers.
647 */
648 unsigned uState = PGM_PAGE_HNDL_PHYS_STATE_NONE;
649 for (;;)
650 {
[32087]651 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys, fAbove);
[55493]652 if ( !pCur
653 || ((fAbove ? pCur->Core.Key : pCur->Core.KeyLast) >> PAGE_SHIFT) != (GCPhys >> PAGE_SHIFT))
[6902]654 break;
[55493]655 PPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
656 uState = RT_MAX(uState, pCurType->uState);
[6902]657
658 /* next? */
659 RTGCPHYS GCPhysNext = fAbove
660 ? pCur->Core.KeyLast + 1
661 : pCur->Core.Key - 1;
662 if ((GCPhysNext >> PAGE_SHIFT) != (GCPhys >> PAGE_SHIFT))
663 break;
664 GCPhys = GCPhysNext;
665 }
666
667 /*
667 * Update if we found something with a higher priority
668 * state than the current one.
670 */
671 if (uState != PGM_PAGE_HNDL_PHYS_STATE_NONE)
672 {
673 PPGMPAGE pPage;
[36891]674 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, ppRamHint);
[6902]675 if ( RT_SUCCESS(rc)
676 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
[32087]677 {
678 /* This should normally not be necessary. */
[6902]679 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
[32087]680 bool fFlushTLBs;
681 rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, false /*fFlushPTEs*/, &fFlushTLBs);
682 if (RT_SUCCESS(rc) && fFlushTLBs)
683 PGM_INVL_ALL_VCPU_TLBS(pVM);
684 else
685 AssertRC(rc);
[70977]686
687#ifndef IN_RC
688 /* Tell NEM about the protection update. */
689 if (VM_IS_NEM_ENABLED(pVM))
690 {
691 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
692 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
693 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
694 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
695 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
696 }
697#endif
[32087]698 }
[6902]699 else
700 AssertRC(rc);
701 }
702}
703
704
705/**
[18230]706 * Resets an aliased page.
707 *
[58122]708 * @param pVM The cross context VM structure.
[32087]709 * @param pPage The page.
710 * @param GCPhysPage The page address in case it comes in handy.
711 * @param fDoAccounting Whether to perform accounting. (Only set during
712 * reset where pgmR3PhysRamReset doesn't have the
713 * handler structure handy.)
[18230]714 */
[32087]715void pgmHandlerPhysicalResetAliasedPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage, bool fDoAccounting)
[18230]716{
[47786]717 Assert( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
718 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
[18230]719 Assert(PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
[70977]720#ifndef IN_RC
721 RTHCPHYS const HCPhysPrev = PGM_PAGE_GET_HCPHYS(pPage);
722#endif
[18230]723
724 /*
725 * Flush any shadow page table references *first*.
726 */
727 bool fFlushTLBs = false;
[32087]728 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage, true /*fFlushPTEs*/, &fFlushTLBs);
[18230]729 AssertLogRelRCReturnVoid(rc);
[70977]730#ifdef IN_RC
[18266]731 if (fFlushTLBs && rc != VINF_PGM_SYNC_CR3)
[19833]732 PGM_INVL_VCPU_TLBS(VMMGetCpu0(pVM));
[70977]733#else
[43387]734 HMFlushTLBOnAllVCpus(pVM);
[70977]735#endif
[18230]736
737 /*
738 * Make it an MMIO/Zero page.
739 */
[37354]740 PGM_PAGE_SET_HCPHYS(pVM, pPage, pVM->pgm.s.HCPhysZeroPg);
741 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO);
742 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
743 PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
[18230]744 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_ALL);
745
[25542]746 /* Flush its TLB entry. */
[37354]747 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
[25542]748
[32087]749 /*
750 * Do accounting for pgmR3PhysRamReset.
751 */
752 if (fDoAccounting)
753 {
754 PPGMPHYSHANDLER pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysPage);
755 if (RT_LIKELY(pHandler))
756 {
757 Assert(pHandler->cAliasedPages > 0);
758 pHandler->cAliasedPages--;
759 }
760 else
761 AssertFailed();
762 }
[70977]763
764#ifndef IN_RC
765 /*
766 * Tell NEM about the protection change.
767 */
768 if (VM_IS_NEM_ENABLED(pVM))
769 {
770 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
771 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, HCPhysPrev, pVM->pgm.s.HCPhysZeroPg,
772 NEM_PAGE_PROT_NONE, PGMPAGETYPE_MMIO, &u2State);
773 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
774 }
775#endif
[18230]776}
777
778
779/**
[1]780 * Resets ram range flags.
781 *
[58122]784 * @param pVM The cross context VM structure.
[1]785 * @param pCur The physical handler.
786 *
[32087]787 * @remark We don't start messing with the shadow page tables, as we've
788 * already got code in Trap0e which deals with out of sync handler
789 * flags (originally conceived for global pages).
[1]790 */
791static void pgmHandlerPhysicalResetRamFlags(PVM pVM, PPGMPHYSHANDLER pCur)
792{
793 /*
[6902]794 * Iterate the guest ram pages updating the state.
[1]795 */
[36891]796 RTUINT cPages = pCur->cPages;
797 RTGCPHYS GCPhys = pCur->Core.Key;
[1]798 PPGMRAMRANGE pRamHint = NULL;
799 for (;;)
800 {
[6902]801 PPGMPAGE pPage;
[36891]802 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
[6902]803 if (RT_SUCCESS(rc))
[18230]804 {
[47786]805 /* Reset aliased MMIO pages to MMIO, since this aliasing is our business.
[18230]806 (We don't flip MMIO to RAM though, that's PGMPhys.cpp's job.) */
[70977]807 bool fNemNotifiedAlready = false;
[47786]808 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
809 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
[32087]810 {
811 Assert(pCur->cAliasedPages > 0);
812 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhys, false /*fDoAccounting*/);
813 pCur->cAliasedPages--;
[70977]814 fNemNotifiedAlready = true;
[32087]815 }
[55493]816#ifdef VBOX_STRICT
817 PPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
818 AssertMsg(pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO || PGM_PAGE_IS_MMIO(pPage), ("%RGp %R[pgmpage]\n", GCPhys, pPage));
819#endif
[6902]820 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_NONE);
[70977]821
822#ifndef IN_RC
823 /* Tell NEM about the protection change. */
824 if (VM_IS_NEM_ENABLED(pVM) && !fNemNotifiedAlready)
825 {
826 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
827 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
828 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
829 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
830 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
831 }
832#else
833 RT_NOREF_PV(fNemNotifiedAlready);
834#endif
[18230]835 }
[6902]836 else
837 AssertRC(rc);
838
[1]839 /* next */
840 if (--cPages == 0)
841 break;
842 GCPhys += PAGE_SIZE;
843 }
844
[32087]845 pCur->cAliasedPages = 0;
846 pCur->cTmpOffPages = 0;
847
[1]848 /*
[6902]849 * Check for partial start and end pages.
[1]850 */
851 if (pCur->Core.Key & PAGE_OFFSET_MASK)
[32087]852 pgmHandlerPhysicalRecalcPageState(pVM, pCur->Core.Key - 1, false /* fAbove */, &pRamHint);
853 if ((pCur->Core.KeyLast & PAGE_OFFSET_MASK) != PAGE_OFFSET_MASK)
854 pgmHandlerPhysicalRecalcPageState(pVM, pCur->Core.KeyLast + 1, true /* fAbove */, &pRamHint);
[1]855}
856
857
858/**
859 * Modify a physical page access handler.
860 *
861 * Modification can only be done to the range itself, not the type or anything else.
862 *
863 * @returns VBox status code.
864 * For all return codes other than VERR_PGM_HANDLER_NOT_FOUND and VINF_SUCCESS the range is deregistered
865 * and a new registration must be performed!
[58122]866 * @param pVM The cross context VM structure.
[1]867 * @param GCPhysCurrent Current location.
868 * @param GCPhys New location.
869 * @param GCPhysLast New last location.
870 */
[12989]871VMMDECL(int) PGMHandlerPhysicalModify(PVM pVM, RTGCPHYS GCPhysCurrent, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
[1]872{
873 /*
874 * Remove it.
875 */
876 int rc;
877 pgmLock(pVM);
[13062]878 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysCurrent);
[1]879 if (pCur)
880 {
881 /*
882 * Clear the ram flags. (We're going to move or free it!)
883 */
884 pgmHandlerPhysicalResetRamFlags(pVM, pCur);
[70954]885#if defined(VBOX_WITH_REM) || defined(IN_RING3) || defined(IN_RING0)
886 PPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
887 bool const fRestoreAsRAM = pCurType->pfnHandlerR3 /** @todo this isn't entirely correct. */
888 && pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO;
[59652]889#endif
[1]890
891 /*
892 * Validate the new range, modify and reinsert.
893 */
894 if (GCPhysLast >= GCPhys)
895 {
896 /*
897 * We require the range to be within registered ram.
898 * There is no apparent need to support ranges which cover more than one ram range.
899 */
[36891]900 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
901 if ( pRam
902 && GCPhys <= pRam->GCPhysLast
903 && GCPhysLast >= pRam->GCPhys)
[1]904 {
905 pCur->Core.Key = GCPhys;
906 pCur->Core.KeyLast = GCPhysLast;
[32036]907 pCur->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + 1) >> PAGE_SHIFT;
[1]908
[13062]909 if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pCur->Core))
[1]910 {
[70954]911#if defined(VBOX_WITH_REM) || defined(IN_RING3) || defined(IN_RING0)
912 RTGCPHYS const cb = GCPhysLast - GCPhys + 1;
913 PGMPHYSHANDLERKIND const enmKind = pCurType->enmKind;
914#endif
[59652]915#ifdef VBOX_WITH_REM
[70954]916 bool const fHasHCHandler = !!pCurType->pfnHandlerR3;
[59652]917#endif
[20788]918
[1]919 /*
920 * Set ram flags, flush shadow PT entries and finally tell REM about this.
921 */
922 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
[70954]923
[70977]924 /** @todo NEM: not sure we need this notification... */
[70954]925#if defined(IN_RING3) || defined(IN_RING0)
926 NEMHCNotifyHandlerPhysicalModify(pVM, enmKind, GCPhysCurrent, GCPhys, cb, fRestoreAsRAM);
927#endif
928
[20788]929 pgmUnlock(pVM);
[1]930
[40274]931#ifdef VBOX_WITH_REM
932# ifndef IN_RING3
[55493]933 REMNotifyHandlerPhysicalModify(pVM, enmKind, GCPhysCurrent, GCPhys, cb,
[25243]934 fHasHCHandler, fRestoreAsRAM);
[40274]935# else
[55493]936 REMR3NotifyHandlerPhysicalModify(pVM, enmKind, GCPhysCurrent, GCPhys, cb,
[25243]937 fHasHCHandler, fRestoreAsRAM);
[40274]938# endif
[1]939#endif
[22756]940 PGM_INVL_ALL_VCPU_TLBS(pVM);
[13824]941 Log(("PGMHandlerPhysicalModify: GCPhysCurrent=%RGp -> GCPhys=%RGp GCPhysLast=%RGp\n",
[1]942 GCPhysCurrent, GCPhys, GCPhysLast));
943 return VINF_SUCCESS;
944 }
[4615]945
[13824]946 AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp\n", GCPhys, GCPhysLast));
[1]947 rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
948 }
949 else
950 {
[13824]951 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
[1]952 rc = VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
953 }
954 }
955 else
956 {
[13824]957 AssertMsgFailed(("Invalid range %RGp-%RGp\n", GCPhys, GCPhysLast));
[1]958 rc = VERR_INVALID_PARAMETER;
959 }
960
961 /*
[31136]962 * Invalid new location, flush the cache and free it.
[1]963 * We only need to notify REM and free the memory.
964 */
[70954]965 pgmHandlerPhysicalDeregisterNotifyREMAndNEM(pVM, pCur, -1);
[31136]966 pVM->pgm.s.pLastPhysHandlerR0 = 0;
967 pVM->pgm.s.pLastPhysHandlerR3 = 0;
968 pVM->pgm.s.pLastPhysHandlerRC = 0;
[55493]969 PGMHandlerPhysicalTypeRelease(pVM, pCur->hType);
[1]970 MMHyperFree(pVM, pCur);
971 }
972 else
973 {
[13824]974 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhysCurrent));
[1]975 rc = VERR_PGM_HANDLER_NOT_FOUND;
976 }
977
978 pgmUnlock(pVM);
979 return rc;
980}
981
982
983/**
[55331]984 * Changes the user callback arguments associated with a physical access
985 * handler.
[1]986 *
987 * @returns VBox status code.
[58122]988 * @param pVM The cross context VM structure.
[55331]989 * @param GCPhys Start physical address of the handler.
[1]990 * @param pvUserR3 User argument to the R3 handler.
991 * @param pvUserR0 User argument to the R0 handler.
[13042]992 * @param pvUserRC User argument to the RC handler. Values larger or
993 * equal to 0x10000 will be relocated automatically.
[1]994 */
[55331]995VMMDECL(int) PGMHandlerPhysicalChangeUserArgs(PVM pVM, RTGCPHYS GCPhys, RTR3PTR pvUserR3, RTR0PTR pvUserR0, RTRCPTR pvUserRC)
[1]996{
997 /*
[55331]998 * Find the handler.
[1]999 */
1000 int rc = VINF_SUCCESS;
1001 pgmLock(pVM);
[13062]1002 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
[1]1003 if (pCur)
1004 {
1005 /*
[55331]1006 * Change arguments.
[1]1007 */
[55331]1008 pCur->pvUserR3 = pvUserR3;
1009 pCur->pvUserR0 = pvUserR0;
1010 pCur->pvUserRC = pvUserRC;
[1]1011 }
1012 else
1013 {
[13824]1014 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
[1]1015 rc = VERR_PGM_HANDLER_NOT_FOUND;
1016 }
1017
1018 pgmUnlock(pVM);
1019 return rc;
1020}
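
/*
 * Usage sketch (an assumption, not from the original source): re-pointing an
 * already registered handler at a new device instance without tearing down the
 * registration.  pNewThis is a hypothetical replacement argument.
 *
 * @code
 *      rc = PGMHandlerPhysicalChangeUserArgs(pVM, GCPhysStart, pNewThis, NIL_RTR0PTR, NIL_RTRCPTR);
 *      AssertRC(rc);
 * @endcode
 */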
1021
1022
1023/**
[9008]1024 * Splits a physical access handler in two.
[1]1025 *
1026 * @returns VBox status code.
[58122]1027 * @param pVM The cross context VM structure.
[1]1028 * @param GCPhys Start physical address of the handler.
1029 * @param GCPhysSplit The split address.
1030 */
[12989]1031VMMDECL(int) PGMHandlerPhysicalSplit(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysSplit)
[1]1032{
1033 AssertReturn(GCPhys < GCPhysSplit, VERR_INVALID_PARAMETER);
1034
1035 /*
1036 * Do the allocation without owning the lock.
1037 */
1038 PPGMPHYSHANDLER pNew;
1039 int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
[13816]1040 if (RT_FAILURE(rc))
[1]1041 return rc;
1042
1043 /*
1044 * Get the handler.
1045 */
1046 pgmLock(pVM);
[13062]1047 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
[13232]1048 if (RT_LIKELY(pCur))
[1]1049 {
[13232]1050 if (RT_LIKELY(GCPhysSplit <= pCur->Core.KeyLast))
[1]1051 {
1052 /*
1053 * Create new handler node for the 2nd half.
1054 */
1055 *pNew = *pCur;
1056 pNew->Core.Key = GCPhysSplit;
[32036]1057 pNew->cPages = (pNew->Core.KeyLast - (pNew->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
[1]1058
1059 pCur->Core.KeyLast = GCPhysSplit - 1;
[32036]1060 pCur->cPages = (pCur->Core.KeyLast - (pCur->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
[1]1061
[13232]1062 if (RT_LIKELY(RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pNew->Core)))
[1]1063 {
[13824]1064 LogFlow(("PGMHandlerPhysicalSplit: %RGp-%RGp and %RGp-%RGp\n",
[1]1065 pCur->Core.Key, pCur->Core.KeyLast, pNew->Core.Key, pNew->Core.KeyLast));
1066 pgmUnlock(pVM);
1067 return VINF_SUCCESS;
1068 }
1069 AssertMsgFailed(("whu?\n"));
[39402]1070 rc = VERR_PGM_PHYS_HANDLER_IPE;
[1]1071 }
1072 else
1073 {
[13824]1074 AssertMsgFailed(("outside range: %RGp-%RGp split %RGp\n", pCur->Core.Key, pCur->Core.KeyLast, GCPhysSplit));
[1]1075 rc = VERR_INVALID_PARAMETER;
1076 }
1077 }
1078 else
1079 {
[13824]1080 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
[1]1081 rc = VERR_PGM_HANDLER_NOT_FOUND;
1082 }
1083 pgmUnlock(pVM);
1084 MMHyperFree(pVM, pNew);
1085 return rc;
1086}
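
/*
 * Worked example (illustrative): splitting a handler covering 0x10000..0x13fff
 * at 0x12000 leaves the original node as 0x10000..0x11fff and creates a new
 * node for 0x12000..0x13fff; both keep the same type, user arguments and
 * description, since the new node is a plain copy.
 *
 * @code
 *      rc = PGMHandlerPhysicalSplit(pVM, 0x10000, 0x12000);
 * @endcode
 */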
1087
1088
1089/**
1090 * Joins up two adjacent physical access handlers which have the same callbacks.
1091 *
1092 * @returns VBox status code.
[58122]1093 * @param pVM The cross context VM structure.
[1]1094 * @param GCPhys1 Start physical address of the first handler.
1095 * @param GCPhys2 Start physical address of the second handler.
1096 */
[12989]1097VMMDECL(int) PGMHandlerPhysicalJoin(PVM pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2)
[1]1098{
1099 /*
1100 * Get the handlers.
1101 */
1102 int rc;
1103 pgmLock(pVM);
[13062]1104 PPGMPHYSHANDLER pCur1 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys1);
[13232]1105 if (RT_LIKELY(pCur1))
[1]1106 {
[13062]1107 PPGMPHYSHANDLER pCur2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
[13232]1108 if (RT_LIKELY(pCur2))
[1]1109 {
1110 /*
1111 * Make sure that they are adjacent, and that they've got the same callbacks.
1112 */
[13232]1113 if (RT_LIKELY(pCur1->Core.KeyLast + 1 == pCur2->Core.Key))
[1]1114 {
[55493]1115 if (RT_LIKELY(pCur1->hType == pCur2->hType))
[1]1116 {
[13062]1117 PPGMPHYSHANDLER pCur3 = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
[13232]1118 if (RT_LIKELY(pCur3 == pCur2))
[1]1119 {
1120 pCur1->Core.KeyLast = pCur2->Core.KeyLast;
[32036]1121 pCur1->cPages = (pCur1->Core.KeyLast - (pCur1->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
[13824]1122 LogFlow(("PGMHandlerPhysicalJoin: %RGp-%RGp %RGp-%RGp\n",
[1]1123 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
[31136]1124 pVM->pgm.s.pLastPhysHandlerR0 = 0;
1125 pVM->pgm.s.pLastPhysHandlerR3 = 0;
1126 pVM->pgm.s.pLastPhysHandlerRC = 0;
[55493]1127 PGMHandlerPhysicalTypeRelease(pVM, pCur2->hType);
[20709]1128 MMHyperFree(pVM, pCur2);
[1]1129 pgmUnlock(pVM);
1130 return VINF_SUCCESS;
1131 }
[6902]1132
[1]1133 Assert(pCur3 == pCur2);
[39402]1134 rc = VERR_PGM_PHYS_HANDLER_IPE;
[1]1135 }
1136 else
1137 {
1138 AssertMsgFailed(("mismatching handlers\n"));
1139 rc = VERR_ACCESS_DENIED;
1140 }
1141 }
1142 else
1143 {
[13824]1144 AssertMsgFailed(("not adjacent: %RGp-%RGp %RGp-%RGp\n",
[1]1145 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
1146 rc = VERR_INVALID_PARAMETER;
1147 }
1148 }
1149 else
1150 {
[13824]1151 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys2));
[1]1152 rc = VERR_PGM_HANDLER_NOT_FOUND;
1153 }
1154 }
1155 else
1156 {
[13824]1157 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys1));
[1]1158 rc = VERR_PGM_HANDLER_NOT_FOUND;
1159 }
1160 pgmUnlock(pVM);
1161 return rc;
1162
1163}
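
/*
 * Illustrative counterpart to the split example above: joining 0x10000..0x11fff
 * with 0x12000..0x13fff back into a single node succeeds only because the two
 * ranges are adjacent and share the same type registration (hType).
 *
 * @code
 *      rc = PGMHandlerPhysicalJoin(pVM, 0x10000, 0x12000);
 * @endcode
 */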
1164
1165
1166/**
[32087]1167 * Resets any modifications to individual pages in a physical page access
1168 * handler region.
[1]1169 *
[32087]1170 * This is used in pair with PGMHandlerPhysicalPageTempOff(),
1171 * PGMHandlerPhysicalPageAlias() or PGMHandlerPhysicalPageAliasHC().
[1]1172 *
1173 * @returns VBox status code.
[58122]1174 * @param pVM The cross context VM structure.
[18230]1175 * @param GCPhys The start address of the handler regions, i.e. what you
1176 * passed to PGMR3HandlerPhysicalRegister(),
1177 * PGMHandlerPhysicalRegisterEx() or
1178 * PGMHandlerPhysicalModify().
[1]1179 */
[32087]1180VMMDECL(int) PGMHandlerPhysicalReset(PVM pVM, RTGCPHYS GCPhys)
[1]1181{
[20434]1182 LogFlow(("PGMHandlerPhysicalReset GCPhys=%RGp\n", GCPhys));
[6902]1183 pgmLock(pVM);
1184
[1]1185 /*
1186 * Find the handler.
1187 */
[6902]1188 int rc;
[13062]1189 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
[13232]1190 if (RT_LIKELY(pCur))
[1]1191 {
1192 /*
[55493]1193 * Validate kind.
[1]1194 */
[55493]1195 PPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
1196 switch (pCurType->enmKind)
[1]1197 {
[55493]1198 case PGMPHYSHANDLERKIND_WRITE:
1199 case PGMPHYSHANDLERKIND_ALL:
1200 case PGMPHYSHANDLERKIND_MMIO: /* NOTE: Only use when clearing MMIO ranges with aliased MMIO2 pages! */
[1]1201 {
[58126]1202 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysHandlerReset)); /** @todo move out of switch */
[36891]1203 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
[6902]1204 Assert(pRam);
[18230]1205 Assert(pRam->GCPhys <= pCur->Core.Key);
1206 Assert(pRam->GCPhysLast >= pCur->Core.KeyLast);
1207
[55493]1208 if (pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO)
[1]1209 {
[18230]1210 /*
1211 * Reset all the PGMPAGETYPE_MMIO2_ALIAS_MMIO pages first and that's it.
1212 * This could probably be optimized a bit wrt flushing, but I'm too lazy
1213 * to do that now...
1214 */
[32087]1215 if (pCur->cAliasedPages)
[18230]1216 {
[32087]1217 PPGMPAGE pPage = &pRam->aPages[(pCur->Core.Key - pRam->GCPhys) >> PAGE_SHIFT];
1218 uint32_t cLeft = pCur->cPages;
1219 while (cLeft-- > 0)
1220 {
[47786]1221 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
1222 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
[32087]1223 {
1224 Assert(pCur->cAliasedPages > 0);
1225 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)cLeft << PAGE_SHIFT),
1226 false /*fDoAccounting*/);
1227 --pCur->cAliasedPages;
1228#ifndef VBOX_STRICT
1229 if (pCur->cAliasedPages == 0)
1230 break;
1231#endif
1232 }
1233 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO);
1234 pPage++;
1235 }
1236 Assert(pCur->cAliasedPages == 0);
[18230]1237 }
[1]1238 }
[32087]1239 else if (pCur->cTmpOffPages > 0)
[18230]1240 {
1241 /*
1242 * Set the flags and flush shadow PT entries.
1243 */
1244 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
1245 }
[6902]1246
[32087]1247 pCur->cAliasedPages = 0;
1248 pCur->cTmpOffPages = 0;
1249
[6902]1250 rc = VINF_SUCCESS;
1251 break;
[1]1252 }
1253
1254 /*
1255 * Invalid.
1256 */
1257 default:
[55493]1258 AssertMsgFailed(("Invalid type %d! Corruption!\n", pCurType->enmKind));
[39402]1259 rc = VERR_PGM_PHYS_HANDLER_IPE;
[6902]1260 break;
[1]1261 }
1262 }
[6902]1263 else
1264 {
1265 AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
1266 rc = VERR_PGM_HANDLER_NOT_FOUND;
1267 }
1268
[1]1269 pgmUnlock(pVM);
[6902]1270 return rc;
[1]1271}
1272
1273
1274/**
[6906]1275 * Temporarily turns off the access monitoring of a page within a monitored
1276 * physical write/all page access handler region.
1277 *
1278 * Use this when no further \#PFs are required for that page. Be aware that
1279 * a page directory sync might reset the flags, and turn on access monitoring
1280 * for the page.
1281 *
1282 * The caller must do required page table modifications.
1283 *
1284 * @returns VBox status code.
[58122]1285 * @param pVM The cross context VM structure.
[18230]1286 * @param GCPhys The start address of the access handler. This
1287 * must be a fully page aligned range or we risk
1288 * messing up other handlers installed for the
1289 * start and end pages.
1290 * @param GCPhysPage The physical address of the page to turn off
1291 * access monitoring for.
[6906]1292 */
[12989]1293VMMDECL(int) PGMHandlerPhysicalPageTempOff(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
[6906]1294{
[28771]1295 LogFlow(("PGMHandlerPhysicalPageTempOff GCPhysPage=%RGp\n", GCPhysPage));
[20434]1296
[20765]1297 pgmLock(pVM);
[6906]1298 /*
1299 * Validate the range.
1300 */
[13062]1301 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
[13232]1302 if (RT_LIKELY(pCur))
[6906]1303 {
[13232]1304 if (RT_LIKELY( GCPhysPage >= pCur->Core.Key
1305 && GCPhysPage <= pCur->Core.KeyLast))
[6906]1306 {
1307 Assert(!(pCur->Core.Key & PAGE_OFFSET_MASK));
1308 Assert((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1309
[55493]1310 PPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
1311 AssertReturnStmt( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
1312 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL,
[28771]1313 pgmUnlock(pVM), VERR_ACCESS_DENIED);
[6906]1314
1315 /*
1316 * Change the page status.
1317 */
1318 PPGMPAGE pPage;
[36891]1319 int rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage);
[20765]1320 AssertReturnStmt(RT_SUCCESS_NP(rc), pgmUnlock(pVM), rc);
[32087]1321 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
1322 {
1323 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1324 pCur->cTmpOffPages++;
[70977]1325#ifndef IN_RC
1326 /* Tell NEM about the protection change (VGA is using this to track dirty pages). */
1327 if (VM_IS_NEM_ENABLED(pVM))
1328 {
1329 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1330 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
[71043]1331 NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
[70977]1332 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1333 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1334 }
1335#endif
[32087]1336 }
[20765]1337 pgmUnlock(pVM);
[6906]1338 return VINF_SUCCESS;
1339 }
[20765]1340 pgmUnlock(pVM);
[6906]1341 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
1342 GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
1343 return VERR_INVALID_PARAMETER;
1344 }
[20765]1345 pgmUnlock(pVM);
[6906]1346 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
1347 return VERR_PGM_HANDLER_NOT_FOUND;
1348}
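
/*
 * Usage sketch (an assumption, not from the original source): a device doing
 * dirty-page tracking can stop taking write faults for a page once it has been
 * recorded as dirty, then re-arm the whole range with PGMHandlerPhysicalReset()
 * after the dirty state has been consumed.
 *
 * @code
 *      // In the write handler: mark the page dirty, then silence further faults.
 *      rc = PGMHandlerPhysicalPageTempOff(pVM, GCPhysHandler, GCPhysPage);
 *      ...
 *      // Later, e.g. after the dirty bitmap has been read back:
 *      rc = PGMHandlerPhysicalReset(pVM, GCPhysHandler);
 * @endcode
 */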
1349
[13415]1350
[13387]1351/**
[18230]1352 * Replaces an MMIO page with an MMIO2 page.
[13387]1353 *
[18234]1354 * This is a worker for IOMMMIOMapMMIO2Page that works in a similar way to
[18230]1355 * PGMHandlerPhysicalPageTempOff but for an MMIO page. Since an MMIO page has no
1356 * backing, the caller must provide a replacement page. For various reasons the
1357 * replacement page must be an MMIO2 page.
[13387]1358 *
[18230]1359 * The caller must do required page table modifications. You can get away
[33540]1360 * without making any modifications since it's an MMIO page; the cost is an extra
[18230]1361 * \#PF which will then resync the page.
[13387]1362 *
[18230]1363 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
1364 *
1365 * The caller may still get handler callbacks even after this call and must be
1366 * able to deal correctly with such calls. The reason for these callbacks is
1367 * either that we're executing in the recompiler (which doesn't know about this
1368 * arrangement) or that we've been restored from saved state (where we won't
1369 * save the change).
1370 *
[13387]1371 * @returns VBox status code.
[58122]1372 * @param pVM The cross context VM structure.
[18230]1373 * @param GCPhys The start address of the access handler. This
1374 * must be a fully page aligned range or we risk
1375 * messing up other handlers installed for the
1376 * start and end pages.
1377 * @param GCPhysPage The physical address of the page to turn off
1378 * access monitoring for.
1379 * @param GCPhysPageRemap The physical address of the MMIO2 page that
1380 * serves as backing memory.
1381 *
1382 * @remark May cause a page pool flush if used on a page that is already
1383 * aliased.
1384 *
1385 * @note This trick does only work reliably if the two pages are never ever
1386 * mapped in the same page table. If they are the page pool code will
1387 * be confused should either of them be flushed. See the special case
1388 * of zero page aliasing mentioned in #3170.
1389 *
[13387]1390 */
1391VMMDECL(int) PGMHandlerPhysicalPageAlias(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTGCPHYS GCPhysPageRemap)
1392{
[19807]1393/// Assert(!IOMIsLockOwner(pVM)); /* We mustn't own any other locks when calling this */
[55493]1394 pgmLock(pVM);
[19807]1395
[13387]1396 /*
[18230]1397 * Lookup and validate the range.
[13387]1398 */
1399 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1400 if (RT_LIKELY(pCur))
1401 {
[70977]1402 if (RT_LIKELY( GCPhysPage >= pCur->Core.Key
1403 && GCPhysPage <= pCur->Core.KeyLast))
[13387]1404 {
[55493]1405 PPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
1406 AssertReturnStmt(pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO, pgmUnlock(pVM), VERR_ACCESS_DENIED);
[20765]1407 AssertReturnStmt(!(pCur->Core.Key & PAGE_OFFSET_MASK), pgmUnlock(pVM), VERR_INVALID_PARAMETER);
1408 AssertReturnStmt((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK, pgmUnlock(pVM), VERR_INVALID_PARAMETER);
[13387]1409
[17509]1410 /*
[18230]1411 * Get and validate the two pages.
[17509]1412 */
[13387]1413 PPGMPAGE pPageRemap;
[36891]1414 int rc = pgmPhysGetPageEx(pVM, GCPhysPageRemap, &pPageRemap);
[20765]1415 AssertReturnStmt(RT_SUCCESS_NP(rc), pgmUnlock(pVM), rc);
1416 AssertMsgReturnStmt(PGM_PAGE_GET_TYPE(pPageRemap) == PGMPAGETYPE_MMIO2,
[18230]1417 ("GCPhysPageRemap=%RGp %R[pgmpage]\n", GCPhysPageRemap, pPageRemap),
[20765]1418 pgmUnlock(pVM), VERR_PGM_PHYS_NOT_MMIO2);
[13387]1419
1420 PPGMPAGE pPage;
[36891]1421 rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage);
[20765]1422 AssertReturnStmt(RT_SUCCESS_NP(rc), pgmUnlock(pVM), rc);
[18230]1423 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1424 {
1425 AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO,
1426 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1427 VERR_PGM_PHYS_NOT_MMIO2);
[19992]1428 if (PGM_PAGE_GET_HCPHYS(pPage) == PGM_PAGE_GET_HCPHYS(pPageRemap))
[20765]1429 {
1430 pgmUnlock(pVM);
[18230]1431 return VINF_PGM_HANDLER_ALREADY_ALIASED;
[20765]1432 }
[13387]1433
[18230]1434 /*
1435 * The page is already mapped as some other page, reset it
1436 * to an MMIO/ZERO page before doing the new mapping.
1437 */
1438 Log(("PGMHandlerPhysicalPageAlias: GCPhysPage=%RGp (%R[pgmpage]; %RHp -> %RHp\n",
1439 GCPhysPage, pPage, PGM_PAGE_GET_HCPHYS(pPage), PGM_PAGE_GET_HCPHYS(pPageRemap)));
[32087]1440 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage, false /*fDoAccounting*/);
1441 pCur->cAliasedPages--;
[18230]1442 }
1443 Assert(PGM_PAGE_IS_ZERO(pPage));
1444
1445 /*
1446 * Do the actual remapping here.
1447 * This page now serves as an alias for the backing memory specified.
1448 */
1449 LogFlow(("PGMHandlerPhysicalPageAlias: %RGp (%R[pgmpage]) alias for %RGp (%R[pgmpage])\n",
1450 GCPhysPage, pPage, GCPhysPageRemap, pPageRemap ));
[37354]1451 PGM_PAGE_SET_HCPHYS(pVM, pPage, PGM_PAGE_GET_HCPHYS(pPageRemap));
1452 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
1453 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1454 PGM_PAGE_SET_PAGEID(pVM, pPage, PGM_PAGE_GET_PAGEID(pPageRemap));
[18230]1455 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
[32087]1456 pCur->cAliasedPages++;
1457 Assert(pCur->cAliasedPages <= pCur->cPages);
[24711]1458
[25542]1459 /* Flush its TLB entry. */
[37354]1460 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
[25542]1461
[70977]1462# ifndef IN_RC
1463 /* Tell NEM about the backing and protection change. */
1464 if (VM_IS_NEM_ENABLED(pVM))
1465 {
1466 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1467 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, pVM->pgm.s.HCPhysZeroPg, PGM_PAGE_GET_HCPHYS(pPage),
1468 pgmPhysPageCalcNemProtection(pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO),
1469 PGMPAGETYPE_MMIO2_ALIAS_MMIO, &u2State);
1470 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1471 }
1472# endif
[18230]1473 LogFlow(("PGMHandlerPhysicalPageAlias: => %R[pgmpage]\n", pPage));
[20765]1474 pgmUnlock(pVM);
[13387]1475 return VINF_SUCCESS;
1476 }
1477
[20765]1478 pgmUnlock(pVM);
[13387]1479 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
1480 GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
1481 return VERR_INVALID_PARAMETER;
1482 }
1483
[20765]1484 pgmUnlock(pVM);
[13387]1485 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
1486 return VERR_PGM_HANDLER_NOT_FOUND;
1487}
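
/*
 * Usage sketch (an assumption, not from the original source): the pattern
 * IOMMMIOMapMMIO2Page builds on -- alias one page of a registered MMIO range
 * to MMIO2 backing, and undo it again via PGMHandlerPhysicalReset() on the
 * whole range.
 *
 * @code
 *      rc = PGMHandlerPhysicalPageAlias(pVM, GCPhysMmio, GCPhysMmio + offPage, GCPhysMmio2Page);
 *      ...
 *      rc = PGMHandlerPhysicalReset(pVM, GCPhysMmio);  // back to MMIO/ZERO pages
 * @endcode
 */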
1488
[47719]1489
[19992]1490/**
[47786]1491 * Replaces an MMIO page with an arbitrary HC page in the shadow page tables.
[19992]1492 *
[47786]1493 * This differs from PGMHandlerPhysicalPageAlias in that the page doesn't need
1494 * to be a known MMIO2 page and that only shadow paging may access the page.
1495 * The latter distinction is important because the only use for this feature is
1496 * for mapping the special APIC access page that VT-x uses to detect APIC MMIO
1497 * operations; the page is shared between all guest CPUs and actually not
1498 * written to. At least at the moment.
[19992]1499 *
1500 * The caller must do the required page table modifications. You can get away
[33540]1501 * without making any modifications since it's an MMIO page; the cost is an extra
[19992]1502 * \#PF which will then resync the page.
1503 *
1504 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
1505 *
1506 *
1507 * @returns VBox status code.
[58122]1508 * @param pVM The cross context VM structure.
[19992]1509 * @param GCPhys The start address of the access handler. This
1510 * must be a fully page aligned range or we risk
1511 * messing up other handlers installed for the
1512 * start and end pages.
1513 * @param GCPhysPage The physical address of the page to turn off
1514 * access monitoring for.
1515 * @param HCPhysPageRemap The physical address of the HC page that
1516 * serves as backing memory.
1517 *
1518 * @remark May cause a page pool flush if used on a page that is already
1519 * aliased.
1520 */
1521VMMDECL(int) PGMHandlerPhysicalPageAliasHC(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTHCPHYS HCPhysPageRemap)
1522{
1523/// Assert(!IOMIsLockOwner(pVM)); /* We mustn't own any other locks when calling this */
[55493]1524 pgmLock(pVM);
[13387]1525
[19992]1526 /*
1527 * Lookup and validate the range.
1528 */
1529 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1530 if (RT_LIKELY(pCur))
1531 {
1532 if (RT_LIKELY( GCPhysPage >= pCur->Core.Key
1533 && GCPhysPage <= pCur->Core.KeyLast))
1534 {
[55493]1535 PPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
1536 AssertReturnStmt(pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO, pgmUnlock(pVM), VERR_ACCESS_DENIED);
[20765]1537 AssertReturnStmt(!(pCur->Core.Key & PAGE_OFFSET_MASK), pgmUnlock(pVM), VERR_INVALID_PARAMETER);
1538 AssertReturnStmt((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK, pgmUnlock(pVM), VERR_INVALID_PARAMETER);
[19992]1539
1540 /*
1541 * Get and validate the pages.
1542 */
1543 PPGMPAGE pPage;
[36891]1544 int rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage);
[20765]1545 AssertReturnStmt(RT_SUCCESS_NP(rc), pgmUnlock(pVM), rc);
[19992]1546 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1547 {
[20765]1548 pgmUnlock(pVM);
[47786]1549 AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO,
[19992]1550 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1551 VERR_PGM_PHYS_NOT_MMIO2);
1552 return VINF_PGM_HANDLER_ALREADY_ALIASED;
1553 }
1554 Assert(PGM_PAGE_IS_ZERO(pPage));
1555
1556 /*
1557 * Do the actual remapping here.
[47786]1558 * This page now serves as an alias for the backing memory
1559 * specified as far as shadow paging is concerned.
[19992]1560 */
[20063]1561 LogFlow(("PGMHandlerPhysicalPageAlias: %RGp (%R[pgmpage]) alias for %RHp\n",
[19992]1562 GCPhysPage, pPage, HCPhysPageRemap));
[37354]1563 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhysPageRemap);
[47786]1564 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
[37354]1565 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1566 PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
[19992]1567 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
[32087]1568 pCur->cAliasedPages++;
1569 Assert(pCur->cAliasedPages <= pCur->cPages);
[25542]1570
1571 /* Flush its TLB entry. */
[37354]1572 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
[32087]1573
[70977]1574# ifndef IN_RC
1575 /* Tell NEM about the backing and protection change. */
1576 if (VM_IS_NEM_ENABLED(pVM))
1577 {
1578 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1579 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, pVM->pgm.s.HCPhysZeroPg, PGM_PAGE_GET_HCPHYS(pPage),
1580 pgmPhysPageCalcNemProtection(pPage, PGMPAGETYPE_SPECIAL_ALIAS_MMIO),
1581 PGMPAGETYPE_SPECIAL_ALIAS_MMIO, &u2State);
1582 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1583 }
1584# endif
[19992]1585 LogFlow(("PGMHandlerPhysicalPageAliasHC: => %R[pgmpage]\n", pPage));
[20765]1586 pgmUnlock(pVM);
[19992]1587 return VINF_SUCCESS;
1588 }
[20765]1589 pgmUnlock(pVM);
[19992]1590 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
1591 GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
1592 return VERR_INVALID_PARAMETER;
1593 }
[20765]1594 pgmUnlock(pVM);
[19992]1595
1596 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
1597 return VERR_PGM_HANDLER_NOT_FOUND;
1598}
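
/*
 * Usage sketch (hypothetical): backing the VT-x APIC access page as
 * described above.  GCPhysApicBase and HCPhysApicAccessPage are assumed to
 * come from the caller's own setup; only the PGMHandlerPhysicalPageAliasHC
 * call and its status codes are taken from this file.
 */
#if 0 /* example sketch */
    /* The MMIO handler registered at GCPhysApicBase must cover whole pages,
       see the alignment asserts in the function above. */
    int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhysApicBase, GCPhysApicBase, HCPhysApicAccessPage);
    if (rc == VINF_PGM_HANDLER_ALREADY_ALIASED)
        rc = VINF_SUCCESS; /* another VCPU already did it */
    /* PGMHandlerPhysicalReset(pVM, GCPhysApicBase) undoes the aliasing. */
#endif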
1599
1600
[6906]1601/**
1602 * Checks if a physical range is handled.
1603 *
1604 * @returns boolean
[58122]1605 * @param pVM The cross context VM structure.
[6906]1606 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
[15284]1607 * @remarks Caller must take the PGM lock...
[25647]1608 * @thread EMT.
[6906]1609 */
[12989]1610VMMDECL(bool) PGMHandlerPhysicalIsRegistered(PVM pVM, RTGCPHYS GCPhys)
[6906]1611{
1612 /*
1613 * Find the handler.
1614 */
[20765]1615 pgmLock(pVM);
[31136]1616 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
[6906]1617 if (pCur)
1618 {
[55493]1619#ifdef VBOX_STRICT
[16045]1620 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
[55493]1621 PPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
1622 Assert( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
1623 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL
1624 || pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO);
1625#endif
[20765]1626 pgmUnlock(pVM);
[16045]1627 return true;
[6906]1628 }
[20765]1629 pgmUnlock(pVM);
[6906]1630 return false;
1631}
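
/*
 * Usage sketch (hypothetical): probing for an existing handler before
 * attempting a registration.  The wrapper is made up; note the answer is
 * only advisory unless the PGM lock is held across both operations.
 */
#if 0 /* example sketch */
static bool examplePhysRangeLooksFree(PVM pVM, RTGCPHYS GCPhys)
{
    return !PGMHandlerPhysicalIsRegistered(pVM, GCPhys);
}
#endif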
1632
[15284]1633
[14969]1634/**
[16045]1635 * Checks if it's a disabled all access handler or write access handler at the
1636 * given address.
1637 *
1638 * @returns true if it's an all access handler, false if it's a write access
1639 * handler.
[58122]1640 * @param pVM The cross context VM structure.
[16045]1641 * @param GCPhys The address of the page with a disabled handler.
1642 *
1643 * @remarks The caller, PGMR3PhysTlbGCPhys2Ptr, must hold the PGM lock.
1644 */
1645bool pgmHandlerPhysicalIsAll(PVM pVM, RTGCPHYS GCPhys)
1646{
[20765]1647 pgmLock(pVM);
[31136]1648 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
[20765]1649 if (!pCur)
1650 {
1651 pgmUnlock(pVM);
1652 AssertFailed();
1653 return true;
1654 }
[55493]1655 PPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
1656 Assert( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
1657 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL
1658 || pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO); /* sanity */
[16045]1659 /* Only whole pages can be disabled. */
1660 Assert( pCur->Core.Key <= (GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK)
[16047]1661 && pCur->Core.KeyLast >= (GCPhys | PAGE_OFFSET_MASK));
[20765]1662
[55493]1663 bool bRet = pCurType->enmKind != PGMPHYSHANDLERKIND_WRITE;
[20765]1664 pgmUnlock(pVM);
1665 return bRet;
[16045]1666}
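
/*
 * Interpretation sketch for the caller mentioned in the remarks above
 * (simplified and hypothetical): what the boolean answer means for mapping
 * decisions on a page with a disabled handler.
 */
#if 0 /* example sketch */
    /* true:  disabled ALL access handler - reads are intercepted as well
       once the handler is re-enabled.
       false: disabled WRITE handler - reads never trap, so a read-only
       mapping of the page is always safe. */
    bool const fAll = pgmHandlerPhysicalIsAll(pVM, GCPhys);
    NOREF(fAll);
#endif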
1667
1668
[56384]1669#ifdef VBOX_WITH_RAW_MODE
1670
[16045]1671/**
[55889]1672 * Internal worker for releasing a virtual handler type registration reference.
1673 *
1674 * @returns New reference count. UINT32_MAX if invalid input (asserted).
[58122]1675 * @param pVM The cross context VM structure.
[55889]1676 * @param pType Pointer to the type registration.
1677 */
1678DECLINLINE(uint32_t) pgmHandlerVirtualTypeRelease(PVM pVM, PPGMVIRTHANDLERTYPEINT pType)
1679{
1680 AssertMsgReturn(pType->u32Magic == PGMVIRTHANDLERTYPEINT_MAGIC, ("%#x\n", pType->u32Magic), UINT32_MAX);
1681 uint32_t cRefs = ASMAtomicDecU32(&pType->cRefs);
1682 if (cRefs == 0)
1683 {
1684 pgmLock(pVM);
1685 pType->u32Magic = PGMVIRTHANDLERTYPEINT_MAGIC_DEAD;
1686 RTListOff32NodeRemove(&pType->ListNode);
1687 pgmUnlock(pVM);
1688 MMHyperFree(pVM, pType);
1689 }
1690 return cRefs;
1691}
1692
1693
1694/**
1695 * Internal worker for retaining a virtual handler type registration reference.
1696 *
1697 * @returns New reference count. UINT32_MAX if invalid input (asserted).
[58122]1698 * @param pVM The cross context VM structure.
[55889]1699 * @param pType Pointer to the type registration.
1700 */
1701DECLINLINE(uint32_t) pgmHandlerVirtualTypeRetain(PVM pVM, PPGMVIRTHANDLERTYPEINT pType)
1702{
[57851]1703 NOREF(pVM);
[55889]1704 AssertMsgReturn(pType->u32Magic == PGMVIRTHANDLERTYPEINT_MAGIC, ("%#x\n", pType->u32Magic), UINT32_MAX);
1705 uint32_t cRefs = ASMAtomicIncU32(&pType->cRefs);
1706 Assert(cRefs < _1M && cRefs > 0);
1707 return cRefs;
1708}
1709
1710
1711/**
1712 * Releases a reference to a virtual handler type registration.
1713 *
1714 * @returns New reference count. UINT32_MAX if invalid input (asserted).
[58122]1715 * @param pVM The cross context VM structure.
[55889]1716 * @param hType The type registration handle.
1717 */
1718VMM_INT_DECL(uint32_t) PGMHandlerVirtualTypeRelease(PVM pVM, PGMVIRTHANDLERTYPE hType)
1719{
1720 if (hType != NIL_PGMVIRTHANDLERTYPE)
1721 return pgmHandlerVirtualTypeRelease(pVM, PGMVIRTHANDLERTYPEINT_FROM_HANDLE(pVM, hType));
1722 return 0;
1723}
1724
1725
1726/**
1727 * Retains a reference to a virtual handler type registration.
1728 *
1729 * @returns New reference count. UINT32_MAX if invalid input (asserted).
[58122]1730 * @param pVM The cross context VM structure.
[55889]1731 * @param hType The type registration handle.
1732 */
1733VMM_INT_DECL(uint32_t) PGMHandlerVirtualTypeRetain(PVM pVM, PGMVIRTHANDLERTYPE hType)
1734{
1735 return pgmHandlerVirtualTypeRetain(pVM, PGMVIRTHANDLERTYPEINT_FROM_HANDLE(pVM, hType));
1736}
1737
1738
1739/**
[14969]1740 * Checks if a particular guest VA is being monitored.
1741 *
1742 * @returns true or false
[58122]1743 * @param pVM The cross context VM structure.
[14969]1744 * @param GCPtr Virtual address.
[15284]1745 * @remarks Will acquire the PGM lock.
[25647]1746 * @thread Any.
[14969]1747 */
[55889]1748VMM_INT_DECL(bool) PGMHandlerVirtualIsRegistered(PVM pVM, RTGCPTR GCPtr)
[14969]1749{
1750 pgmLock(pVM);
1751 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, GCPtr);
1752 pgmUnlock(pVM);
[6906]1753
[15284]1754 return pCur != NULL;
[14969]1755}
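
/*
 * Usage sketch (hypothetical): declining to touch a guest VA that is being
 * monitored; the surrounding policy and status code choice are made up.
 */
#if 0 /* example sketch */
    if (PGMHandlerVirtualIsRegistered(pVM, GCPtr))
        return VERR_ACCESS_DENIED; /* somebody is watching this address */
#endif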
1756
[15284]1757
[6906]1758/**
[1]1759 * Searches for a virtual handler with a matching physical address.
1760 *
[55966]1761 * @returns Pointer to the virtual handler structure if found, otherwise NULL.
[58122]1762 * @param pVM The cross context VM structure.
[1]1763 * @param GCPhys GC physical address to search for.
1764 * @param piPage Where to store the pointer to the index of the cached physical page.
1765 */
[55966]1766PPGMVIRTHANDLER pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, unsigned *piPage)
[1]1767{
[13085]1768 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,VirtHandlerSearchByPhys), a);
[1]1769
[19806]1770 pgmLock(pVM);
[1]1771 PPGMPHYS2VIRTHANDLER pCur;
[13062]1772 pCur = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, GCPhys);
[1]1773 if (pCur)
1774 {
1775 /* found a match! */
[55966]1776 PPGMVIRTHANDLER pVirt = (PPGMVIRTHANDLER)((uintptr_t)pCur + pCur->offVirtHandler);
1777 *piPage = pCur - &pVirt->aPhysToVirt[0];
[19806]1778 pgmUnlock(pVM);
1779
[1]1780#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1781 AssertRelease(pCur->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD);
1782#endif
[55966]1783 LogFlow(("PHYS2VIRT: found match for %RGp -> %RGv *piPage=%#x\n", GCPhys, pVirt->Core.Key, *piPage));
[31123]1784 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,VirtHandlerSearchByPhys), a);
[55966]1785 return pVirt;
[1]1786 }
1787
[19806]1788 pgmUnlock(pVM);
[31123]1789 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,VirtHandlerSearchByPhys), a);
[55966]1790 return NULL;
[1]1791}
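
/*
 * Usage sketch (hypothetical): turning a physical hit back into the
 * monitored guest virtual address via the returned page index.  The
 * address arithmetic mirrors the layout the strict checks below assert.
 */
#if 0 /* example sketch */
    unsigned iPage;
    PPGMVIRTHANDLER pVirt = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &iPage);
    if (pVirt)
    {
        /* Page base of entry iPage within the handler range, plus the
           offset the physical address has into its page. */
        RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & ~(RTGCUINTPTR)PAGE_OFFSET_MASK)
                          + ((RTGCUINTPTR)iPage << PAGE_SHIFT)
                          + ((RTGCUINTPTR)GCPhys & PAGE_OFFSET_MASK);
        NOREF(GCPtr);
    }
#endif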
1792
1793
1794/**
1795 * Deal with aliases in phys2virt.
1796 *
[6927]1797 * As pointed out by the various todos, this currently only deals with
1798 * aliases where the two ranges match 100%.
1799 *
[58122]1800 * @param pVM The cross context VM structure.
[1]1801 * @param pPhys2Virt The node we failed insert.
1802 */
1803static void pgmHandlerVirtualInsertAliased(PVM pVM, PPGMPHYS2VIRTHANDLER pPhys2Virt)
1804{
1805 /*
1806 * First find the node which is conflicting with us.
1807 */
[33540]1808 /** @todo Deal with partial overlapping. (Unlikely situation, so I'm too lazy to do anything about it now.) */
[6927]1809 /** @todo check if the current head node covers the ground we do. This is highly unlikely
1810 * and I'm too lazy to implement this now as it will require sorting the list and stuff like that. */
[13062]1811 PPGMPHYS2VIRTHANDLER pHead = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
[6927]1812#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
[13824]1813 AssertReleaseMsg(pHead != pPhys2Virt, ("%RGp-%RGp offVirtHandler=%#RX32\n",
[6927]1814 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler));
1815#endif
1816 if (RT_UNLIKELY(!pHead || pHead->Core.KeyLast != pPhys2Virt->Core.KeyLast))
[1]1817 {
1818 /** @todo do something clever here... */
[13824]1819 LogRel(("pgmHandlerVirtualInsertAliased: %RGp-%RGp\n", pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
[1]1820 pPhys2Virt->offNextAlias = 0;
1821 return;
1822 }
1823
1824 /*
1825 * Insert ourselves as the next node.
1826 */
1827 if (!(pHead->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
1828 pPhys2Virt->offNextAlias = PGMPHYS2VIRTHANDLER_IN_TREE;
1829 else
1830 {
1831 PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pHead + (pHead->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
1832 pPhys2Virt->offNextAlias = ((intptr_t)pNext - (intptr_t)pPhys2Virt)
1833 | PGMPHYS2VIRTHANDLER_IN_TREE;
1834 }
1835 pHead->offNextAlias = ((intptr_t)pPhys2Virt - (intptr_t)pHead)
1836 | (pHead->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
[13824]1837 Log(("pgmHandlerVirtualInsertAliased: %RGp-%RGp offNextAlias=%#RX32\n", pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias));
[1]1838}
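
/*
 * Traversal sketch: following the alias chain built above.  The links are
 * self-relative byte offsets stored in offNextAlias (masked with
 * PGMPHYS2VIRTHANDLER_OFF_MASK), exactly as walked by the strict checker
 * near the end of this file.  pHead is assumed to be the node found in
 * the tree.
 */
#if 0 /* example sketch */
    PPGMPHYS2VIRTHANDLER pIter = pHead;
    while (pIter->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
    {
        pIter = (PPGMPHYS2VIRTHANDLER)(  (uintptr_t)pIter
                                       + (pIter->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
        /* visit pIter here */
    }
#endif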
1839
1840
1841/**
1842 * Resets one virtual handler range.
1843 *
[6927]1844 * This is called by HandlerVirtualUpdate when it has detected some kind of
1845 * problem and has started clearing the virtual handler page states (or
1846 * when there have been registration/deregistrations). For this reason this
1847 * function will only update the page status if it's lower than desired.
1848 *
[1]1849 * @returns 0
1850 * @param pNode Pointer to a PGMVIRTHANDLER.
[41783]1851 * @param pvUser Pointer to the VM.
[1]1852 */
1853DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser)
1854{
1855 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
1856 PVM pVM = (PVM)pvUser;
1857
[37354]1858 PGM_LOCK_ASSERT_OWNER(pVM);
1859
[1]1860 /*
[6927]1861 * Iterate the pages and apply the new state.
[1]1862 */
[55889]1863 uint32_t uState = PGMVIRTANDLER_GET_TYPE(pVM, pCur)->uState;
[1]1864 PPGMRAMRANGE pRamHint = NULL;
[13046]1865 RTGCUINTPTR offPage = ((RTGCUINTPTR)pCur->Core.Key & PAGE_OFFSET_MASK);
[1]1866 RTGCUINTPTR cbLeft = pCur->cb;
1867 for (unsigned iPage = 0; iPage < pCur->cPages; iPage++)
1868 {
1869 PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];
1870 if (pPhys2Virt->Core.Key != NIL_RTGCPHYS)
1871 {
[6927]1872 /*
1873 * Update the page state wrt virtual handlers.
1874 */
1875 PPGMPAGE pPage;
[36891]1876 int rc = pgmPhysGetPageWithHintEx(pVM, pPhys2Virt->Core.Key, &pPage, &pRamHint);
[6927]1877 if ( RT_SUCCESS(rc)
1878 && PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) < uState)
1879 PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, uState);
1880 else
1881 AssertRC(rc);
[1]1882
[6927]1883 /*
1884 * Need to insert the page in the Phys2Virt lookup tree?
1885 */
[1]1886 if (pPhys2Virt->Core.KeyLast == NIL_RTGCPHYS)
1887 {
1888#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1889 AssertRelease(!pPhys2Virt->offNextAlias);
1890#endif
1891 unsigned cbPhys = cbLeft;
1892 if (cbPhys > PAGE_SIZE - offPage)
1893 cbPhys = PAGE_SIZE - offPage;
1894 else
1895 Assert(iPage == pCur->cPages - 1);
1896 pPhys2Virt->Core.KeyLast = pPhys2Virt->Core.Key + cbPhys - 1; /* inclusive */
1897 pPhys2Virt->offNextAlias = PGMPHYS2VIRTHANDLER_IS_HEAD | PGMPHYS2VIRTHANDLER_IN_TREE;
[13062]1898 if (!RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, &pPhys2Virt->Core))
[1]1899 pgmHandlerVirtualInsertAliased(pVM, pPhys2Virt);
1900#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1901 else
[13062]1902 AssertReleaseMsg(RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key) == &pPhys2Virt->Core,
[13824]1903 ("%RGp-%RGp offNextAlias=%#RX32\n",
[1]1904 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias));
1905#endif
[13824]1906 Log2(("PHYS2VIRT: Insert physical range %RGp-%RGp offNextAlias=%#RX32 %s\n",
[1]1907 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
1908 }
1909 }
1910 cbLeft -= PAGE_SIZE - offPage;
1911 offPage = 0;
1912 }
1913
1914 return 0;
1915}
1916
[56384]1917# if defined(VBOX_STRICT) || defined(LOG_ENABLED)
[6906]1918
1919/**
1920 * Worker for pgmHandlerVirtualDumpPhysPages.
1921 *
1922 * @returns 0 (continue enumeration).
1923 * @param pNode The virtual handler node.
1924 * @param pvUser User argument, unused.
1925 */
1926static DECLCALLBACK(int) pgmHandlerVirtualDumpPhysPagesCallback(PAVLROGCPHYSNODECORE pNode, void *pvUser)
1927{
1928 PPGMPHYS2VIRTHANDLER pCur = (PPGMPHYS2VIRTHANDLER)pNode;
1929 PPGMVIRTHANDLER pVirt = (PPGMVIRTHANDLER)((uintptr_t)pCur + pCur->offVirtHandler);
[39078]1930 NOREF(pvUser); NOREF(pVirt);
1931
[13824]1932 Log(("PHYS2VIRT: Range %RGp-%RGp for virtual handler: %s\n", pCur->Core.Key, pCur->Core.KeyLast, pVirt->pszDesc));
[6906]1933 return 0;
1934}
1935
1936
1937/**
1938 * Assertion / logging helper for dumping all the
1939 * virtual handlers to the log.
1940 *
[58122]1941 * @param pVM The cross context VM structure.
[6906]1942 */
1943void pgmHandlerVirtualDumpPhysPages(PVM pVM)
1944{
[13062]1945 RTAvlroGCPhysDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, true /* from left */,
[6906]1946 pgmHandlerVirtualDumpPhysPagesCallback, 0);
1947}
[13232]1948
[56384]1949# endif /* VBOX_STRICT || LOG_ENABLED */
1950#endif /* VBOX_WITH_RAW_MODE */
[6906]1951#ifdef VBOX_STRICT
1952
1953/**
1954 * State structure used by the PGMAssertHandlerAndFlagsInSync() function
1955 * and its AVL enumerators.
1956 */
1957typedef struct PGMAHAFIS
1958{
[6927]1959 /** The current physical address. */
1960 RTGCPHYS GCPhys;
1961 /** The state we've calculated. */
1962 unsigned uVirtStateFound;
1963 /** The state we're matching up to. */
1964 unsigned uVirtState;
1965 /** Number of errors. */
1966 unsigned cErrors;
[41783]1967 /** Pointer to the VM. */
[6906]1968 PVM pVM;
1969} PGMAHAFIS, *PPGMAHAFIS;
1970
[56384]1971# ifdef VBOX_WITH_RAW_MODE
[6927]1972
[56384]1973# if 0 /* unused */
[6906]1974/**
1975 * Verify virtual handler by matching physical address.
1976 *
1977 * @returns 0
1978 * @param pNode Pointer to a PGMVIRTHANDLER.
1979 * @param pvUser Pointer to user parameter.
1980 */
[6927]1981static DECLCALLBACK(int) pgmHandlerVirtualVerifyOneByPhysAddr(PAVLROGCPTRNODECORE pNode, void *pvUser)
[6906]1982{
1983 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
1984 PPGMAHAFIS pState = (PPGMAHAFIS)pvUser;
1985
1986 for (unsigned iPage = 0; iPage < pCur->cPages; iPage++)
1987 {
[32036]1988 if ((pCur->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) == pState->GCPhys)
[6906]1989 {
[6927]1990 unsigned uState = pgmHandlerVirtualCalcState(pCur);
1991 if (pState->uVirtState < uState)
[6906]1992 {
[6927]1993 error
[6906]1994 }
[6927]1995
1996 if (pState->uVirtState == uState)
1997 break; //??
[6906]1998 }
1999 }
2000 return 0;
2001}
[56384]2002# endif /* unused */
[6906]2003
2004
2005/**
[6927]2006 * Verify a virtual handler (enumeration callback).
[6906]2007 *
[6927]2008 * Called by PGMAssertHandlerAndFlagsInSync to check the sanity of all
2009 * the virtual handlers, esp. that the physical addresses matches up.
2010 *
[6906]2011 * @returns 0
2012 * @param pNode Pointer to a PGMVIRTHANDLER.
[6927]2013 * @param pvUser Pointer to a PPGMAHAFIS structure.
[6906]2014 */
[6927]2015static DECLCALLBACK(int) pgmHandlerVirtualVerifyOne(PAVLROGCPTRNODECORE pNode, void *pvUser)
[6906]2016{
[55889]2017 PPGMAHAFIS pState = (PPGMAHAFIS)pvUser;
2018 PVM pVM = pState->pVM;
2019 PPGMVIRTHANDLER pVirt = (PPGMVIRTHANDLER)pNode;
2020 PPGMVIRTHANDLERTYPEINT pType = PGMVIRTANDLER_GET_TYPE(pVM, pVirt);
[6906]2021
2022 /*
[6927]2023 * Validate the type and calc state.
[6906]2024 */
[55889]2025 switch (pType->enmKind)
[6906]2026 {
[55889]2027 case PGMVIRTHANDLERKIND_WRITE:
2028 case PGMVIRTHANDLERKIND_ALL:
[6927]2029 break;
[6906]2030 default:
[55889]2031 AssertMsgFailed(("unknown/wrong enmKind=%d\n", pType->enmKind));
[6927]2032 pState->cErrors++;
[6906]2033 return 0;
2034 }
[55889]2035 const uint32_t uState = pType->uState;
[6906]2036
2037 /*
[6927]2038 * Check key alignment.
[6906]2039 */
[13046]2040 if ( (pVirt->aPhysToVirt[0].Core.Key & PAGE_OFFSET_MASK) != ((RTGCUINTPTR)pVirt->Core.Key & PAGE_OFFSET_MASK)
[6927]2041 && pVirt->aPhysToVirt[0].Core.Key != NIL_RTGCPHYS)
2042 {
[13046]2043 AssertMsgFailed(("virt handler phys has incorrect key! %RGp %RGv %s\n",
2044 pVirt->aPhysToVirt[0].Core.Key, pVirt->Core.Key, R3STRING(pVirt->pszDesc)));
[6927]2045 pState->cErrors++;
2046 }
2047
[13046]2048 if ( (pVirt->aPhysToVirt[pVirt->cPages - 1].Core.KeyLast & PAGE_OFFSET_MASK) != ((RTGCUINTPTR)pVirt->Core.KeyLast & PAGE_OFFSET_MASK)
[6927]2049 && pVirt->aPhysToVirt[pVirt->cPages - 1].Core.Key != NIL_RTGCPHYS)
2050 {
[13046]2051 AssertMsgFailed(("virt handler phys has incorrect key! %RGp %RGv %s\n",
2052 pVirt->aPhysToVirt[pVirt->cPages - 1].Core.KeyLast, pVirt->Core.KeyLast, R3STRING(pVirt->pszDesc)));
[6927]2053 pState->cErrors++;
2054 }
2055
2056 /*
2057 * Check pages for sanity and state.
2058 */
[13046]2059 RTGCUINTPTR GCPtr = (RTGCUINTPTR)pVirt->Core.Key;
[6906]2060 for (unsigned iPage = 0; iPage < pVirt->cPages; iPage++, GCPtr += PAGE_SIZE)
2061 {
[22890]2062 for (VMCPUID i = 0; i < pVM->cCpus; i++)
[6906]2063 {
[18927]2064 PVMCPU pVCpu = &pVM->aCpus[i];
2065
2066 RTGCPHYS GCPhysGst;
2067 uint64_t fGst;
[18988]2068 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, &fGst, &GCPhysGst);
[18927]2069 if ( rc == VERR_PAGE_NOT_PRESENT
2070 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
[6906]2071 {
[18927]2072 if (pVirt->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
2073 {
2074 AssertMsgFailed(("virt handler phys out of sync. %RGp GCPhysNew=~0 iPage=%#x %RGv %s\n",
2075 pVirt->aPhysToVirt[iPage].Core.Key, iPage, GCPtr, R3STRING(pVirt->pszDesc)));
2076 pState->cErrors++;
2077 }
2078 continue;
2079 }
2080
2081 AssertRCReturn(rc, 0);
[32036]2082 if ((pVirt->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) != GCPhysGst)
[18927]2083 {
2084 AssertMsgFailed(("virt handler phys out of sync. %RGp GCPhysGst=%RGp iPage=%#x %RGv %s\n",
2085 pVirt->aPhysToVirt[iPage].Core.Key, GCPhysGst, iPage, GCPtr, R3STRING(pVirt->pszDesc)));
[6906]2086 pState->cErrors++;
[18927]2087 continue;
[6906]2088 }
2089
[36891]2090 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysGst);
[18927]2091 if (!pPage)
2092 {
2093 AssertMsgFailed(("virt handler getting ram flags. GCPhysGst=%RGp iPage=%#x %RGv %s\n",
2094 GCPhysGst, iPage, GCPtr, R3STRING(pVirt->pszDesc)));
2095 pState->cErrors++;
2096 continue;
2097 }
[6906]2098
[18927]2099 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) < uState)
2100 {
2101 AssertMsgFailed(("virt handler state mismatch. pPage=%R[pgmpage] GCPhysGst=%RGp iPage=%#x %RGv state=%d expected>=%d %s\n",
2102 pPage, GCPhysGst, iPage, GCPtr, PGM_PAGE_GET_HNDL_VIRT_STATE(pPage), uState, R3STRING(pVirt->pszDesc)));
2103 pState->cErrors++;
2104 continue;
2105 }
2106 } /* for each VCPU */
[6906]2107 } /* for pages in virtual mapping. */
2108
2109 return 0;
2110}
2111
[56384]2112# endif /* VBOX_WITH_RAW_MODE */
[6906]2113
2114/**
2115 * Asserts that the handlers+guest-page-tables == ramrange-flags and
2116 * that the physical addresses associated with virtual handlers are correct.
2117 *
2118 * @returns Number of mismatches.
[58122]2119 * @param pVM The cross context VM structure.
[6906]2120 */
[12989]2121VMMDECL(unsigned) PGMAssertHandlerAndFlagsInSync(PVM pVM)
[6906]2122{
2123 PPGM pPGM = &pVM->pgm.s;
2124 PGMAHAFIS State;
[6927]2125 State.GCPhys = 0;
2126 State.uVirtState = 0;
2127 State.uVirtStateFound = 0;
[6906]2128 State.cErrors = 0;
[6927]2129 State.pVM = pVM;
[6906]2130
[37354]2131 PGM_LOCK_ASSERT_OWNER(pVM);
[20767]2132
[6906]2133 /*
2134 * Check the RAM flags against the handlers.
2135 */
[36891]2136 for (PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext))
[6906]2137 {
[36891]2138 const uint32_t cPages = pRam->cb >> PAGE_SHIFT;
2139 for (uint32_t iPage = 0; iPage < cPages; iPage++)
[6906]2140 {
2141 PGMPAGE const *pPage = &pRam->aPages[iPage];
[7642]2142 if (PGM_PAGE_HAS_ANY_HANDLERS(pPage))
[6906]2143 {
2144 State.GCPhys = pRam->GCPhys + (iPage << PAGE_SHIFT);
2145
2146 /*
2147 * Physical first - calculate the state based on the handlers
2148 * active on the page, then compare.
2149 */
[7642]2150 if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage))
[6906]2151 {
2152 /* the first */
[13062]2153 PPGMPHYSHANDLER pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pPGM->CTX_SUFF(pTrees)->PhysHandlers, State.GCPhys);
[6906]2154 if (!pPhys)
2155 {
[13062]2156 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTX_SUFF(pTrees)->PhysHandlers, State.GCPhys, true);
[6906]2157 if ( pPhys
2158 && pPhys->Core.Key > (State.GCPhys + PAGE_SIZE - 1))
2159 pPhys = NULL;
2160 Assert(!pPhys || pPhys->Core.Key >= State.GCPhys);
2161 }
2162 if (pPhys)
2163 {
[55493]2164 PPGMPHYSHANDLERTYPEINT pPhysType = (PPGMPHYSHANDLERTYPEINT)MMHyperHeapOffsetToPtr(pVM, pPhys->hType);
2165 unsigned uState = pPhysType->uState;
[6906]2166
2167 /* more? */
2168 while (pPhys->Core.KeyLast < (State.GCPhys | PAGE_OFFSET_MASK))
2169 {
[13062]2170 PPGMPHYSHANDLER pPhys2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTX_SUFF(pTrees)->PhysHandlers,
[6906]2171 pPhys->Core.KeyLast + 1, true);
2172 if ( !pPhys2
2173 || pPhys2->Core.Key > (State.GCPhys | PAGE_OFFSET_MASK))
2174 break;
[55493]2175 PPGMPHYSHANDLERTYPEINT pPhysType2 = (PPGMPHYSHANDLERTYPEINT)MMHyperHeapOffsetToPtr(pVM, pPhys2->hType);
2176 uState = RT_MAX(uState, pPhysType2->uState);
[6906]2177 pPhys = pPhys2;
2178 }
2179
2180 /* compare.*/
2181 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != uState
2182 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
2183 {
2184 AssertMsgFailed(("ram range vs phys handler flags mismatch. GCPhys=%RGp state=%d expected=%d %s\n",
[55493]2185 State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), uState, pPhysType->pszDesc));
[6906]2186 State.cErrors++;
2187 }
2188
[56384]2189# ifdef VBOX_WITH_REM
2190# ifdef IN_RING3
[6906]2191 /* validate that REM is handling it. */
2192 if ( !REMR3IsPageAccessHandled(pVM, State.GCPhys)
[17372]2193 /* ignore shadowed ROM for the time being. */
[18666]2194 && PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_ROM_SHADOW)
[6906]2195 {
2196 AssertMsgFailed(("ram range vs phys handler REM mismatch. GCPhys=%RGp state=%d %s\n",
[55493]2197 State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), pPhysType->pszDesc));
[6906]2198 State.cErrors++;
2199 }
[56384]2200# endif
[40274]2201# endif
[6906]2202 }
2203 else
2204 {
2205 AssertMsgFailed(("ram range vs phys handler mismatch. no handler for GCPhys=%RGp\n", State.GCPhys));
2206 State.cErrors++;
2207 }
2208 }
2209
[6927]2210 /*
2211 * Virtual handlers.
2212 */
[7642]2213 if (PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage))
[6906]2214 {
[6927]2215 State.uVirtState = PGM_PAGE_GET_HNDL_VIRT_STATE(pPage);
[56384]2216
[6927]2217 /* locate all the matching physical ranges. */
2218 State.uVirtStateFound = PGM_PAGE_HNDL_VIRT_STATE_NONE;
[56384]2219# ifdef VBOX_WITH_RAW_MODE
[6927]2220 RTGCPHYS GCPhysKey = State.GCPhys;
2221 for (;;)
[6906]2222 {
[13062]2223 PPGMPHYS2VIRTHANDLER pPhys2Virt = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
[6927]2224 GCPhysKey, true /* above-or-equal */);
2225 if ( !pPhys2Virt
[32036]2226 || (pPhys2Virt->Core.Key & X86_PTE_PAE_PG_MASK) != State.GCPhys)
[6927]2227 break;
2228
2229 /* the head */
2230 GCPhysKey = pPhys2Virt->Core.KeyLast;
2231 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)((uintptr_t)pPhys2Virt + pPhys2Virt->offVirtHandler);
[55889]2232 unsigned uState = PGMVIRTANDLER_GET_TYPE(pVM, pCur)->uState;
[6927]2233 State.uVirtStateFound = RT_MAX(State.uVirtStateFound, uState);
2234
2235 /* any aliases */
2236 while (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
2237 {
2238 pPhys2Virt = (PPGMPHYS2VIRTHANDLER)((uintptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
2239 pCur = (PPGMVIRTHANDLER)((uintptr_t)pPhys2Virt + pPhys2Virt->offVirtHandler);
[55889]2240 uState = PGMVIRTANDLER_GET_TYPE(pVM, pCur)->uState;
[6927]2241 State.uVirtStateFound = RT_MAX(State.uVirtStateFound, uState);
2242 }
2243
2244 /* done? */
[32036]2245 if ((GCPhysKey & X86_PTE_PAE_PG_MASK) != State.GCPhys)
[6927]2246 break;
2247 }
[56384]2248# endif /* VBOX_WITH_RAW_MODE */
[6927]2249 if (State.uVirtState != State.uVirtStateFound)
2250 {
2251 AssertMsgFailed(("ram range vs virt handler flags mismatch. GCPhys=%RGp uVirtState=%#x uVirtStateFound=%#x\n",
2252 State.GCPhys, State.uVirtState, State.uVirtStateFound));
[6906]2253 State.cErrors++;
2254 }
2255 }
2256 }
2257 } /* foreach page in ram range. */
2258 } /* foreach ram range. */
2259
[56384]2260# ifdef VBOX_WITH_RAW_MODE
[6906]2261 /*
[6927]2262 * Check that the physical addresses of the virtual handlers match up
2263 * and that they are otherwise sane.
[6906]2264 */
[13062]2265 RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, true, pgmHandlerVirtualVerifyOne, &State);
[56384]2266# endif
[6906]2267
2268 /*
2269 * Do the reverse check for physical handlers.
2270 */
2271 /** @todo */
2272
2273 return State.cErrors;
2274}
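
/*
 * Usage sketch (hypothetical): running the strict checker from a debug
 * path with the PGM lock held, as PGM_LOCK_ASSERT_OWNER above requires.
 */
#if 0 /* example sketch */
    pgmLock(pVM);
    unsigned const cErrors = PGMAssertHandlerAndFlagsInSync(pVM);
    AssertMsg(cErrors == 0, ("%u handler/flag mismatches\n", cErrors));
    pgmUnlock(pVM);
#endif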
2275
2276#endif /* VBOX_STRICT */
2277