VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp @ 25647

Last change on this file since 25647 was 25647, checked in by vboxsync, 15 years ago:
Some more doxygen fixes, now for Core.docs.
1/* $Id: PGMAllHandler.cpp 25647 2010-01-05 09:59:19Z vboxsync $ */
2/** @file
3 * PGM - Page Manager / Monitor, Access Handlers.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_PGM
27#include <VBox/dbgf.h>
28#include <VBox/pgm.h>
29#include <VBox/iom.h>
30#include <VBox/mm.h>
31#include <VBox/em.h>
32#include <VBox/stam.h>
33#include <VBox/rem.h>
34#include <VBox/dbgf.h>
35#include <VBox/rem.h>
36#include "PGMInternal.h"
37#include <VBox/vm.h>
38
39#include <VBox/log.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/string.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45#include <VBox/selm.h>
46
47
48/*******************************************************************************
49* Internal Functions *
50*******************************************************************************/
51static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVM pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam);
52static void pgmHandlerPhysicalDeregisterNotifyREM(PVM pVM, PPGMPHYSHANDLER pCur);
53static void pgmHandlerPhysicalResetRamFlags(PVM pVM, PPGMPHYSHANDLER pCur);
54
55
56
57/**
58 * Register an access handler for a physical range.
59 *
60 * @returns VBox status code.
61 * @retval VINF_SUCCESS when successfully installed.
62 * @retval VINF_PGM_GCPHYS_ALIASED when the shadow PTs couldn't be updated because
63 * the guest page is aliased and/or mapped by multiple PTs. A CR3 sync has been
64 * flagged together with a pool clearing.
65 * @retval VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
66 * one. A debug assertion is raised.
67 *
68 * @param pVM VM Handle.
69 * @param enmType Handler type. Any of the PGMPHYSHANDLERTYPE_PHYSICAL* enums.
70 * @param GCPhys Start physical address.
71 * @param GCPhysLast Last physical address. (inclusive)
72 * @param pfnHandlerR3 The R3 handler.
73 * @param pvUserR3 User argument to the R3 handler.
74 * @param pfnHandlerR0 The R0 handler.
75 * @param pvUserR0 User argument to the R0 handler.
76 * @param pfnHandlerRC The RC handler.
77 * @param pvUserRC User argument to the RC handler. This can be a value
78 * less than 0x10000 or a (non-null) pointer that is
79 * automatically relocated.
80 * @param pszDesc Pointer to description string. This must not be freed.
81 */
82VMMDECL(int) PGMHandlerPhysicalRegisterEx(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
83 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
84 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
85 RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnHandlerRC, RTRCPTR pvUserRC,
86 R3PTRTYPE(const char *) pszDesc)
87{
88 Log(("PGMHandlerPhysicalRegisterEx: enmType=%d GCPhys=%RGp GCPhysLast=%RGp pfnHandlerR3=%RHv pvUserR3=%RHv pfnHandlerR0=%RHv pvUserR0=%RHv pfnHandlerGC=%RRv pvUserGC=%RRv pszDesc=%s\n",
89 enmType, GCPhys, GCPhysLast, pfnHandlerR3, pvUserR3, pfnHandlerR0, pvUserR0, pfnHandlerRC, pvUserRC, R3STRING(pszDesc)));
90
91 /*
92 * Validate input.
93 */
94 AssertMsgReturn(GCPhys < GCPhysLast, ("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast), VERR_INVALID_PARAMETER);
95 switch (enmType)
96 {
97 case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
98 break;
99 case PGMPHYSHANDLERTYPE_MMIO:
100 case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
101 /* Simplification in PGMPhysRead among other places. */
102 AssertMsgReturn(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_INVALID_PARAMETER);
103 AssertMsgReturn((GCPhysLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK, ("%RGp\n", GCPhysLast), VERR_INVALID_PARAMETER);
104 break;
105 default:
106 AssertMsgFailed(("Invalid input enmType=%d!\n", enmType));
107 return VERR_INVALID_PARAMETER;
108 }
109 AssertMsgReturn( (RTRCUINTPTR)pvUserRC < 0x10000
110 || MMHyperR3ToRC(pVM, MMHyperRCToR3(pVM, pvUserRC)) == pvUserRC,
111 ("Not RC pointer! pvUserRC=%RRv\n", pvUserRC),
112 VERR_INVALID_PARAMETER);
113 AssertMsgReturn( (RTR0UINTPTR)pvUserR0 < 0x10000
114 || MMHyperR3ToR0(pVM, MMHyperR0ToR3(pVM, pvUserR0)) == pvUserR0,
115 ("Not R0 pointer! pvUserR0=%RHv\n", pvUserR0),
116 VERR_INVALID_PARAMETER);
117 AssertPtrReturn(pfnHandlerR3, VERR_INVALID_POINTER);
118 AssertReturn(pfnHandlerR0, VERR_INVALID_PARAMETER);
119 AssertReturn(pfnHandlerRC, VERR_INVALID_PARAMETER);
120
121 /*
122 * We require the range to be within registered ram.
123 * There is no apparent need to support ranges which cover more than one ram range.
124 */
125 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
126 while (pRam && GCPhys > pRam->GCPhysLast)
127 pRam = pRam->CTX_SUFF(pNext);
128 if ( !pRam
129 || GCPhysLast < pRam->GCPhys
130 || GCPhys > pRam->GCPhysLast)
131 {
132#ifdef IN_RING3
133 DBGFR3Info(pVM, "phys", NULL, NULL);
134#endif
135 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
136 return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
137 }
138
139 /*
140 * Allocate and initialize the new entry.
141 */
142 PPGMPHYSHANDLER pNew;
143 int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
144 if (RT_FAILURE(rc))
145 return rc;
146
147 pNew->Core.Key = GCPhys;
148 pNew->Core.KeyLast = GCPhysLast;
149 pNew->enmType = enmType;
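/* cPages counts the whole pages covered: the distance from the page-aligned
   start to the inclusive end, rounded up to page granularity. For example,
   GCPhys=0x1080 and GCPhysLast=0x3fff give (0x3fff - 0x1000 + 0x1000) >> 12 = 3 pages. */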
150 pNew->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
151 pNew->pfnHandlerR3 = pfnHandlerR3;
152 pNew->pvUserR3 = pvUserR3;
153 pNew->pfnHandlerR0 = pfnHandlerR0;
154 pNew->pvUserR0 = pvUserR0;
155 pNew->pfnHandlerRC = pfnHandlerRC;
156 pNew->pvUserRC = pvUserRC;
157 pNew->pszDesc = pszDesc;
158
159 pgmLock(pVM);
160
161 /*
162 * Try insert into list.
163 */
164 if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pNew->Core))
165 {
166 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pNew, pRam);
167 if (rc == VINF_PGM_SYNC_CR3)
168 rc = VINF_PGM_GCPHYS_ALIASED;
169 pgmUnlock(pVM);
170#ifndef IN_RING3
171 REMNotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, GCPhysLast - GCPhys + 1, !!pfnHandlerR3);
172#else
173 REMR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, GCPhysLast - GCPhys + 1, !!pfnHandlerR3);
174#endif
175 if (rc != VINF_SUCCESS)
176 Log(("PGMHandlerPhysicalRegisterEx: returns %Rrc (%RGp-%RGp)\n", rc, GCPhys, GCPhysLast));
177 return rc;
178 }
179
180 pgmUnlock(pVM);
181
182#if defined(IN_RING3) && defined(VBOX_STRICT)
183 DBGFR3Info(pVM, "handlers", "phys nostats", NULL);
184#endif
185 AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp pszDesc=%s\n", GCPhys, GCPhysLast, pszDesc));
186 MMHyperFree(pVM, pNew);
187 return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
188}
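/*
 * Illustrative sketch, not part of the original file: how a ring-3 caller might
 * register a write handler over a four-page region with this API. The device
 * structure, handler names and addresses below are hypothetical; only the
 * PGMHandlerPhysicalRegisterEx signature is taken from the code above.
 */
#if 0 /* example only */
static int exampleRegisterWriteHandler(PVM pVM, MYDEVICE *pDevice)
{
    RTGCPHYS GCPhys     = 0xe0000000;                  /* page-aligned start */
    RTGCPHYS GCPhysLast = GCPhys + 4 * PAGE_SIZE - 1;  /* inclusive last byte */
    return PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
                                        GCPhys, GCPhysLast,
                                        exampleHandlerR3, pDevice->pvUserR3,
                                        exampleHandlerR0, pDevice->pvUserR0,
                                        exampleHandlerRC, pDevice->pvUserRC,
                                        "Example write-monitored region");
}
#endif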
189
190
191/**
192 * Sets ram range flags and attempts updating shadow PTs.
193 *
194 * @returns VBox status code.
195 * @retval VINF_SUCCESS when the shadow PTs were successfully updated.
196 * @retval VINF_PGM_SYNC_CR3 when the shadow PTs couldn't be updated because
197 * the guest page is aliased and/or mapped by multiple PTs. FFs set.
198 * @param pVM The VM handle.
199 * @param pCur The physical handler.
200 * @param pRam The RAM range.
201 */
202static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVM pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam)
203{
204 /*
205 * Iterate the guest ram pages updating the flags and flushing PT entries
206 * mapping the page.
207 */
208 bool fFlushTLBs = false;
209 int rc = VINF_SUCCESS;
210 const unsigned uState = pgmHandlerPhysicalCalcState(pCur);
211 uint32_t cPages = pCur->cPages;
212 uint32_t i = (pCur->Core.Key - pRam->GCPhys) >> PAGE_SHIFT;
213 for (;;)
214 {
215 PPGMPAGE pPage = &pRam->aPages[i];
216 AssertMsg(pCur->enmType != PGMPHYSHANDLERTYPE_MMIO || PGM_PAGE_IS_MMIO(pPage),
217 ("%RGp %R[pgmpage]\n", pRam->GCPhys + (i << PAGE_SHIFT), pPage));
218
219 /* Only do upgrades. */
220 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
221 {
222 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
223
224 int rc2 = pgmPoolTrackUpdateGCPhys(pVM, pPage, false /* allow updates of PTEs (instead of flushing) */, &fFlushTLBs);
225 if (rc2 != VINF_SUCCESS && rc == VINF_SUCCESS)
226 rc = rc2;
227 }
228
229 /* next */
230 if (--cPages == 0)
231 break;
232 i++;
233 }
234
235 if (fFlushTLBs && rc == VINF_SUCCESS)
236 {
237 PGM_INVL_ALL_VCPU_TLBS(pVM);
238 Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: flushing guest TLBs\n"));
239 }
240 else
241 {
242 Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: doesn't flush guest TLBs. rc=%Rrc\n", rc));
243 }
244 return rc;
245}
246
247
248/**
249 * Deregister a physical page access handler.
250 *
251 * @returns VBox status code.
252 * @param pVM VM Handle.
253 * @param GCPhys Start physical address.
254 */
255VMMDECL(int) PGMHandlerPhysicalDeregister(PVM pVM, RTGCPHYS GCPhys)
256{
257 /*
258 * Find the handler.
259 */
260 pgmLock(pVM);
261 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
262 if (pCur)
263 {
264 LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %RGp-%RGp %s\n",
265 pCur->Core.Key, pCur->Core.KeyLast, R3STRING(pCur->pszDesc)));
266
267 /*
268 * Clear the page bits and notify the REM about this change.
269 */
270 pgmHandlerPhysicalResetRamFlags(pVM, pCur);
271 pgmHandlerPhysicalDeregisterNotifyREM(pVM, pCur);
272 MMHyperFree(pVM, pCur);
273 pgmUnlock(pVM);
274 return VINF_SUCCESS;
275 }
276 pgmUnlock(pVM);
277
278 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
279 return VERR_PGM_HANDLER_NOT_FOUND;
280}
281
282
283/**
284 * Shared code with modify.
285 */
286static void pgmHandlerPhysicalDeregisterNotifyREM(PVM pVM, PPGMPHYSHANDLER pCur)
287{
288 RTGCPHYS GCPhysStart = pCur->Core.Key;
289 RTGCPHYS GCPhysLast = pCur->Core.KeyLast;
290
291 /*
292 * Page align the range.
293 *
294 * Since we've reset (recalculated) the physical handler state of all pages
295 * we can make use of the page states to figure out whether a page should be
296 * included in the REM notification or not.
297 */
298 if ( (pCur->Core.Key & PAGE_OFFSET_MASK)
299 || ((pCur->Core.KeyLast + 1) & PAGE_OFFSET_MASK))
300 {
301 Assert(pCur->enmType != PGMPHYSHANDLERTYPE_MMIO);
302
303 if (GCPhysStart & PAGE_OFFSET_MASK)
304 {
305 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysStart);
306 if ( pPage
307 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
308 {
309 RTGCPHYS GCPhys = (GCPhysStart + (PAGE_SIZE - 1)) & X86_PTE_PAE_PG_MASK;
310 if ( GCPhys > GCPhysLast
311 || GCPhys < GCPhysStart)
312 return;
313 GCPhysStart = GCPhys;
314 }
315 else
316 GCPhysStart &= X86_PTE_PAE_PG_MASK;
317 Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
318 }
319
320 if (GCPhysLast & PAGE_OFFSET_MASK)
321 {
322 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysLast);
323 if ( pPage
324 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
325 {
326 RTGCPHYS GCPhys = (GCPhysLast & X86_PTE_PAE_PG_MASK) - 1;
327 if ( GCPhys < GCPhysStart
328 || GCPhys > GCPhysLast)
329 return;
330 GCPhysLast = GCPhys;
331 }
332 else
333 GCPhysLast |= PAGE_OFFSET_MASK;
334 Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
335 }
336 }
337
338 /*
339 * Tell REM.
340 */
341 const bool fRestoreAsRAM = pCur->pfnHandlerR3
342 && pCur->enmType != PGMPHYSHANDLERTYPE_MMIO; /** @todo this isn't entirely correct. */
343#ifndef IN_RING3
344 REMNotifyHandlerPhysicalDeregister(pVM, pCur->enmType, GCPhysStart, GCPhysLast - GCPhysStart + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
345#else
346 REMR3NotifyHandlerPhysicalDeregister(pVM, pCur->enmType, GCPhysStart, GCPhysLast - GCPhysStart + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
347#endif
348}
349
350
351/**
352 * pgmHandlerPhysicalResetRamFlags helper that checks for
353 * other handlers on edge pages.
354 */
355DECLINLINE(void) pgmHandlerPhysicalRecalcPageState(PPGM pPGM, RTGCPHYS GCPhys, bool fAbove, PPGMRAMRANGE *ppRamHint)
356{
357 /*
358 * Look for other handlers.
359 */
360 unsigned uState = PGM_PAGE_HNDL_PHYS_STATE_NONE;
361 for (;;)
362 {
363 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTX_SUFF(pTrees)->PhysHandlers, GCPhys, fAbove);
364 if ( !pCur
365 || ((fAbove ? pCur->Core.Key : pCur->Core.KeyLast) >> PAGE_SHIFT) != (GCPhys >> PAGE_SHIFT))
366 break;
367 unsigned uThisState = pgmHandlerPhysicalCalcState(pCur);
368 uState = RT_MAX(uState, uThisState);
369
370 /* next? */
371 RTGCPHYS GCPhysNext = fAbove
372 ? pCur->Core.KeyLast + 1
373 : pCur->Core.Key - 1;
374 if ((GCPhysNext >> PAGE_SHIFT) != (GCPhys >> PAGE_SHIFT))
375 break;
376 GCPhys = GCPhysNext;
377 }
378
379 /*
380 * Update if we found something that is a higher priority
381 * state than the current.
382 */
383 if (uState != PGM_PAGE_HNDL_PHYS_STATE_NONE)
384 {
385 PPGMPAGE pPage;
386 int rc = pgmPhysGetPageWithHintEx(pPGM, GCPhys, &pPage, ppRamHint);
387 if ( RT_SUCCESS(rc)
388 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
389 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
390 else
391 AssertRC(rc);
392 }
393}
394
395
396/**
397 * Resets an aliased page.
398 *
399 * @param pVM The VM.
400 * @param pPage The page.
401 * @param GCPhysPage The page address in case it comes in handy.
402 */
403void pgmHandlerPhysicalResetAliasedPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage)
404{
405 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO);
406 Assert(PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
407
408 /*
409 * Flush any shadow page table references *first*.
410 */
411 bool fFlushTLBs = false;
412 int rc = pgmPoolTrackFlushGCPhys(pVM, pPage, &fFlushTLBs);
413 AssertLogRelRCReturnVoid(rc);
414# ifdef IN_RC
415 if (fFlushTLBs && rc != VINF_PGM_SYNC_CR3)
416 PGM_INVL_VCPU_TLBS(VMMGetCpu0(pVM));
417# else
418 HWACCMFlushTLBOnAllVCpus(pVM);
419# endif
420
421 /*
422 * Make it an MMIO/Zero page.
423 */
424 PGM_PAGE_SET_HCPHYS(pPage, pVM->pgm.s.HCPhysZeroPg);
425 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_MMIO);
426 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ZERO);
427 PGM_PAGE_SET_PAGEID(pPage, NIL_GMM_PAGEID);
428 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_ALL);
429
430 /* Flush its TLB entry. */
431 PGMPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
432
433 NOREF(GCPhysPage);
434}
435
436
437/**
438 * Resets ram range flags.
439 *
442 * @param pVM The VM handle.
443 * @param pCur The physical handler.
444 *
445 * @remark We don't start messing with the shadow page tables, as we've already got code
446 * in Trap0e which deals with out of sync handler flags (originally conceived for
447 * global pages).
448 */
449static void pgmHandlerPhysicalResetRamFlags(PVM pVM, PPGMPHYSHANDLER pCur)
450{
451 /*
452 * Iterate the guest ram pages updating the state.
453 */
454 RTUINT cPages = pCur->cPages;
455 RTGCPHYS GCPhys = pCur->Core.Key;
456 PPGMRAMRANGE pRamHint = NULL;
457 PPGM pPGM = &pVM->pgm.s;
458 for (;;)
459 {
460 PPGMPAGE pPage;
461 int rc = pgmPhysGetPageWithHintEx(pPGM, GCPhys, &pPage, &pRamHint);
462 if (RT_SUCCESS(rc))
463 {
464 /* Reset MMIO2 for MMIO pages to MMIO, since this aliasing is our business.
465 (We don't flip MMIO to RAM though, that's PGMPhys.cpp's job.) */
466 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
467 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhys);
468 AssertMsg(pCur->enmType != PGMPHYSHANDLERTYPE_MMIO || PGM_PAGE_IS_MMIO(pPage), ("%RGp %R[pgmpage]\n", GCPhys, pPage));
469 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_NONE);
470 }
471 else
472 AssertRC(rc);
473
474 /* next */
475 if (--cPages == 0)
476 break;
477 GCPhys += PAGE_SIZE;
478 }
479
480 /*
481 * Check for partial start and end pages.
482 */
483 if (pCur->Core.Key & PAGE_OFFSET_MASK)
484 pgmHandlerPhysicalRecalcPageState(pPGM, pCur->Core.Key - 1, false /* fAbove */, &pRamHint);
485 if ((pCur->Core.KeyLast & PAGE_OFFSET_MASK) != PAGE_SIZE - 1)
486 pgmHandlerPhysicalRecalcPageState(pPGM, pCur->Core.KeyLast + 1, true /* fAbove */, &pRamHint);
487}
488
489
490/**
491 * Modify a physical page access handler.
492 *
493 * Modification can only be done to the range itself, not the type or anything else.
494 *
495 * @returns VBox status code.
496 * For all return codes other than VERR_PGM_HANDLER_NOT_FOUND and VINF_SUCCESS the range is deregistered
497 * and a new registration must be performed!
498 * @param pVM VM handle.
499 * @param GCPhysCurrent Current location.
500 * @param GCPhys New location.
501 * @param GCPhysLast New last location.
502 */
503VMMDECL(int) PGMHandlerPhysicalModify(PVM pVM, RTGCPHYS GCPhysCurrent, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
504{
505 /*
506 * Remove it.
507 */
508 int rc;
509 pgmLock(pVM);
510 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysCurrent);
511 if (pCur)
512 {
513 /*
514 * Clear the ram flags. (We're gonna move or free it!)
515 */
516 pgmHandlerPhysicalResetRamFlags(pVM, pCur);
517 const bool fRestoreAsRAM = pCur->pfnHandlerR3
518 && pCur->enmType != PGMPHYSHANDLERTYPE_MMIO; /** @todo this isn't entirely correct. */
519
520 /*
521 * Validate the new range, modify and reinsert.
522 */
523 if (GCPhysLast >= GCPhys)
524 {
525 /*
526 * We require the range to be within registered ram.
527 * There is no apparent need to support ranges which cover more than one ram range.
528 */
529 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
530 while (pRam && GCPhys > pRam->GCPhysLast)
531 pRam = pRam->CTX_SUFF(pNext);
532 if ( pRam
533 && GCPhys <= pRam->GCPhysLast
534 && GCPhysLast >= pRam->GCPhys)
535 {
536 pCur->Core.Key = GCPhys;
537 pCur->Core.KeyLast = GCPhysLast;
538 pCur->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + 1) >> PAGE_SHIFT;
539
540 if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pCur->Core))
541 {
542 PGMPHYSHANDLERTYPE enmType = pCur->enmType;
543 RTGCPHYS cb = GCPhysLast - GCPhys + 1;
544 bool fHasHCHandler = !!pCur->pfnHandlerR3;
545
546 /*
547 * Set ram flags, flush shadow PT entries and finally tell REM about this.
548 */
549 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
550 pgmUnlock(pVM);
551
552#ifndef IN_RING3
553 REMNotifyHandlerPhysicalModify(pVM, enmType, GCPhysCurrent, GCPhys, cb,
554 fHasHCHandler, fRestoreAsRAM);
555#else
556 REMR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysCurrent, GCPhys, cb,
557 fHasHCHandler, fRestoreAsRAM);
558#endif
559 PGM_INVL_ALL_VCPU_TLBS(pVM);
560 Log(("PGMHandlerPhysicalModify: GCPhysCurrent=%RGp -> GCPhys=%RGp GCPhysLast=%RGp\n",
561 GCPhysCurrent, GCPhys, GCPhysLast));
562 return VINF_SUCCESS;
563 }
564
565 AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp\n", GCPhys, GCPhysLast));
566 rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
567 }
568 else
569 {
570 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
571 rc = VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
572 }
573 }
574 else
575 {
576 AssertMsgFailed(("Invalid range %RGp-%RGp\n", GCPhys, GCPhysLast));
577 rc = VERR_INVALID_PARAMETER;
578 }
579
580 /*
581 * Invalid new location, free it.
582 * We've only gotta notify REM and free the memory.
583 */
584 pgmHandlerPhysicalDeregisterNotifyREM(pVM, pCur);
585 MMHyperFree(pVM, pCur);
586 }
587 else
588 {
589 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhysCurrent));
590 rc = VERR_PGM_HANDLER_NOT_FOUND;
591 }
592
593 pgmUnlock(pVM);
594 return rc;
595}
596
597
598/**
599 * Changes the callbacks associated with a physical access handler.
600 *
601 * @returns VBox status code.
602 * @param pVM VM Handle.
603 * @param GCPhys Start physical address.
604 * @param pfnHandlerR3 The R3 handler.
605 * @param pvUserR3 User argument to the R3 handler.
606 * @param pfnHandlerR0 The R0 handler.
607 * @param pvUserR0 User argument to the R0 handler.
608 * @param pfnHandlerRC The RC handler.
609 * @param pvUserRC User argument to the RC handler. Values larger than or
610 * equal to 0x10000 will be relocated automatically.
611 * @param pszDesc Pointer to description string. This must not be freed.
612 */
613VMMDECL(int) PGMHandlerPhysicalChangeCallbacks(PVM pVM, RTGCPHYS GCPhys,
614 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
615 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
616 RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnHandlerRC, RTRCPTR pvUserRC,
617 R3PTRTYPE(const char *) pszDesc)
618{
619 /*
620 * Get the handler.
621 */
622 int rc = VINF_SUCCESS;
623 pgmLock(pVM);
624 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
625 if (pCur)
626 {
627 /*
628 * Change callbacks.
629 */
630 pCur->pfnHandlerR3 = pfnHandlerR3;
631 pCur->pvUserR3 = pvUserR3;
632 pCur->pfnHandlerR0 = pfnHandlerR0;
633 pCur->pvUserR0 = pvUserR0;
634 pCur->pfnHandlerRC = pfnHandlerRC;
635 pCur->pvUserRC = pvUserRC;
636 pCur->pszDesc = pszDesc;
637 }
638 else
639 {
640 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
641 rc = VERR_PGM_HANDLER_NOT_FOUND;
642 }
643
644 pgmUnlock(pVM);
645 return rc;
646}
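/*
 * Illustrative sketch, not part of the original file: swapping in new
 * callbacks for an already-registered range without re-registering. The
 * handler names and user pointers are hypothetical.
 */
#if 0 /* example only */
    rc = PGMHandlerPhysicalChangeCallbacks(pVM, GCPhysRegion,
                                           newHandlerR3, pvNewUserR3,
                                           newHandlerR0, pvNewUserR0,
                                           newHandlerRC, pvNewUserRC,
                                           "Example updated handler");
#endif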
647
648
649/**
650 * Splits a physical access handler in two.
651 *
652 * @returns VBox status code.
653 * @param pVM VM Handle.
654 * @param GCPhys Start physical address of the handler.
655 * @param GCPhysSplit The split address.
656 */
657VMMDECL(int) PGMHandlerPhysicalSplit(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysSplit)
658{
659 AssertReturn(GCPhys < GCPhysSplit, VERR_INVALID_PARAMETER);
660
661 /*
662 * Do the allocation without owning the lock.
663 */
664 PPGMPHYSHANDLER pNew;
665 int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
666 if (RT_FAILURE(rc))
667 return rc;
668
669 /*
670 * Get the handler.
671 */
672 pgmLock(pVM);
673 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
674 if (RT_LIKELY(pCur))
675 {
676 if (RT_LIKELY(GCPhysSplit <= pCur->Core.KeyLast))
677 {
678 /*
679 * Create new handler node for the 2nd half.
680 */
681 *pNew = *pCur;
682 pNew->Core.Key = GCPhysSplit;
683 pNew->cPages = (pNew->Core.KeyLast - (pNew->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
684
685 pCur->Core.KeyLast = GCPhysSplit - 1;
686 pCur->cPages = (pCur->Core.KeyLast - (pCur->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
687
688 if (RT_LIKELY(RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pNew->Core)))
689 {
690 LogFlow(("PGMHandlerPhysicalSplit: %RGp-%RGp and %RGp-%RGp\n",
691 pCur->Core.Key, pCur->Core.KeyLast, pNew->Core.Key, pNew->Core.KeyLast));
692 pgmUnlock(pVM);
693 return VINF_SUCCESS;
694 }
695 AssertMsgFailed(("whu?\n"));
696 rc = VERR_INTERNAL_ERROR;
697 }
698 else
699 {
700 AssertMsgFailed(("outside range: %RGp-%RGp split %RGp\n", pCur->Core.Key, pCur->Core.KeyLast, GCPhysSplit));
701 rc = VERR_INVALID_PARAMETER;
702 }
703 }
704 else
705 {
706 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
707 rc = VERR_PGM_HANDLER_NOT_FOUND;
708 }
709 pgmUnlock(pVM);
710 MMHyperFree(pVM, pNew);
711 return rc;
712}
713
714
715/**
716 * Joins up two adjacent physical access handlers which have the same callbacks.
717 *
718 * @returns VBox status code.
719 * @param pVM VM Handle.
720 * @param GCPhys1 Start physical address of the first handler.
721 * @param GCPhys2 Start physical address of the second handler.
722 */
723VMMDECL(int) PGMHandlerPhysicalJoin(PVM pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2)
724{
725 /*
726 * Get the handlers.
727 */
728 int rc;
729 pgmLock(pVM);
730 PPGMPHYSHANDLER pCur1 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys1);
731 if (RT_LIKELY(pCur1))
732 {
733 PPGMPHYSHANDLER pCur2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
734 if (RT_LIKELY(pCur2))
735 {
736 /*
737 * Make sure that they are adjacent, and that they've got the same callbacks.
738 */
739 if (RT_LIKELY(pCur1->Core.KeyLast + 1 == pCur2->Core.Key))
740 {
741 if (RT_LIKELY( pCur1->pfnHandlerRC == pCur2->pfnHandlerRC
742 && pCur1->pfnHandlerR0 == pCur2->pfnHandlerR0
743 && pCur1->pfnHandlerR3 == pCur2->pfnHandlerR3))
744 {
745 PPGMPHYSHANDLER pCur3 = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
746 if (RT_LIKELY(pCur3 == pCur2))
747 {
748 pCur1->Core.KeyLast = pCur2->Core.KeyLast;
749 pCur1->cPages = (pCur1->Core.KeyLast - (pCur1->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
750 LogFlow(("PGMHandlerPhysicalJoin: %RGp-%RGp %RGp-%RGp\n",
751 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
752 MMHyperFree(pVM, pCur2);
753 pgmUnlock(pVM);
754 return VINF_SUCCESS;
755 }
756
757 Assert(pCur3 == pCur2);
758 rc = VERR_INTERNAL_ERROR;
759 }
760 else
761 {
762 AssertMsgFailed(("mismatching handlers\n"));
763 rc = VERR_ACCESS_DENIED;
764 }
765 }
766 else
767 {
768 AssertMsgFailed(("not adjacent: %RGp-%RGp %RGp-%RGp\n",
769 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
770 rc = VERR_INVALID_PARAMETER;
771 }
772 }
773 else
774 {
775 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys2));
776 rc = VERR_PGM_HANDLER_NOT_FOUND;
777 }
778 }
779 else
780 {
781 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys1));
782 rc = VERR_PGM_HANDLER_NOT_FOUND;
783 }
784 pgmUnlock(pVM);
785 return rc;
786
787}
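/*
 * Illustrative sketch, not part of the original file: splitting a registered
 * range and joining it back. The base address is hypothetical. Since
 * PGMHandlerPhysicalSplit copies the callbacks into the second half, the
 * subsequent join passes the same-callbacks check above.
 */
#if 0 /* example only */
    /* 0xa0000000-0xa0003fff was registered earlier; split off the last two pages. */
    rc = PGMHandlerPhysicalSplit(pVM, 0xa0000000, 0xa0002000);
    /* ... handle the two halves independently for a while ... */
    rc = PGMHandlerPhysicalJoin(pVM, 0xa0000000, 0xa0002000);
#endif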
788
789
790/**
791 * Resets any modifications to individual pages in a physical
792 * page access handler region.
793 *
794 * This is used in conjunction with PGMHandlerPhysicalPageTempOff() or
795 * PGMHandlerPhysicalPageAlias().
796 *
797 * @returns VBox status code.
798 * @param pVM VM Handle
799 * @param GCPhys The start address of the handler regions, i.e. what you
800 * passed to PGMR3HandlerPhysicalRegister(),
801 * PGMHandlerPhysicalRegisterEx() or
802 * PGMHandlerPhysicalModify().
803 */
804VMMDECL(int) PGMHandlerPhysicalReset(PVM pVM, RTGCPHYS GCPhys)
805{
806 LogFlow(("PGMHandlerPhysicalReset GCPhys=%RGp\n", GCPhys));
807 pgmLock(pVM);
808
809 /*
810 * Find the handler.
811 */
812 int rc;
813 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
814 if (RT_LIKELY(pCur))
815 {
816 /*
817 * Validate type.
818 */
819 switch (pCur->enmType)
820 {
821 case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
822 case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
823 case PGMPHYSHANDLERTYPE_MMIO: /* NOTE: Only use when clearing MMIO ranges with aliased MMIO2 pages! */
824 {
825 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysHandlerReset)); /** @todo move out of switch */
826 PPGMRAMRANGE pRam = pgmPhysGetRange(&pVM->pgm.s, GCPhys);
827 Assert(pRam);
828 Assert(pRam->GCPhys <= pCur->Core.Key);
829 Assert(pRam->GCPhysLast >= pCur->Core.KeyLast);
830
831 if (pCur->enmType == PGMPHYSHANDLERTYPE_MMIO)
832 {
833 /*
834 * Reset all the PGMPAGETYPE_MMIO2_ALIAS_MMIO pages first and that's it.
835 * This could probably be optimized a bit wrt flushing, but I'm too lazy
836 * to do that now...
837 */
838 PPGMPAGE pPage = &pRam->aPages[(pCur->Core.Key - pRam->GCPhys) >> PAGE_SHIFT];
839 uint32_t cLeft = pCur->cPages;
840 while (cLeft-- > 0)
841 {
842 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
843 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)(uintptr_t)(pPage - &pRam->aPages[0]) << PAGE_SHIFT));
844 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO);
845 pPage++;
846 }
847 }
848 else
849 {
850 /*
851 * Set the flags and flush shadow PT entries.
852 */
853 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
854 }
855
856 rc = VINF_SUCCESS;
857 break;
858 }
859
860 /*
861 * Invalid.
862 */
863 default:
864 AssertMsgFailed(("Invalid type %d! Corruption!\n", pCur->enmType));
865 rc = VERR_INTERNAL_ERROR;
866 break;
867 }
868 }
869 else
870 {
871 AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
872 rc = VERR_PGM_HANDLER_NOT_FOUND;
873 }
874
875 pgmUnlock(pVM);
876 return rc;
877}
878
879
880/**
881 * Temporarily turns off the access monitoring of a page within a monitored
882 * physical write/all page access handler region.
883 *
884 * Use this when no further \#PFs are required for that page. Be aware that
885 * a page directory sync might reset the flags, and turn on access monitoring
886 * for the page.
887 *
888 * The caller must do the required page table modifications.
889 *
890 * @returns VBox status code.
891 * @param pVM VM Handle
892 * @param GCPhys The start address of the access handler. This
893 * must be a fully page aligned range or we risk
894 * messing up other handlers installed for the
895 * start and end pages.
896 * @param GCPhysPage The physical address of the page to turn off
897 * access monitoring for.
898 */
899VMMDECL(int) PGMHandlerPhysicalPageTempOff(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
900{
901 LogFlow(("PGMHandlerPhysicalPageTempOff GCPhys=%RGp\n", GCPhys));
902
903 pgmLock(pVM);
904 /*
905 * Validate the range.
906 */
907 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
908 if (RT_LIKELY(pCur))
909 {
910 if (RT_LIKELY( GCPhysPage >= pCur->Core.Key
911 && GCPhysPage <= pCur->Core.KeyLast))
912 {
913 Assert(!(pCur->Core.Key & PAGE_OFFSET_MASK));
914 Assert((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
915
916 AssertReturnStmt( pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
917 || pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_ALL,
918 pgmUnlock(pVM), VERR_ACCESS_DENIED);
919
920 /*
921 * Change the page status.
922 */
923 PPGMPAGE pPage;
924 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysPage, &pPage);
925 AssertReturnStmt(RT_SUCCESS_NP(rc), pgmUnlock(pVM), rc);
926 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
927 pgmUnlock(pVM);
928 return VINF_SUCCESS;
929 }
930 pgmUnlock(pVM);
931 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
932 GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
933 return VERR_INVALID_PARAMETER;
934 }
935 pgmUnlock(pVM);
936 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
937 return VERR_PGM_HANDLER_NOT_FOUND;
938}
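/*
 * Illustrative sketch, not part of the original file: pairing the call above
 * with PGMHandlerPhysicalReset(). GCPhysRegion and GCPhysHotPage are
 * hypothetical; the first is the registered start address, the second a page
 * inside that range.
 */
#if 0 /* example only */
    /* Stop intercepting a frequently written page inside the monitored region. */
    rc = PGMHandlerPhysicalPageTempOff(pVM, GCPhysRegion, GCPhysHotPage);
    /* ... later, re-arm monitoring for every page in the region. */
    rc = PGMHandlerPhysicalReset(pVM, GCPhysRegion);
#endif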
939
940
941/**
942 * Replaces an MMIO page with an MMIO2 page.
943 *
944 * This is a worker for IOMMMIOMapMMIO2Page that works in a similar way to
945 * PGMHandlerPhysicalPageTempOff but for an MMIO page. Since an MMIO page has no
946 * backing, the caller must provide a replacement page. For various reasons the
947 * replacement page must be an MMIO2 page.
948 *
949 * The caller must do the required page table modifications. You can get away
950 * without making any modifications since it's an MMIO page; the cost is an extra
951 * \#PF which will then resync the page.
952 *
953 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
954 *
955 * The caller may still get handler callbacks even after this call and must be
956 * able to deal correctly with such calls. The reason for these callbacks is
957 * either that we're executing in the recompiler (which doesn't know about this
958 * arrangement) or that we've been restored from saved state (where we won't
959 * save the change).
960 *
961 * @returns VBox status code.
962 * @param pVM The VM handle
963 * @param GCPhys The start address of the access handler. This
964 * must be a fully page aligned range or we risk
965 * messing up other handlers installed for the
966 * start and end pages.
967 * @param GCPhysPage The physical address of the page to turn off
968 * access monitoring for.
969 * @param GCPhysPageRemap The physical address of the MMIO2 page that
970 * serves as backing memory.
971 *
972 * @remark May cause a page pool flush if used on a page that is already
973 * aliased.
974 *
975 * @note This trick only works reliably if the two pages are never ever
976 * mapped in the same page table. If they are the page pool code will
977 * be confused should either of them be flushed. See the special case
978 * of zero page aliasing mentioned in #3170.
979 *
980 */
981VMMDECL(int) PGMHandlerPhysicalPageAlias(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTGCPHYS GCPhysPageRemap)
982{
983/// Assert(!IOMIsLockOwner(pVM)); /* We mustn't own any other locks when calling this */
984
985 pgmLock(pVM);
986 /*
987 * Lookup and validate the range.
988 */
989 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
990 if (RT_LIKELY(pCur))
991 {
992 if (RT_LIKELY( GCPhysPage >= pCur->Core.Key
993 && GCPhysPage <= pCur->Core.KeyLast))
994 {
995 AssertReturnStmt(pCur->enmType == PGMPHYSHANDLERTYPE_MMIO, pgmUnlock(pVM), VERR_ACCESS_DENIED);
996 AssertReturnStmt(!(pCur->Core.Key & PAGE_OFFSET_MASK), pgmUnlock(pVM), VERR_INVALID_PARAMETER);
997 AssertReturnStmt((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK, pgmUnlock(pVM), VERR_INVALID_PARAMETER);
998
999 /*
1000 * Get and validate the two pages.
1001 */
1002 PPGMPAGE pPageRemap;
1003 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysPageRemap, &pPageRemap);
1004 AssertReturnStmt(RT_SUCCESS_NP(rc), pgmUnlock(pVM), rc);
1005 AssertMsgReturnStmt(PGM_PAGE_GET_TYPE(pPageRemap) == PGMPAGETYPE_MMIO2,
1006 ("GCPhysPageRemap=%RGp %R[pgmpage]\n", GCPhysPageRemap, pPageRemap),
1007 pgmUnlock(pVM), VERR_PGM_PHYS_NOT_MMIO2);
1008
1009 PPGMPAGE pPage;
1010 rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysPage, &pPage);
1011 AssertReturnStmt(RT_SUCCESS_NP(rc), pgmUnlock(pVM), rc);
1012 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1013 {
1014 AssertMsgReturnStmt(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO,
1015 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1016 pgmUnlock(pVM), VERR_PGM_PHYS_NOT_MMIO2);
1017 if (PGM_PAGE_GET_HCPHYS(pPage) == PGM_PAGE_GET_HCPHYS(pPageRemap))
1018 {
1019 pgmUnlock(pVM);
1020 return VINF_PGM_HANDLER_ALREADY_ALIASED;
1021 }
1022
1023 /*
1024 * The page is already mapped as some other page, reset it
1025 * to an MMIO/ZERO page before doing the new mapping.
1026 */
1027 Log(("PGMHandlerPhysicalPageAlias: GCPhysPage=%RGp (%R[pgmpage]; %RHp -> %RHp\n",
1028 GCPhysPage, pPage, PGM_PAGE_GET_HCPHYS(pPage), PGM_PAGE_GET_HCPHYS(pPageRemap)));
1029 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage);
1030 }
1031 Assert(PGM_PAGE_IS_ZERO(pPage));
1032
1033 /*
1034 * Do the actual remapping here.
1035 * This page now serves as an alias for the backing memory specified.
1036 */
1037 LogFlow(("PGMHandlerPhysicalPageAlias: %RGp (%R[pgmpage]) alias for %RGp (%R[pgmpage])\n",
1038 GCPhysPage, pPage, GCPhysPageRemap, pPageRemap ));
1039 PGM_PAGE_SET_HCPHYS(pPage, PGM_PAGE_GET_HCPHYS(pPageRemap));
1040 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
1041 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
1042 PGM_PAGE_SET_PAGEID(pPage, PGM_PAGE_GET_PAGEID(pPageRemap));
1043 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1044
1045 /* Flush its TLB entry. */
1046 PGMPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
1047
1048 LogFlow(("PGMHandlerPhysicalPageAlias: => %R[pgmpage]\n", pPage));
1049 pgmUnlock(pVM);
1050 return VINF_SUCCESS;
1051 }
1052
1053 pgmUnlock(pVM);
1054 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
1055 GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
1056 return VERR_INVALID_PARAMETER;
1057 }
1058
1059 pgmUnlock(pVM);
1060 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
1061 return VERR_PGM_HANDLER_NOT_FOUND;
1062}
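/*
 * Illustrative sketch, not part of the original file: backing one page of an
 * MMIO range with an MMIO2 page and undoing it later. GCPhysMmio,
 * GCPhysMmioPage and GCPhysMmio2Page are hypothetical addresses.
 */
#if 0 /* example only */
    /* Alias one MMIO page to MMIO2 backing (IOMMMIOMapMMIO2Page-style usage). */
    rc = PGMHandlerPhysicalPageAlias(pVM, GCPhysMmio, GCPhysMmioPage, GCPhysMmio2Page);
    /* ... when the mapping is torn down, restore the whole range to pure MMIO. */
    rc = PGMHandlerPhysicalReset(pVM, GCPhysMmio);
#endif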
1063
1064/**
1065 * Replaces an MMIO page with an arbitrary HC page.
1066 *
1067 * This is a worker for IOMMMIOMapMMIO2Page that works in a similar way to
1068 * PGMHandlerPhysicalPageTempOff but for an MMIO page. Since an MMIO page has no
1069 * backing, the caller must provide a replacement page. In this variant the
1070 * replacement is an arbitrary host (HC) physical page.
1071 *
1072 * The caller must do the required page table modifications. You can get away
1073 * without making any modifications since it's an MMIO page; the cost is an extra
1074 * \#PF which will then resync the page.
1075 *
1076 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
1077 *
1078 * The caller may still get handler callbacks even after this call and must be
1079 * able to deal correctly with such calls. The reason for these callbacks is
1080 * either that we're executing in the recompiler (which doesn't know about this
1081 * arrangement) or that we've been restored from saved state (where we won't
1082 * save the change).
1083 *
1084 * @returns VBox status code.
1085 * @param pVM The VM handle
1086 * @param GCPhys The start address of the access handler. This
1087 * must be a fully page aligned range or we risk
1088 * messing up other handlers installed for the
1089 * start and end pages.
1090 * @param GCPhysPage The physical address of the page to turn off
1091 * access monitoring for.
1092 * @param HCPhysPageRemap The physical address of the HC page that
1093 * serves as backing memory.
1094 *
1095 * @remark May cause a page pool flush if used on a page that is already
1096 * aliased.
1097 */
1098VMMDECL(int) PGMHandlerPhysicalPageAliasHC(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTHCPHYS HCPhysPageRemap)
1099{
1100/// Assert(!IOMIsLockOwner(pVM)); /* We mustn't own any other locks when calling this */
1101
1102 /*
1103 * Lookup and validate the range.
1104 */
1105 pgmLock(pVM);
1106 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1107 if (RT_LIKELY(pCur))
1108 {
1109 if (RT_LIKELY( GCPhysPage >= pCur->Core.Key
1110 && GCPhysPage <= pCur->Core.KeyLast))
1111 {
1112 AssertReturnStmt(pCur->enmType == PGMPHYSHANDLERTYPE_MMIO, pgmUnlock(pVM), VERR_ACCESS_DENIED);
1113 AssertReturnStmt(!(pCur->Core.Key & PAGE_OFFSET_MASK), pgmUnlock(pVM), VERR_INVALID_PARAMETER);
1114 AssertReturnStmt((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK, pgmUnlock(pVM), VERR_INVALID_PARAMETER);
1115
1116 /*
1117 * Get and validate the pages.
1118 */
1119 PPGMPAGE pPage;
1120 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysPage, &pPage);
1121 AssertReturnStmt(RT_SUCCESS_NP(rc), pgmUnlock(pVM), rc);
1122 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1123 {
1124 pgmUnlock(pVM);
1125 AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO,
1126 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1127 VERR_PGM_PHYS_NOT_MMIO2);
1128 return VINF_PGM_HANDLER_ALREADY_ALIASED;
1129 }
1130 Assert(PGM_PAGE_IS_ZERO(pPage));
1131
1132 /*
1133 * Do the actual remapping here.
1134 * This page now serves as an alias for the backing memory specified.
1135 */
1136 LogFlow(("PGMHandlerPhysicalPageAlias: %RGp (%R[pgmpage]) alias for %RHp\n",
1137 GCPhysPage, pPage, HCPhysPageRemap));
1138 PGM_PAGE_SET_HCPHYS(pPage, HCPhysPageRemap);
1139 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
1140 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
1141 /** @todo hack alert
1142 * This needs to be done properly. Currently we get away with it as the recompiler directly calls
1143 * IOM read and write functions. Access through PGMPhysRead/Write will crash the process.
1144 */
1145 PGM_PAGE_SET_PAGEID(pPage, NIL_GMM_PAGEID);
1146 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1147
1148 /* Flush its TLB entry. */
1149 PGMPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
1150 LogFlow(("PGMHandlerPhysicalPageAliasHC: => %R[pgmpage]\n", pPage));
1151 pgmUnlock(pVM);
1152 return VINF_SUCCESS;
1153 }
1154 pgmUnlock(pVM);
1155 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
1156 GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
1157 return VERR_INVALID_PARAMETER;
1158 }
1159 pgmUnlock(pVM);
1160
1161 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
1162 return VERR_PGM_HANDLER_NOT_FOUND;
1163}
1164
1165
1166/**
1167 * Checks if a physical range is handled.
1168 *
1169 * @returns boolean
1170 * @param pVM VM Handle.
1171 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
1172 * @remarks Caller must take the PGM lock...
1173 * @thread EMT.
1174 */
1175VMMDECL(bool) PGMHandlerPhysicalIsRegistered(PVM pVM, RTGCPHYS GCPhys)
1176{
1177 /*
1178 * Find the handler.
1179 */
1180 pgmLock(pVM);
1181 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1182 if (pCur)
1183 {
1184 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
1185 Assert( pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
1186 || pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_ALL
1187 || pCur->enmType == PGMPHYSHANDLERTYPE_MMIO);
1188 pgmUnlock(pVM);
1189 return true;
1190 }
1191 pgmUnlock(pVM);
1192 return false;
1193}
1194
1195
1196/**
1197 * Checks if it's a disabled all-access handler or a write-access handler at
1198 * the given address.
1199 *
1200 * @returns true if it's an all access handler, false if it's a write access
1201 * handler.
1202 * @param pVM Pointer to the shared VM structure.
1203 * @param GCPhys The address of the page with a disabled handler.
1204 *
1205 * @remarks The caller, PGMR3PhysTlbGCPhys2Ptr, must hold the PGM lock.
1206 */
1207bool pgmHandlerPhysicalIsAll(PVM pVM, RTGCPHYS GCPhys)
1208{
1209 pgmLock(pVM);
1210 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1211 if (!pCur)
1212 {
1213 pgmUnlock(pVM);
1214 AssertFailed();
1215 return true;
1216 }
1217 Assert( pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
1218 || pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_ALL
1219 || pCur->enmType == PGMPHYSHANDLERTYPE_MMIO); /* sanity */
1220 /* Only whole pages can be disabled. */
1221 Assert( pCur->Core.Key <= (GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK)
1222 && pCur->Core.KeyLast >= (GCPhys | PAGE_OFFSET_MASK));
1223
1224 bool bRet = pCur->enmType != PGMPHYSHANDLERTYPE_PHYSICAL_WRITE;
1225 pgmUnlock(pVM);
1226 return bRet;
1227}
1228
1229
1230/**
1231 * Checks if a particular guest VA is being monitored.
1232 *
1233 * @returns true or false
1234 * @param pVM VM handle.
1235 * @param GCPtr Virtual address.
1236 * @remarks Will acquire the PGM lock.
1237 * @thread Any.
1238 */
1239VMMDECL(bool) PGMHandlerVirtualIsRegistered(PVM pVM, RTGCPTR GCPtr)
1240{
1241 pgmLock(pVM);
1242 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, GCPtr);
1243 pgmUnlock(pVM);
1244
1245 return pCur != NULL;
1246}
1247
1248
1249/**
1250 * Searches for a virtual handler with a matching physical address.
1251 *
1252 * @returns VBox status code
1253 * @param pVM The VM handle.
1254 * @param GCPhys GC physical address to search for.
1255 * @param ppVirt Where to store the pointer to the virtual handler structure.
1256 * @param piPage Where to store the index of the cached physical page.
1257 */
1258int pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage)
1259{
1260 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,VirtHandlerSearchByPhys), a);
1261 Assert(ppVirt);
1262
1263 pgmLock(pVM);
1264 PPGMPHYS2VIRTHANDLER pCur;
1265 pCur = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, GCPhys);
1266 if (pCur)
1267 {
1268 /* found a match! */
1269 *ppVirt = (PPGMVIRTHANDLER)((uintptr_t)pCur + pCur->offVirtHandler);
1270 *piPage = pCur - &(*ppVirt)->aPhysToVirt[0];
1271 pgmUnlock(pVM);
1272
1273#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1274 AssertRelease(pCur->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD);
1275#endif
1276 LogFlow(("PHYS2VIRT: found match for %RGp -> %RGv *piPage=%#x\n", GCPhys, (*ppVirt)->Core.Key, *piPage));
1277 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,VirtHandlerSearchByPhys), a);
1278 return VINF_SUCCESS;
1279 }
1280
1281 pgmUnlock(pVM);
1282 *ppVirt = NULL;
1283 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,VirtHandlerSearchByPhys), a);
1284 return VERR_PGM_HANDLER_NOT_FOUND;
1285}
1286
1287
1288/**
1289 * Deal with aliases in phys2virt.
1290 *
1291 * As pointed out by the various todos, this currently only deals with
1292 * aliases where the two ranges match 100%.
1293 *
1294 * @param pVM The VM handle.
1295 * @param pPhys2Virt The node we failed to insert.
1296 */
1297static void pgmHandlerVirtualInsertAliased(PVM pVM, PPGMPHYS2VIRTHANDLER pPhys2Virt)
1298{
1299 /*
1300 * First find the node which is conflicting with us.
1301 */
1302 /** @todo Deal with partial overlapping. (Unlikely situation, so I'm too lazy to do anything about it now.) */
1303 /** @todo check if the current head node covers the ground we do. This is highly unlikely
1304 * and I'm too lazy to implement this now as it will require sorting the list and stuff like that. */
1305 PPGMPHYS2VIRTHANDLER pHead = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
1306#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1307 AssertReleaseMsg(pHead != pPhys2Virt, ("%RGp-%RGp offVirtHandler=%#RX32\n",
1308 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler));
1309#endif
1310 if (RT_UNLIKELY(!pHead || pHead->Core.KeyLast != pPhys2Virt->Core.KeyLast))
1311 {
1312 /** @todo do something clever here... */
1313 LogRel(("pgmHandlerVirtualInsertAliased: %RGp-%RGp\n", pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
1314 pPhys2Virt->offNextAlias = 0;
1315 return;
1316 }
1317
1318 /*
1319 * Insert ourselves as the next node.
1320 */
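    /* offNextAlias encodes a self-relative byte offset to the next alias in the
       low bits (PGMPHYS2VIRTHANDLER_OFF_MASK) together with the IN_TREE/IS_HEAD
       flag bits, hence the masking before the pointer arithmetic below. */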
1321 if (!(pHead->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
1322 pPhys2Virt->offNextAlias = PGMPHYS2VIRTHANDLER_IN_TREE;
1323 else
1324 {
1325 PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pHead + (pHead->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
1326 pPhys2Virt->offNextAlias = ((intptr_t)pNext - (intptr_t)pPhys2Virt)
1327 | PGMPHYS2VIRTHANDLER_IN_TREE;
1328 }
1329 pHead->offNextAlias = ((intptr_t)pPhys2Virt - (intptr_t)pHead)
1330 | (pHead->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
1331 Log(("pgmHandlerVirtualInsertAliased: %RGp-%RGp offNextAlias=%#RX32\n", pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias));
1332}
1333
1334
1335/**
1336 * Resets one virtual handler range.
1337 *
1338 * This is called by HandlerVirtualUpdate when it has detected some kind of
1339 * problem and has started clearing the virtual handler page states (or
1340 * when there have been registration/deregistrations). For this reason this
1341 * function will only update the page status if it's lower than desired.
1342 *
1343 * @returns 0
1344 * @param pNode Pointer to a PGMVIRTHANDLER.
1345 * @param pvUser The VM handle.
1346 */
1347DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser)
1348{
1349 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
1350 PVM pVM = (PVM)pvUser;
1351
1352 Assert(PGMIsLockOwner(pVM));
1353 /*
1354 * Iterate the pages and apply the new state.
1355 */
1356 unsigned uState = pgmHandlerVirtualCalcState(pCur);
1357 PPGMRAMRANGE pRamHint = NULL;
1358 RTGCUINTPTR offPage = ((RTGCUINTPTR)pCur->Core.Key & PAGE_OFFSET_MASK);
1359 RTGCUINTPTR cbLeft = pCur->cb;
1360 for (unsigned iPage = 0; iPage < pCur->cPages; iPage++)
1361 {
1362 PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];
1363 if (pPhys2Virt->Core.Key != NIL_RTGCPHYS)
1364 {
1365 /*
1366 * Update the page state wrt virtual handlers.
1367 */
1368 PPGMPAGE pPage;
1369 int rc = pgmPhysGetPageWithHintEx(&pVM->pgm.s, pPhys2Virt->Core.Key, &pPage, &pRamHint);
1370 if ( RT_SUCCESS(rc)
1371 && PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) < uState)
1372 PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, uState);
1373 else
1374 AssertRC(rc);
1375
1376 /*
1377 * Need to insert the page in the Phys2Virt lookup tree?
1378 */
1379 if (pPhys2Virt->Core.KeyLast == NIL_RTGCPHYS)
1380 {
1381#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1382 AssertRelease(!pPhys2Virt->offNextAlias);
1383#endif
1384 unsigned cbPhys = cbLeft;
1385 if (cbPhys > PAGE_SIZE - offPage)
1386 cbPhys = PAGE_SIZE - offPage;
1387 else
1388 Assert(iPage == pCur->cPages - 1);
1389 pPhys2Virt->Core.KeyLast = pPhys2Virt->Core.Key + cbPhys - 1; /* inclusive */
1390 pPhys2Virt->offNextAlias = PGMPHYS2VIRTHANDLER_IS_HEAD | PGMPHYS2VIRTHANDLER_IN_TREE;
1391 if (!RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, &pPhys2Virt->Core))
1392 pgmHandlerVirtualInsertAliased(pVM, pPhys2Virt);
1393#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1394 else
1395 AssertReleaseMsg(RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key) == &pPhys2Virt->Core,
1396 ("%RGp-%RGp offNextAlias=%#RX32\n",
1397 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias));
1398#endif
1399 Log2(("PHYS2VIRT: Insert physical range %RGp-%RGp offNextAlias=%#RX32 %s\n",
1400 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
1401 }
1402 }
1403 cbLeft -= PAGE_SIZE - offPage;
1404 offPage = 0;
1405 }
1406
1407 return 0;
1408}
1409
1410#if defined(VBOX_STRICT) || defined(LOG_ENABLED)
1411
1412/**
1413 * Worker for pgmHandlerVirtualDumpPhysPages.
1414 *
1415 * @returns 0 (continue enumeration).
1416 * @param pNode The virtual handler node.
1417 * @param pvUser User argument, unused.
1418 */
1419static DECLCALLBACK(int) pgmHandlerVirtualDumpPhysPagesCallback(PAVLROGCPHYSNODECORE pNode, void *pvUser)
1420{
1421 PPGMPHYS2VIRTHANDLER pCur = (PPGMPHYS2VIRTHANDLER)pNode;
1422 PPGMVIRTHANDLER pVirt = (PPGMVIRTHANDLER)((uintptr_t)pCur + pCur->offVirtHandler);
1423 Log(("PHYS2VIRT: Range %RGp-%RGp for virtual handler: %s\n", pCur->Core.Key, pCur->Core.KeyLast, pVirt->pszDesc));
1424 return 0;
1425}
1426
1427
1428/**
1429 * Assertion / logging helper for dumping all the
1430 * virtual handlers to the log.
1431 *
1432 * @param pVM Pointer to the shared VM structure.
1433 */
1434void pgmHandlerVirtualDumpPhysPages(PVM pVM)
1435{
1436 RTAvlroGCPhysDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, true /* from left */,
1437 pgmHandlerVirtualDumpPhysPagesCallback, 0);
1438}
1439
1440#endif /* VBOX_STRICT || LOG_ENABLED */
1441#ifdef VBOX_STRICT
1442
1443/**
1444 * State structure used by the PGMAssertHandlerAndFlagsInSync() function
1445 * and its AVL enumerators.
1446 */
1447typedef struct PGMAHAFIS
1448{
1449 /** The current physical address. */
1450 RTGCPHYS GCPhys;
1451 /** The state we've calculated. */
1452 unsigned uVirtStateFound;
1453 /** The state we're matching up to. */
1454 unsigned uVirtState;
1455 /** Number of errors. */
1456 unsigned cErrors;
1457 /** The VM handle. */
1458 PVM pVM;
1459} PGMAHAFIS, *PPGMAHAFIS;
1460
1461
1462#if 0 /* unused */
1463/**
1464 * Verify virtual handler by matching physical address.
1465 *
1466 * @returns 0
1467 * @param pNode Pointer to a PGMVIRTHANDLER.
1468 * @param pvUser Pointer to user parameter.
1469 */
1470static DECLCALLBACK(int) pgmHandlerVirtualVerifyOneByPhysAddr(PAVLROGCPTRNODECORE pNode, void *pvUser)
1471{
1472 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
1473 PPGMAHAFIS pState = (PPGMAHAFIS)pvUser;
1474
1475 for (unsigned iPage = 0; iPage < pCur->cPages; iPage++)
1476 {
1477 if ((pCur->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) == pState->GCPhys)
1478 {
1479 unsigned uState = pgmHandlerVirtualCalcState(pCur);
1480 if (pState->uVirtState < uState)
1481 {
1482 error
1483 }
1484
1485 if (pState->uVirtState == uState)
1486 break; //??
1487 }
1488 }
1489 return 0;
1490}
1491#endif /* unused */
1492
1493
1494/**
1495 * Verify a virtual handler (enumeration callback).
1496 *
1497 * Called by PGMAssertHandlerAndFlagsInSync to check the sanity of all
1498 * the virtual handlers, esp. that the physical addresses match up.
1499 *
1500 * @returns 0
1501 * @param pNode Pointer to a PGMVIRTHANDLER.
1502 * @param pvUser Pointer to a PPGMAHAFIS structure.
1503 */
1504static DECLCALLBACK(int) pgmHandlerVirtualVerifyOne(PAVLROGCPTRNODECORE pNode, void *pvUser)
1505{
1506 PPGMVIRTHANDLER pVirt = (PPGMVIRTHANDLER)pNode;
1507 PPGMAHAFIS pState = (PPGMAHAFIS)pvUser;
1508 PVM pVM = pState->pVM;
1509
1510 /*
1511 * Validate the type and calc state.
1512 */
1513 switch (pVirt->enmType)
1514 {
1515 case PGMVIRTHANDLERTYPE_WRITE:
1516 case PGMVIRTHANDLERTYPE_ALL:
1517 break;
1518 default:
1519 AssertMsgFailed(("unknown/wrong enmType=%d\n", pVirt->enmType));
1520 pState->cErrors++;
1521 return 0;
1522 }
1523 const unsigned uState = pgmHandlerVirtualCalcState(pVirt);
1524
1525 /*
1526 * Check key alignment.
1527 */
1528 if ( (pVirt->aPhysToVirt[0].Core.Key & PAGE_OFFSET_MASK) != ((RTGCUINTPTR)pVirt->Core.Key & PAGE_OFFSET_MASK)
1529 && pVirt->aPhysToVirt[0].Core.Key != NIL_RTGCPHYS)
1530 {
1531 AssertMsgFailed(("virt handler phys has incorrect key! %RGp %RGv %s\n",
1532 pVirt->aPhysToVirt[0].Core.Key, pVirt->Core.Key, R3STRING(pVirt->pszDesc)));
1533 pState->cErrors++;
1534 }
1535
1536 if ( (pVirt->aPhysToVirt[pVirt->cPages - 1].Core.KeyLast & PAGE_OFFSET_MASK) != ((RTGCUINTPTR)pVirt->Core.KeyLast & PAGE_OFFSET_MASK)
1537 && pVirt->aPhysToVirt[pVirt->cPages - 1].Core.Key != NIL_RTGCPHYS)
1538 {
1539 AssertMsgFailed(("virt handler phys has incorrect key! %RGp %RGv %s\n",
1540 pVirt->aPhysToVirt[pVirt->cPages - 1].Core.KeyLast, pVirt->Core.KeyLast, R3STRING(pVirt->pszDesc)));
1541 pState->cErrors++;
1542 }
1543
1544 /*
1545 * Check pages for sanity and state.
1546 */
1547 RTGCUINTPTR GCPtr = (RTGCUINTPTR)pVirt->Core.Key;
1548 for (unsigned iPage = 0; iPage < pVirt->cPages; iPage++, GCPtr += PAGE_SIZE)
1549 {
1550 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1551 {
1552 PVMCPU pVCpu = &pVM->aCpus[i];
1553
1554 RTGCPHYS GCPhysGst;
1555 uint64_t fGst;
1556 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, &fGst, &GCPhysGst);
1557 if ( rc == VERR_PAGE_NOT_PRESENT
1558 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1559 {
1560 if (pVirt->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
1561 {
1562 AssertMsgFailed(("virt handler phys out of sync. %RGp GCPhysNew=~0 iPage=%#x %RGv %s\n",
1563 pVirt->aPhysToVirt[iPage].Core.Key, iPage, GCPtr, R3STRING(pVirt->pszDesc)));
1564 pState->cErrors++;
1565 }
1566 continue;
1567 }
1568
1569 AssertRCReturn(rc, 0);
1570 if ((pVirt->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) != GCPhysGst)
1571 {
1572 AssertMsgFailed(("virt handler phys out of sync. %RGp GCPhysGst=%RGp iPage=%#x %RGv %s\n",
1573 pVirt->aPhysToVirt[iPage].Core.Key, GCPhysGst, iPage, GCPtr, R3STRING(pVirt->pszDesc)));
1574 pState->cErrors++;
1575 continue;
1576 }
1577
1578 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysGst);
1579 if (!pPage)
1580 {
1581 AssertMsgFailed(("virt handler getting ram flags. GCPhysGst=%RGp iPage=%#x %RGv %s\n",
1582 GCPhysGst, iPage, GCPtr, R3STRING(pVirt->pszDesc)));
1583 pState->cErrors++;
1584 continue;
1585 }
1586
1587 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) < uState)
1588 {
1589 AssertMsgFailed(("virt handler state mismatch. pPage=%R[pgmpage] GCPhysGst=%RGp iPage=%#x %RGv state=%d expected>=%d %s\n",
1590 pPage, GCPhysGst, iPage, GCPtr, PGM_PAGE_GET_HNDL_VIRT_STATE(pPage), uState, R3STRING(pVirt->pszDesc)));
1591 pState->cErrors++;
1592 continue;
1593 }
1594 } /* for each VCPU */
1595 } /* for pages in virtual mapping. */
1596
1597 return 0;
1598}
1599
1600
1601/**
1602 * Asserts that the handlers+guest-page-tables == ramrange-flags and
1603 * that the physical addresses associated with virtual handlers are correct.
1604 *
1605 * @returns Number of mismatches.
1606 * @param pVM The VM handle.
1607 */
1608VMMDECL(unsigned) PGMAssertHandlerAndFlagsInSync(PVM pVM)
1609{
1610 PPGM pPGM = &pVM->pgm.s;
1611 PGMAHAFIS State;
1612 State.GCPhys = 0;
1613 State.uVirtState = 0;
1614 State.uVirtStateFound = 0;
1615 State.cErrors = 0;
1616 State.pVM = pVM;
1617
1618 Assert(PGMIsLockOwner(pVM));
1619
1620 /*
1621 * Check the RAM flags against the handlers.
1622 */
1623 for (PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges); pRam; pRam = pRam->CTX_SUFF(pNext))
1624 {
1625 const unsigned cPages = pRam->cb >> PAGE_SHIFT;
1626 for (unsigned iPage = 0; iPage < cPages; iPage++)
1627 {
1628 PGMPAGE const *pPage = &pRam->aPages[iPage];
1629 if (PGM_PAGE_HAS_ANY_HANDLERS(pPage))
1630 {
1631 State.GCPhys = pRam->GCPhys + (iPage << PAGE_SHIFT);
1632
1633 /*
1634 * Physical first - calculate the state based on the handlers
1635 * active on the page, then compare.
1636 */
1637 if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage))
1638 {
1639 /* the first */
1640 PPGMPHYSHANDLER pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pPGM->CTX_SUFF(pTrees)->PhysHandlers, State.GCPhys);
1641 if (!pPhys)
1642 {
1643 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTX_SUFF(pTrees)->PhysHandlers, State.GCPhys, true);
1644 if ( pPhys
1645 && pPhys->Core.Key > (State.GCPhys + PAGE_SIZE - 1))
1646 pPhys = NULL;
1647 Assert(!pPhys || pPhys->Core.Key >= State.GCPhys);
1648 }
1649 if (pPhys)
1650 {
1651 unsigned uState = pgmHandlerPhysicalCalcState(pPhys);
1652
1653 /* more? */
1654 while (pPhys->Core.KeyLast < (State.GCPhys | PAGE_OFFSET_MASK))
1655 {
1656 PPGMPHYSHANDLER pPhys2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTX_SUFF(pTrees)->PhysHandlers,
1657 pPhys->Core.KeyLast + 1, true);
1658 if ( !pPhys2
1659 || pPhys2->Core.Key > (State.GCPhys | PAGE_OFFSET_MASK))
1660 break;
1661 unsigned uState2 = pgmHandlerPhysicalCalcState(pPhys2);
1662 uState = RT_MAX(uState, uState2);
1663 pPhys = pPhys2;
1664 }
1665
1666 /* compare.*/
1667 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != uState
1668 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
1669 {
1670 AssertMsgFailed(("ram range vs phys handler flags mismatch. GCPhys=%RGp state=%d expected=%d %s\n",
1671 State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), uState, pPhys->pszDesc));
1672 State.cErrors++;
1673 }
1674
1675#ifdef IN_RING3
1676 /* validate that REM is handling it. */
1677 if ( !REMR3IsPageAccessHandled(pVM, State.GCPhys)
1678 /* ignore shadowed ROM for the time being. */
1679 && PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_ROM_SHADOW)
1680 {
1681 AssertMsgFailed(("ram range vs phys handler REM mismatch. GCPhys=%RGp state=%d %s\n",
1682 State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), pPhys->pszDesc));
1683 State.cErrors++;
1684 }
1685#endif
1686 }
1687 else
1688 {
1689 AssertMsgFailed(("ram range vs phys handler mismatch. no handler for GCPhys=%RGp\n", State.GCPhys));
1690 State.cErrors++;
1691 }
1692 }
1693
1694 /*
1695 * Virtual handlers.
1696 */
1697 if (PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage))
1698 {
1699 State.uVirtState = PGM_PAGE_GET_HNDL_VIRT_STATE(pPage);
1700#if 1
1701 /* locate all the matching physical ranges. */
1702 State.uVirtStateFound = PGM_PAGE_HNDL_VIRT_STATE_NONE;
1703 RTGCPHYS GCPhysKey = State.GCPhys;
1704 for (;;)
1705 {
1706 PPGMPHYS2VIRTHANDLER pPhys2Virt = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
1707 GCPhysKey, true /* above-or-equal */);
1708 if ( !pPhys2Virt
1709 || (pPhys2Virt->Core.Key & X86_PTE_PAE_PG_MASK) != State.GCPhys)
1710 break;
1711
1712 /* the head */
1713 GCPhysKey = pPhys2Virt->Core.KeyLast;
1714 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)((uintptr_t)pPhys2Virt + pPhys2Virt->offVirtHandler);
1715 unsigned uState = pgmHandlerVirtualCalcState(pCur);
1716 State.uVirtStateFound = RT_MAX(State.uVirtStateFound, uState);
1717
1718 /* any aliases */
1719 while (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
1720 {
1721 pPhys2Virt = (PPGMPHYS2VIRTHANDLER)((uintptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
1722 pCur = (PPGMVIRTHANDLER)((uintptr_t)pPhys2Virt + pPhys2Virt->offVirtHandler);
1723 uState = pgmHandlerVirtualCalcState(pCur);
1724 State.uVirtStateFound = RT_MAX(State.uVirtStateFound, uState);
1725 }
1726
1727 /* done? */
1728 if ((GCPhysKey & X86_PTE_PAE_PG_MASK) != State.GCPhys)
1729 break;
1730 }
1731#else
1732 /* very slow */
1733 RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, true, pgmHandlerVirtualVerifyOneByPhysAddr, &State);
1734#endif
1735 if (State.uVirtState != State.uVirtStateFound)
1736 {
1737 AssertMsgFailed(("ram range vs virt handler flags mismatch. GCPhys=%RGp uVirtState=%#x uVirtStateFound=%#x\n",
1738 State.GCPhys, State.uVirtState, State.uVirtStateFound));
1739 State.cErrors++;
1740 }
1741 }
1742 }
1743 } /* foreach page in ram range. */
1744 } /* foreach ram range. */
1745
1746 /*
1747 * Check that the physical addresses of the virtual handlers matches up
1748 * and that they are otherwise sane.
1749 */
1750 RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, true, pgmHandlerVirtualVerifyOne, &State);
1751
1752 /*
1753 * Do the reverse check for physical handlers.
1754 */
1755 /** @todo */
1756
1757 return State.cErrors;
1758}
1759
1760#endif /* VBOX_STRICT */
1761