VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp@93635

Last change on this file since 93635 was 93635, checked in by vboxsync, 3 years ago

VMM/PGM,VMM/PDM,VGA: Consolidate the user parameters of the physical access handlers into a single uint64_t value that shouldn't be a pointer, at least not for ring-0 callbacks. Special hack for devices where it's translated from a ring-0 device instance index into a current context PPDMDEVINS (not really tested yet). bugref:10094

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 71.8 KB
1/* $Id: PGMAllHandler.cpp 93635 2022-02-07 10:43:45Z vboxsync $ */
2/** @file
3 * PGM - Page Manager / Monitor, Access Handlers.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
24#include <VBox/vmm/dbgf.h>
25#include <VBox/vmm/pgm.h>
26#include <VBox/vmm/iom.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/nem.h>
30#include <VBox/vmm/stam.h>
31#include <VBox/vmm/dbgf.h>
32#ifdef IN_RING0
33# include <VBox/vmm/pdmdev.h>
34#endif
35#include "PGMInternal.h"
36#include <VBox/vmm/vmcc.h>
37#include "PGMInline.h"
38
39#include <VBox/log.h>
40#include <iprt/assert.h>
41#include <iprt/asm-amd64-x86.h>
42#include <iprt/string.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45#include <VBox/vmm/selm.h>
46
47
48/*********************************************************************************************************************************
49* Internal Functions *
50*********************************************************************************************************************************/
51static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam,
52 void *pvBitmap, uint32_t offBitmap);
53static void pgmHandlerPhysicalDeregisterNotifyNEM(PVMCC pVM, PPGMPHYSHANDLER pCur);
54static void pgmHandlerPhysicalResetRamFlags(PVMCC pVM, PPGMPHYSHANDLER pCur);
55
56
57/**
58 * Internal worker for releasing a physical handler type registration reference.
59 *
60 * @returns New reference count. UINT32_MAX if invalid input (asserted).
61 * @param pVM The cross context VM structure.
62 * @param pType Pointer to the type registration.
63 */
64DECLINLINE(uint32_t) pgmHandlerPhysicalTypeRelease(PVMCC pVM, PPGMPHYSHANDLERTYPEINT pType)
65{
66 AssertMsgReturn(pType->u32Magic == PGMPHYSHANDLERTYPEINT_MAGIC, ("%#x\n", pType->u32Magic), UINT32_MAX);
67 uint32_t cRefs = ASMAtomicDecU32(&pType->cRefs);
68 if (cRefs == 0)
69 {
70 PGM_LOCK_VOID(pVM);
71 pType->u32Magic = PGMPHYSHANDLERTYPEINT_MAGIC_DEAD;
72 RTListOff32NodeRemove(&pType->ListNode);
73 PGM_UNLOCK(pVM);
74 MMHyperFree(pVM, pType);
75 }
76 return cRefs;
77}
78
79
80/**
81 * Internal worker for retaining a physical handler type registration reference.
82 *
83 * @returns New reference count. UINT32_MAX if invalid input (asserted).
84 * @param pVM The cross context VM structure.
85 * @param pType Pointer to the type registration.
86 */
87DECLINLINE(uint32_t) pgmHandlerPhysicalTypeRetain(PVM pVM, PPGMPHYSHANDLERTYPEINT pType)
88{
89 NOREF(pVM);
90 AssertMsgReturn(pType->u32Magic == PGMPHYSHANDLERTYPEINT_MAGIC, ("%#x\n", pType->u32Magic), UINT32_MAX);
91 uint32_t cRefs = ASMAtomicIncU32(&pType->cRefs);
92 Assert(cRefs < _1M && cRefs > 0);
93 return cRefs;
94}
95
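/*
 * Retain/release pairing sketch (illustrative only; assumes hType was
 * produced by the ring-3 type registration API):
 *
 *     PPGMPHYSHANDLERTYPEINT pType = PGMPHYSHANDLERTYPEINT_FROM_HANDLE(pVM, hType);
 *     uint32_t cRefs = pgmHandlerPhysicalTypeRetain(pVM, pType);   // e.g. now 2
 *     ...
 *     cRefs = pgmHandlerPhysicalTypeRelease(pVM, pType);           // back to 1;
 *                                                                  // 0 means pType was freed
 */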
96
97/**
98 * Releases a reference to a physical handler type registration.
99 *
100 * @returns New reference count. UINT32_MAX if invalid input (asserted).
101 * @param pVM The cross context VM structure.
102 * @param hType The type registration handle.
103 */
104VMMDECL(uint32_t) PGMHandlerPhysicalTypeRelease(PVMCC pVM, PGMPHYSHANDLERTYPE hType)
105{
106 if (hType != NIL_PGMPHYSHANDLERTYPE)
107 return pgmHandlerPhysicalTypeRelease(pVM, PGMPHYSHANDLERTYPEINT_FROM_HANDLE(pVM, hType));
108 return 0;
109}
110
111
112/**
113 * Retains a reference to a physical handler type registration.
114 *
115 * @returns New reference count. UINT32_MAX if invalid input (asserted).
116 * @param pVM The cross context VM structure.
117 * @param hType The type registration handle.
118 */
119VMMDECL(uint32_t) PGMHandlerPhysicalTypeRetain(PVM pVM, PGMPHYSHANDLERTYPE hType)
120{
121 return pgmHandlerPhysicalTypeRetain(pVM, PGMPHYSHANDLERTYPEINT_FROM_HANDLE(pVM, hType));
122}
123
124
125/**
126 * Creates a physical access handler.
127 *
128 * @returns VBox status code.
129 * @retval VINF_SUCCESS when successfully installed.
130 * @retval VINF_PGM_GCPHYS_ALIASED when the shadow PTs couldn't be updated because
131 * the guest page is aliased and/or mapped by multiple PTs. A CR3 sync has been
132 * flagged together with a pool clearing.
133 * @retval VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
134 * one. A debug assertion is raised.
135 *
136 * @param pVM The cross context VM structure.
137 * @param hType The handler type registration handle.
138 * @param uUser User argument to the handlers (not pointer).
139 * @param pszDesc Description of this handler. If NULL, the type
140 * description will be used instead.
141 * @param ppPhysHandler Where to return the access handler structure on
142 * success.
143 */
144int pgmHandlerPhysicalExCreate(PVMCC pVM, PGMPHYSHANDLERTYPE hType, uint64_t uUser,
145 R3PTRTYPE(const char *) pszDesc, PPGMPHYSHANDLER *ppPhysHandler)
146{
147 PPGMPHYSHANDLERTYPEINT pType = PGMPHYSHANDLERTYPEINT_FROM_HANDLE(pVM, hType);
148 Log(("pgmHandlerPhysicalExCreate: uUser=%#RX64 hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
149 uUser, hType, pType->enmKind, R3STRING(pType->pszDesc), pszDesc, R3STRING(pszDesc)));
150
151 /*
152 * Validate input.
153 */
154 AssertPtr(ppPhysHandler);
155 AssertReturn(pType->u32Magic == PGMPHYSHANDLERTYPEINT_MAGIC, VERR_INVALID_HANDLE);
156
157 /*
158 * Allocate and initialize the new entry.
159 */
160 PPGMPHYSHANDLER pNew;
161 int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
162 if (RT_SUCCESS(rc))
163 {
164 pNew->Core.Key = NIL_RTGCPHYS;
165 pNew->Core.KeyLast = NIL_RTGCPHYS;
166 pNew->cPages = 0;
167 pNew->cAliasedPages = 0;
168 pNew->cTmpOffPages = 0;
169 pNew->uUser = uUser;
170 pNew->hType = hType;
171 pNew->pszDesc = pszDesc != NIL_RTR3PTR ? pszDesc : pType->pszDesc;
172 pgmHandlerPhysicalTypeRetain(pVM, pType);
173 *ppPhysHandler = pNew;
174 return VINF_SUCCESS;
175 }
176
177 return rc;
178}
179
180
181/**
182 * Duplicates a physical access handler.
183 *
184 * @returns VBox status code.
185 * @retval VINF_SUCCESS when successfully installed.
186 *
187 * @param pVM The cross context VM structure.
188 * @param pPhysHandlerSrc The source handler to duplicate.
189 * @param ppPhysHandler Where to return the access handler structure on
190 * success.
191 */
192int pgmHandlerPhysicalExDup(PVMCC pVM, PPGMPHYSHANDLER pPhysHandlerSrc, PPGMPHYSHANDLER *ppPhysHandler)
193{
194 return pgmHandlerPhysicalExCreate(pVM, pPhysHandlerSrc->hType, pPhysHandlerSrc->uUser,
195 pPhysHandlerSrc->pszDesc, ppPhysHandler);
196}
197
198
199/**
200 * Register an access handler for a physical range.
201 *
202 * @returns VBox status code.
203 * @retval VINF_SUCCESS when successfully installed.
204 *
205 * @param pVM The cross context VM structure.
206 * @param pPhysHandler The physical handler.
207 * @param GCPhys Start physical address.
208 * @param GCPhysLast Last physical address. (inclusive)
209 */
210int pgmHandlerPhysicalExRegister(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
211{
212 /*
213 * Validate input.
214 */
215 AssertPtr(pPhysHandler);
216 PPGMPHYSHANDLERTYPEINT pType = PGMPHYSHANDLERTYPEINT_FROM_HANDLE(pVM, pPhysHandler->hType);
217 Assert(pType->u32Magic == PGMPHYSHANDLERTYPEINT_MAGIC);
218 Log(("pgmHandlerPhysicalExRegister: GCPhys=%RGp GCPhysLast=%RGp hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
219 GCPhys, GCPhysLast, pPhysHandler->hType, pType->enmKind, R3STRING(pType->pszDesc), pPhysHandler->pszDesc, R3STRING(pPhysHandler->pszDesc)));
220 AssertReturn(pPhysHandler->Core.Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);
221
222 AssertMsgReturn(GCPhys < GCPhysLast, ("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast), VERR_INVALID_PARAMETER);
223 switch (pType->enmKind)
224 {
225 case PGMPHYSHANDLERKIND_WRITE:
226 break;
227 case PGMPHYSHANDLERKIND_MMIO:
228 case PGMPHYSHANDLERKIND_ALL:
229 /* Simplification for PGMPhysRead, PGMR0Trap0eHandlerNPMisconfig and others: Full pages. */
230 AssertMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_INVALID_PARAMETER);
231 AssertMsgReturn((GCPhysLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK, ("%RGp\n", GCPhysLast), VERR_INVALID_PARAMETER);
232 break;
233 default:
234 AssertMsgFailed(("Invalid input enmKind=%d!\n", pType->enmKind));
235 return VERR_INVALID_PARAMETER;
236 }
237
238 /*
239 * We require the range to be within registered ram.
240 * There is no apparent need to support ranges which cover more than one ram range.
241 */
242 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
243 if ( !pRam
244 || GCPhysLast > pRam->GCPhysLast)
245 {
246#ifdef IN_RING3
247 DBGFR3Info(pVM->pUVM, "phys", NULL, NULL);
248#endif
249 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
250 return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
251 }
252 Assert(GCPhys >= pRam->GCPhys && GCPhys < pRam->GCPhysLast);
253 Assert(GCPhysLast <= pRam->GCPhysLast && GCPhysLast >= pRam->GCPhys);
254
255 /*
256 * Try insert into list.
257 */
258 pPhysHandler->Core.Key = GCPhys;
259 pPhysHandler->Core.KeyLast = GCPhysLast;
260 pPhysHandler->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
261
262 PGM_LOCK_VOID(pVM);
263 if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pPhysHandler->Core))
264 {
265 int rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pPhysHandler, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
266 if (rc == VINF_PGM_SYNC_CR3)
267 rc = VINF_PGM_GCPHYS_ALIASED;
268
269#if defined(IN_RING3) || defined(IN_RING0)
270 NEMHCNotifyHandlerPhysicalRegister(pVM, pType->enmKind, GCPhys, GCPhysLast - GCPhys + 1);
271#endif
272 PGM_UNLOCK(pVM);
273
274 if (rc != VINF_SUCCESS)
275 Log(("PGMHandlerPhysicalRegisterEx: returns %Rrc (%RGp-%RGp)\n", rc, GCPhys, GCPhysLast));
276 return rc;
277 }
278 PGM_UNLOCK(pVM);
279
280 pPhysHandler->Core.Key = NIL_RTGCPHYS;
281 pPhysHandler->Core.KeyLast = NIL_RTGCPHYS;
282
283#if defined(IN_RING3) && defined(VBOX_STRICT)
284 DBGFR3Info(pVM->pUVM, "handlers", "phys nostats", NULL);
285#endif
286 AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp pszDesc=%s/%s\n",
287 GCPhys, GCPhysLast, R3STRING(pPhysHandler->pszDesc), R3STRING(pType->pszDesc)));
288 return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
289}
290
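/*
 * Two-step Ex API sketch; this is essentially what PGMHandlerPhysicalRegister
 * below does, minus the error-path cleanup:
 *
 *     PPGMPHYSHANDLER pNew;
 *     int rc = pgmHandlerPhysicalExCreate(pVM, hType, uUser, pszDesc, &pNew);
 *     if (RT_SUCCESS(rc))
 *         rc = pgmHandlerPhysicalExRegister(pVM, pNew, GCPhys, GCPhysLast);
 *     // On registration failure the caller still owns pNew and must pass it
 *     // to pgmHandlerPhysicalExDestroy().
 */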
291
292/**
293 * Register an access handler for a physical range.
294 *
295 * @returns VBox status code.
296 * @retval VINF_SUCCESS when successfully installed.
297 * @retval VINF_PGM_GCPHYS_ALIASED when the shadow PTs couldn't be updated because
298 * the guest page is aliased and/or mapped by multiple PTs. A CR3 sync has been
299 * flagged together with a pool clearing.
300 * @retval VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
301 * one. A debug assertion is raised.
302 *
303 * @param pVM The cross context VM structure.
304 * @param GCPhys Start physical address.
305 * @param GCPhysLast Last physical address. (inclusive)
306 * @param hType The handler type registration handle.
307 * @param uUser User argument to the handler.
308 * @param pszDesc Description of this handler. If NULL, the type
309 * description will be used instead.
310 */
311VMMDECL(int) PGMHandlerPhysicalRegister(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, PGMPHYSHANDLERTYPE hType,
312 uint64_t uUser, R3PTRTYPE(const char *) pszDesc)
313{
314#ifdef LOG_ENABLED
315 PPGMPHYSHANDLERTYPEINT pType = PGMPHYSHANDLERTYPEINT_FROM_HANDLE(pVM, hType);
316 Log(("PGMHandlerPhysicalRegister: GCPhys=%RGp GCPhysLast=%RGp uUser=%#RX64 hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
317 GCPhys, GCPhysLast, uUser, hType, pType->enmKind, R3STRING(pType->pszDesc), pszDesc, R3STRING(pszDesc)));
318#endif
319
320 PPGMPHYSHANDLER pNew;
321 int rc = pgmHandlerPhysicalExCreate(pVM, hType, uUser, pszDesc, &pNew);
322 if (RT_SUCCESS(rc))
323 {
324 rc = pgmHandlerPhysicalExRegister(pVM, pNew, GCPhys, GCPhysLast);
325 if (RT_SUCCESS(rc))
326 return rc;
327 pgmHandlerPhysicalExDestroy(pVM, pNew);
328 }
329 return rc;
330}
331
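/*
 * Caller-side sketch (the type handle and the device index follow the r93635
 * uUser scheme and are assumptions, not definitions from this file):
 *
 *     rc = PGMHandlerPhysicalRegister(pVM, GCPhysRegion,
 *                                     GCPhysRegion + cbRegion - 1, // inclusive last byte
 *                                     hHandlerType, (uint64_t)idxR0Device,
 *                                     "My device region");
 *
 * PGMPHYSHANDLERKIND_MMIO and _ALL types additionally require both edges to be
 * page aligned, see pgmHandlerPhysicalExRegister above.
 */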
332
333/**
334 * Sets ram range flags and attempts updating shadow PTs.
335 *
336 * @returns VBox status code.
337 * @retval VINF_SUCCESS when the shadow PTs were successfully updated.
338 * @retval VINF_PGM_SYNC_CR3 when the shadow PTs couldn't be updated because
339 * the guest page is aliased and/or mapped by multiple PTs. FFs set.
340 * @param pVM The cross context VM structure.
341 * @param pCur The physical handler.
342 * @param pRam The RAM range.
343 * @param pvBitmap Dirty bitmap. Optional.
344 * @param offBitmap Dirty bitmap offset.
345 */
346static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam,
347 void *pvBitmap, uint32_t offBitmap)
348{
349 /*
350 * Iterate the guest ram pages updating the flags and flushing PT entries
351 * mapping the page.
352 */
353 bool fFlushTLBs = false;
354 int rc = VINF_SUCCESS;
355 PPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
356 const unsigned uState = pCurType->uState;
357 uint32_t cPages = pCur->cPages;
358 uint32_t i = (pCur->Core.Key - pRam->GCPhys) >> GUEST_PAGE_SHIFT;
359 for (;;)
360 {
361 PPGMPAGE pPage = &pRam->aPages[i];
362 AssertMsg(pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO || PGM_PAGE_IS_MMIO(pPage),
363 ("%RGp %R[pgmpage]\n", pRam->GCPhys + (i << GUEST_PAGE_SHIFT), pPage));
364
365 /* Only do upgrades. */
366 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
367 {
368 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
369
370 const RTGCPHYS GCPhysPage = pRam->GCPhys + (i << GUEST_PAGE_SHIFT);
371 int rc2 = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage,
372 false /* allow updates of PTEs (instead of flushing) */, &fFlushTLBs);
373 if (rc2 != VINF_SUCCESS && rc == VINF_SUCCESS)
374 rc = rc2;
375
376#ifdef VBOX_WITH_NATIVE_NEM
377 /* Tell NEM about the protection update. */
378 if (VM_IS_NEM_ENABLED(pVM))
379 {
380 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
381 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
382 NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
383 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
384 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
385 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
386 }
387#endif
388 if (pvBitmap)
389 ASMBitSet(pvBitmap, offBitmap);
390 }
391
392 /* next */
393 if (--cPages == 0)
394 break;
395 i++;
396 offBitmap++;
397 }
398
399 if (fFlushTLBs)
400 {
401 PGM_INVL_ALL_VCPU_TLBS(pVM);
402 Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: flushing guest TLBs; rc=%d\n", rc));
403 }
404 else
405 Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: doesn't flush guest TLBs. rc=%Rrc; sync flags=%x VMCPU_FF_PGM_SYNC_CR3=%d\n", rc, VMMGetCpu(pVM)->pgm.s.fSyncFlags, VMCPU_FF_IS_SET(VMMGetCpu(pVM), VMCPU_FF_PGM_SYNC_CR3)));
406
407 return rc;
408}
409
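/*
 * With pvBitmap given, the walk doubles as a dirty-page query: bit
 * offBitmap+N is set for each page whose handler state had to be upgraded
 * (i.e. it was written through while temporarily off).  Caller-side sketch,
 * bitmap size assumed to cover the whole range:
 *
 *     uint64_t bmDirty[256] = {0};   // room for 16384 page bits
 *     rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, bmDirty, 0);
 *     if (RT_SUCCESS(rc) && ASMBitTest(bmDirty, iPage))
 *         ... treat page iPage as dirty ...
 */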
410
411/**
412 * Deregister a physical page access handler.
413 *
414 * @returns VBox status code.
415 * @param pVM The cross context VM structure.
416 * @param pPhysHandler The handler to deregister (but not free).
417 */
418int pgmHandlerPhysicalExDeregister(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler)
419{
420 LogFlow(("pgmHandlerPhysicalExDeregister: Removing Range %RGp-%RGp %s\n",
421 pPhysHandler->Core.Key, pPhysHandler->Core.KeyLast, R3STRING(pPhysHandler->pszDesc)));
422 AssertReturn(pPhysHandler->Core.Key != NIL_RTGCPHYS, VERR_PGM_HANDLER_NOT_FOUND);
423
424 /*
425 * Remove the handler from the tree.
426 */
427 PGM_LOCK_VOID(pVM);
428 PPGMPHYSHANDLER pRemoved = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
429 pPhysHandler->Core.Key);
430 if (pRemoved == pPhysHandler)
431 {
432 /*
433 * Clear the page bits, notify the REM about this change and clear
434 * the cache.
435 */
436 pgmHandlerPhysicalResetRamFlags(pVM, pPhysHandler);
437 if (VM_IS_NEM_ENABLED(pVM))
438 pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pPhysHandler);
439 pVM->pgm.s.pLastPhysHandlerR0 = 0;
440 pVM->pgm.s.pLastPhysHandlerR3 = 0;
441
442 pPhysHandler->Core.Key = NIL_RTGCPHYS;
443 pPhysHandler->Core.KeyLast = NIL_RTGCPHYS;
444
445 PGM_UNLOCK(pVM);
446
447 return VINF_SUCCESS;
448 }
449
450 /*
451 * Both of the failure conditions here are considered internal processing
452 * errors because they can only be caused by race conditions or corruption.
453 * If we ever need to handle concurrent deregistration, we have to move
454 * the NIL_RTGCPHYS check inside the PGM lock.
455 */
456 if (pRemoved)
457 RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pRemoved->Core);
458
459 PGM_UNLOCK(pVM);
460
461 if (!pRemoved)
462 AssertMsgFailed(("Didn't find range starting at %RGp in the tree!\n", pPhysHandler->Core.Key));
463 else
464 AssertMsgFailed(("Found different handle at %RGp in the tree: got %p instead of %p\n",
465 pPhysHandler->Core.Key, pRemoved, pPhysHandler));
466 return VERR_PGM_HANDLER_IPE_1;
467}
468
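/*
 * Teardown sketch for a caller-owned handler: deregister first, then destroy
 * (PGMHandlerPhysicalDeregister below combines both steps for handlers looked
 * up in the tree):
 *
 *     rc = pgmHandlerPhysicalExDeregister(pVM, pPhysHandler);
 *     if (RT_SUCCESS(rc))
 *         rc = pgmHandlerPhysicalExDestroy(pVM, pPhysHandler);
 */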
469
470/**
471 * Destroys (frees) a physical handler.
472 *
473 * The caller must deregister it before destroying it!
474 *
475 * @returns VBox status code.
476 * @param pVM The cross context VM structure.
477 * @param pHandler The handler to free. NULL if ignored.
478 */
479int pgmHandlerPhysicalExDestroy(PVMCC pVM, PPGMPHYSHANDLER pHandler)
480{
481 if (pHandler)
482 {
483 AssertPtr(pHandler);
484 AssertReturn(pHandler->Core.Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);
485 PGMHandlerPhysicalTypeRelease(pVM, pHandler->hType);
486 MMHyperFree(pVM, pHandler);
487 }
488 return VINF_SUCCESS;
489}
490
491
492/**
493 * Deregister a physical page access handler.
494 *
495 * @returns VBox status code.
496 * @param pVM The cross context VM structure.
497 * @param GCPhys Start physical address.
498 */
499VMMDECL(int) PGMHandlerPhysicalDeregister(PVMCC pVM, RTGCPHYS GCPhys)
500{
501 /*
502 * Find the handler.
503 */
504 PGM_LOCK_VOID(pVM);
505 PPGMPHYSHANDLER pRemoved = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
506 if (pRemoved)
507 {
508 LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %RGp-%RGp %s\n",
509 pRemoved->Core.Key, pRemoved->Core.KeyLast, R3STRING(pRemoved->pszDesc)));
510
511 /*
512 * Clear the page bits, notify the REM about this change and clear
513 * the cache.
514 */
515 pgmHandlerPhysicalResetRamFlags(pVM, pRemoved);
516 if (VM_IS_NEM_ENABLED(pVM))
517 pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pRemoved);
518 pVM->pgm.s.pLastPhysHandlerR0 = 0;
519 pVM->pgm.s.pLastPhysHandlerR3 = 0;
520
521 PGM_UNLOCK(pVM);
522
523 pRemoved->Core.Key = NIL_RTGCPHYS;
524 pgmHandlerPhysicalExDestroy(pVM, pRemoved);
525 return VINF_SUCCESS;
526 }
527
528 PGM_UNLOCK(pVM);
529
530 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
531 return VERR_PGM_HANDLER_NOT_FOUND;
532}
533
534
535/**
536 * Shared code with modify.
537 */
538static void pgmHandlerPhysicalDeregisterNotifyNEM(PVMCC pVM, PPGMPHYSHANDLER pCur)
539{
540#ifdef VBOX_WITH_NATIVE_NEM
541 PPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
542 RTGCPHYS GCPhysStart = pCur->Core.Key;
543 RTGCPHYS GCPhysLast = pCur->Core.KeyLast;
544
545 /*
546 * Page align the range.
547 *
548 * Since we've reset (recalculated) the physical handler state of all pages
549 * we can make use of the page states to figure out whether a page should be
550 * included in the REM notification or not.
551 */
552 if ( (pCur->Core.Key & GUEST_PAGE_OFFSET_MASK)
553 || ((pCur->Core.KeyLast + 1) & GUEST_PAGE_OFFSET_MASK))
554 {
555 Assert(pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO);
556
557 if (GCPhysStart & GUEST_PAGE_OFFSET_MASK)
558 {
559 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysStart);
560 if ( pPage
561 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
562 {
563 RTGCPHYS GCPhys = (GCPhysStart + (GUEST_PAGE_SIZE - 1)) & X86_PTE_PAE_PG_MASK;
564 if ( GCPhys > GCPhysLast
565 || GCPhys < GCPhysStart)
566 return;
567 GCPhysStart = GCPhys;
568 }
569 else
570 GCPhysStart &= X86_PTE_PAE_PG_MASK;
571 Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
572 }
573
574 if (GCPhysLast & GUEST_PAGE_OFFSET_MASK)
575 {
576 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysLast);
577 if ( pPage
578 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
579 {
580 RTGCPHYS GCPhys = (GCPhysLast & X86_PTE_PAE_PG_MASK) - 1;
581 if ( GCPhys < GCPhysStart
582 || GCPhys > GCPhysLast)
583 return;
584 GCPhysLast = GCPhys;
585 }
586 else
587 GCPhysLast |= GUEST_PAGE_OFFSET_MASK;
588 Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
589 }
590 }
591
592 /*
593 * Tell NEM.
594 */
595 PPGMRAMRANGE const pRam = pgmPhysGetRange(pVM, GCPhysStart);
596 RTGCPHYS const cb = GCPhysLast - GCPhysStart + 1;
597 uint8_t u2State = UINT8_MAX;
598 NEMHCNotifyHandlerPhysicalDeregister(pVM, pCurType->enmKind, GCPhysStart, cb,
599 pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysStart) : NULL, &u2State);
600 if (u2State != UINT8_MAX && pRam)
601 pgmPhysSetNemStateForPages(&pRam->aPages[(GCPhysStart - pRam->GCPhys) >> GUEST_PAGE_SHIFT],
602 cb >> GUEST_PAGE_SHIFT, u2State);
603#else
604 RT_NOREF(pVM, pCur);
605#endif
606}
607
608
609/**
610 * pgmHandlerPhysicalResetRamFlags helper that checks for other handlers on
611 * edge pages.
612 */
613DECLINLINE(void) pgmHandlerPhysicalRecalcPageState(PVMCC pVM, RTGCPHYS GCPhys, bool fAbove, PPGMRAMRANGE *ppRamHint)
614{
615 /*
616 * Look for other handlers.
617 */
618 unsigned uState = PGM_PAGE_HNDL_PHYS_STATE_NONE;
619 for (;;)
620 {
621 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys, fAbove);
622 if ( !pCur
623 || ((fAbove ? pCur->Core.Key : pCur->Core.KeyLast) >> GUEST_PAGE_SHIFT) != (GCPhys >> GUEST_PAGE_SHIFT))
624 break;
625 PPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
626 uState = RT_MAX(uState, pCurType->uState);
627
628 /* next? */
629 RTGCPHYS GCPhysNext = fAbove
630 ? pCur->Core.KeyLast + 1
631 : pCur->Core.Key - 1;
632 if ((GCPhysNext >> GUEST_PAGE_SHIFT) != (GCPhys >> GUEST_PAGE_SHIFT))
633 break;
634 GCPhys = GCPhysNext;
635 }
636
637 /*
638 * Update if we found something that is a higher priority
639 * state than the current.
640 */
641 if (uState != PGM_PAGE_HNDL_PHYS_STATE_NONE)
642 {
643 PPGMPAGE pPage;
644 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, ppRamHint);
645 if ( RT_SUCCESS(rc)
646 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
647 {
648 /* This should normally not be necessary. */
649 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
650 bool fFlushTLBs;
651 rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, false /*fFlushPTEs*/, &fFlushTLBs);
652 if (RT_SUCCESS(rc) && fFlushTLBs)
653 PGM_INVL_ALL_VCPU_TLBS(pVM);
654 else
655 AssertRC(rc);
656
657#ifdef VBOX_WITH_NATIVE_NEM
658 /* Tell NEM about the protection update. */
659 if (VM_IS_NEM_ENABLED(pVM))
660 {
661 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
662 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
663 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
664 PGM_RAMRANGE_CALC_PAGE_R3PTR(*ppRamHint, GCPhys),
665 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
666 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
667 }
668#endif
669 }
670 else
671 AssertRC(rc);
672 }
673}
674
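/*
 * Worked example (layout assumed): handlers A (0x1000-0x17ff) and B
 * (0x1800-0x1fff) share the guest page at 0x1000.  Deregistering A makes
 * pgmHandlerPhysicalResetRamFlags call this with GCPhys=0x1800 and
 * fAbove=true; the best-fit lookup finds B within the same page, so uState is
 * raised to B's level and the page keeps its protection instead of dropping
 * to PGM_PAGE_HNDL_PHYS_STATE_NONE.
 */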
675
676/**
677 * Resets an aliased page.
678 *
679 * @param pVM The cross context VM structure.
680 * @param pPage The page.
681 * @param GCPhysPage The page address in case it comes in handy.
682 * @param pRam The RAM range the page is associated with (for NEM
683 * notifications).
684 * @param fDoAccounting Whether to perform accounting. (Only set during
685 * reset where pgmR3PhysRamReset doesn't have the
686 * handler structure handy.)
687 */
688void pgmHandlerPhysicalResetAliasedPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage, PPGMRAMRANGE pRam, bool fDoAccounting)
689{
690 Assert( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
691 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
692 Assert(PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
693#ifdef VBOX_WITH_NATIVE_NEM
694 RTHCPHYS const HCPhysPrev = PGM_PAGE_GET_HCPHYS(pPage);
695#endif
696
697 /*
698 * Flush any shadow page table references *first*.
699 */
700 bool fFlushTLBs = false;
701 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage, true /*fFlushPTEs*/, &fFlushTLBs);
702 AssertLogRelRCReturnVoid(rc);
703 HMFlushTlbOnAllVCpus(pVM);
704
705 /*
706 * Make it an MMIO/Zero page.
707 */
708 PGM_PAGE_SET_HCPHYS(pVM, pPage, pVM->pgm.s.HCPhysZeroPg);
709 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO);
710 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
711 PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
712 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_ALL);
713
714 /* Flush its TLB entry. */
715 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
716
717 /*
718 * Do accounting for pgmR3PhysRamReset.
719 */
720 if (fDoAccounting)
721 {
722 PPGMPHYSHANDLER pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysPage);
723 if (RT_LIKELY(pHandler))
724 {
725 Assert(pHandler->cAliasedPages > 0);
726 pHandler->cAliasedPages--;
727 }
728 else
729 AssertFailed();
730 }
731
732#ifdef VBOX_WITH_NATIVE_NEM
733 /*
734 * Tell NEM about the protection change.
735 */
736 if (VM_IS_NEM_ENABLED(pVM))
737 {
738 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
739 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, HCPhysPrev, pVM->pgm.s.HCPhysZeroPg,
740 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
741 NEM_PAGE_PROT_NONE, PGMPAGETYPE_MMIO, &u2State);
742 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
743 }
744#else
745 RT_NOREF(pRam);
746#endif
747}
748
749
750/**
751 * Resets ram range flags.
752 *
753 * @returns VBox status code.
754 * @retval VINF_SUCCESS when the shadow PTs were successfully updated.
755 * @param pVM The cross context VM structure.
756 * @param pCur The physical handler.
757 *
758 * @remark We don't start messing with the shadow page tables, as we've
759 * already got code in Trap0e which deals with out of sync handler
760 * flags (originally conceived for global pages).
761 */
762static void pgmHandlerPhysicalResetRamFlags(PVMCC pVM, PPGMPHYSHANDLER pCur)
763{
764 /*
765 * Iterate the guest ram pages updating the state.
766 */
767 RTUINT cPages = pCur->cPages;
768 RTGCPHYS GCPhys = pCur->Core.Key;
769 PPGMRAMRANGE pRamHint = NULL;
770 for (;;)
771 {
772 PPGMPAGE pPage;
773 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
774 if (RT_SUCCESS(rc))
775 {
776 /* Reset aliased MMIO pages to MMIO, since this aliasing is our business.
777 (We don't flip MMIO to RAM though, that's PGMPhys.cpp's job.) */
778 bool fNemNotifiedAlready = false;
779 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
780 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
781 {
782 Assert(pCur->cAliasedPages > 0);
783 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhys, pRamHint, false /*fDoAccounting*/);
784 pCur->cAliasedPages--;
785 fNemNotifiedAlready = true;
786 }
787#ifdef VBOX_STRICT
788 PPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
789 AssertMsg(pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO || PGM_PAGE_IS_MMIO(pPage), ("%RGp %R[pgmpage]\n", GCPhys, pPage));
790#endif
791 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_NONE);
792
793#ifdef VBOX_WITH_NATIVE_NEM
794 /* Tell NEM about the protection change. */
795 if (VM_IS_NEM_ENABLED(pVM) && !fNemNotifiedAlready)
796 {
797 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
798 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
799 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
800 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRamHint, GCPhys),
801 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
802 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
803 }
804#endif
805 RT_NOREF(fNemNotifiedAlready);
806 }
807 else
808 AssertRC(rc);
809
810 /* next */
811 if (--cPages == 0)
812 break;
813 GCPhys += GUEST_PAGE_SIZE;
814 }
815
816 pCur->cAliasedPages = 0;
817 pCur->cTmpOffPages = 0;
818
819 /*
820 * Check for partial start and end pages.
821 */
822 if (pCur->Core.Key & GUEST_PAGE_OFFSET_MASK)
823 pgmHandlerPhysicalRecalcPageState(pVM, pCur->Core.Key - 1, false /* fAbove */, &pRamHint);
824 if ((pCur->Core.KeyLast & GUEST_PAGE_OFFSET_MASK) != GUEST_PAGE_OFFSET_MASK)
825 pgmHandlerPhysicalRecalcPageState(pVM, pCur->Core.KeyLast + 1, true /* fAbove */, &pRamHint);
826}
827
828
829#if 0 /* unused */
830/**
831 * Modify a physical page access handler.
832 *
833 * Modification can only be done to the range itself, not the type or anything else.
834 *
835 * @returns VBox status code.
836 * For all return codes other than VERR_PGM_HANDLER_NOT_FOUND and VINF_SUCCESS the range is deregistered
837 * and a new registration must be performed!
838 * @param pVM The cross context VM structure.
839 * @param GCPhysCurrent Current location.
840 * @param GCPhys New location.
841 * @param GCPhysLast New last location.
842 */
843VMMDECL(int) PGMHandlerPhysicalModify(PVMCC pVM, RTGCPHYS GCPhysCurrent, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
844{
845 /*
846 * Remove it.
847 */
848 int rc;
849 PGM_LOCK_VOID(pVM);
850 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysCurrent);
851 if (pCur)
852 {
853 /*
854 * Clear the ram flags. (We're gonna move or free it!)
855 */
856 pgmHandlerPhysicalResetRamFlags(pVM, pCur);
857 PPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
858 bool const fRestoreAsRAM = pCurType->pfnHandlerR3 /** @todo this isn't entirely correct. */
859 && pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO;
860
861 /*
862 * Validate the new range, modify and reinsert.
863 */
864 if (GCPhysLast >= GCPhys)
865 {
866 /*
867 * We require the range to be within registered ram.
868 * There is no apparent need to support ranges which cover more than one ram range.
869 */
870 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
871 if ( pRam
872 && GCPhys <= pRam->GCPhysLast
873 && GCPhysLast >= pRam->GCPhys)
874 {
875 pCur->Core.Key = GCPhys;
876 pCur->Core.KeyLast = GCPhysLast;
877 pCur->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + 1) >> GUEST_PAGE_SHIFT;
878
879 if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pCur->Core))
880 {
881 RTGCPHYS const cb = GCPhysLast - GCPhys + 1;
882 PGMPHYSHANDLERKIND const enmKind = pCurType->enmKind;
883
884 /*
885 * Set ram flags, flush shadow PT entries and finally tell REM about this.
886 */
887 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, NULL, 0);
888
889 /** @todo NEM: not sure we need this notification... */
890 NEMHCNotifyHandlerPhysicalModify(pVM, enmKind, GCPhysCurrent, GCPhys, cb, fRestoreAsRAM);
891
892 PGM_UNLOCK(pVM);
893
894 PGM_INVL_ALL_VCPU_TLBS(pVM);
895 Log(("PGMHandlerPhysicalModify: GCPhysCurrent=%RGp -> GCPhys=%RGp GCPhysLast=%RGp\n",
896 GCPhysCurrent, GCPhys, GCPhysLast));
897 return VINF_SUCCESS;
898 }
899
900 AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp\n", GCPhys, GCPhysLast));
901 rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
902 }
903 else
904 {
905 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
906 rc = VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
907 }
908 }
909 else
910 {
911 AssertMsgFailed(("Invalid range %RGp-%RGp\n", GCPhys, GCPhysLast));
912 rc = VERR_INVALID_PARAMETER;
913 }
914
915 /*
916 * Invalid new location, flush the cache and free it.
917 * We've only gotta notify REM and free the memory.
918 */
919 if (VM_IS_NEM_ENABLED(pVM))
920 pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pCur);
921 pVM->pgm.s.pLastPhysHandlerR0 = 0;
922 pVM->pgm.s.pLastPhysHandlerR3 = 0;
923 PGMHandlerPhysicalTypeRelease(pVM, pCur->hType);
924 MMHyperFree(pVM, pCur);
925 }
926 else
927 {
928 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhysCurrent));
929 rc = VERR_PGM_HANDLER_NOT_FOUND;
930 }
931
932 PGM_UNLOCK(pVM);
933 return rc;
934}
935#endif /* unused */
936
937
938/**
939 * Changes the user callback arguments associated with a physical access handler.
940 *
941 * @returns VBox status code.
942 * @param pVM The cross context VM structure.
943 * @param GCPhys Start physical address of the handler.
944 * @param uUser User argument to the handlers.
945 */
946VMMDECL(int) PGMHandlerPhysicalChangeUserArg(PVMCC pVM, RTGCPHYS GCPhys, uint64_t uUser)
947{
948 /*
949 * Find the handler and make the change.
950 */
951 int rc;
952 PGM_LOCK_VOID(pVM);
953 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
954 if (pCur)
955 {
956 pCur->uUser = uUser;
957 rc = VINF_SUCCESS;
958 }
959 else
960 {
961 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
962 rc = VERR_PGM_HANDLER_NOT_FOUND;
963 }
964
965 PGM_UNLOCK(pVM);
966 return rc;
967}
968
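/*
 * Usage sketch (idxNewDevIns is a placeholder in the spirit of the r93635
 * ring-0 device index scheme):
 *
 *     rc = PGMHandlerPhysicalChangeUserArg(pVM, GCPhysRegion, (uint64_t)idxNewDevIns);
 *     AssertRC(rc);
 */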
969#if 0 /* unused */
970
971/**
972 * Splits a physical access handler in two.
973 *
974 * @returns VBox status code.
975 * @param pVM The cross context VM structure.
976 * @param GCPhys Start physical address of the handler.
977 * @param GCPhysSplit The split address.
978 */
979VMMDECL(int) PGMHandlerPhysicalSplit(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysSplit)
980{
981 AssertReturn(GCPhys < GCPhysSplit, VERR_INVALID_PARAMETER);
982
983 /*
984 * Do the allocation without owning the lock.
985 */
986 PPGMPHYSHANDLER pNew;
987 int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
988 if (RT_FAILURE(rc))
989 return rc;
990
991 /*
992 * Get the handler.
993 */
994 PGM_LOCK_VOID(pVM);
995 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
996 if (RT_LIKELY(pCur))
997 {
998 if (RT_LIKELY(GCPhysSplit <= pCur->Core.KeyLast))
999 {
1000 /*
1001 * Create new handler node for the 2nd half.
1002 */
1003 *pNew = *pCur;
1004 pNew->Core.Key = GCPhysSplit;
1005 pNew->cPages = (pNew->Core.KeyLast - (pNew->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
1006
1007 pCur->Core.KeyLast = GCPhysSplit - 1;
1008 pCur->cPages = (pCur->Core.KeyLast - (pCur->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
1009
1010 if (RT_LIKELY(RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pNew->Core)))
1011 {
1012 LogFlow(("PGMHandlerPhysicalSplit: %RGp-%RGp and %RGp-%RGp\n",
1013 pCur->Core.Key, pCur->Core.KeyLast, pNew->Core.Key, pNew->Core.KeyLast));
1014 PGM_UNLOCK(pVM);
1015 return VINF_SUCCESS;
1016 }
1017 AssertMsgFailed(("whu?\n"));
1018 rc = VERR_PGM_PHYS_HANDLER_IPE;
1019 }
1020 else
1021 {
1022 AssertMsgFailed(("outside range: %RGp-%RGp split %RGp\n", pCur->Core.Key, pCur->Core.KeyLast, GCPhysSplit));
1023 rc = VERR_INVALID_PARAMETER;
1024 }
1025 }
1026 else
1027 {
1028 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
1029 rc = VERR_PGM_HANDLER_NOT_FOUND;
1030 }
1031 PGM_UNLOCK(pVM);
1032 MMHyperFree(pVM, pNew);
1033 return rc;
1034}
1035
1036
1037/**
1038 * Joins up two adjacent physical access handlers which have the same callbacks.
1039 *
1040 * @returns VBox status code.
1041 * @param pVM The cross context VM structure.
1042 * @param GCPhys1 Start physical address of the first handler.
1043 * @param GCPhys2 Start physical address of the second handler.
1044 */
1045VMMDECL(int) PGMHandlerPhysicalJoin(PVMCC pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2)
1046{
1047 /*
1048 * Get the handlers.
1049 */
1050 int rc;
1051 PGM_LOCK_VOID(pVM);
1052 PPGMPHYSHANDLER pCur1 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys1);
1053 if (RT_LIKELY(pCur1))
1054 {
1055 PPGMPHYSHANDLER pCur2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
1056 if (RT_LIKELY(pCur2))
1057 {
1058 /*
1059 * Make sure that they are adjacent, and that they've got the same callbacks.
1060 */
1061 if (RT_LIKELY(pCur1->Core.KeyLast + 1 == pCur2->Core.Key))
1062 {
1063 if (RT_LIKELY(pCur1->hType == pCur2->hType))
1064 {
1065 PPGMPHYSHANDLER pCur3 = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
1066 if (RT_LIKELY(pCur3 == pCur2))
1067 {
1068 pCur1->Core.KeyLast = pCur2->Core.KeyLast;
1069 pCur1->cPages = (pCur1->Core.KeyLast - (pCur1->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
1070 LogFlow(("PGMHandlerPhysicalJoin: %RGp-%RGp %RGp-%RGp\n",
1071 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
1072 pVM->pgm.s.pLastPhysHandlerR0 = 0;
1073 pVM->pgm.s.pLastPhysHandlerR3 = 0;
1074 PGMHandlerPhysicalTypeRelease(pVM, pCur2->hType);
1075 MMHyperFree(pVM, pCur2);
1076 PGM_UNLOCK(pVM);
1077 return VINF_SUCCESS;
1078 }
1079
1080 Assert(pCur3 == pCur2);
1081 rc = VERR_PGM_PHYS_HANDLER_IPE;
1082 }
1083 else
1084 {
1085 AssertMsgFailed(("mismatching handlers\n"));
1086 rc = VERR_ACCESS_DENIED;
1087 }
1088 }
1089 else
1090 {
1091 AssertMsgFailed(("not adjacent: %RGp-%RGp %RGp-%RGp\n",
1092 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
1093 rc = VERR_INVALID_PARAMETER;
1094 }
1095 }
1096 else
1097 {
1098 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys2));
1099 rc = VERR_PGM_HANDLER_NOT_FOUND;
1100 }
1101 }
1102 else
1103 {
1104 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys1));
1105 rc = VERR_PGM_HANDLER_NOT_FOUND;
1106 }
1107 PGM_UNLOCK(pVM);
1108 return rc;
1109
1110}
1111
1112#endif /* unused */
1113
1114/**
1115 * Resets any modifications to individual pages in a physical page access
1116 * handler region.
1117 *
1118 * This is used in tandem with PGMHandlerPhysicalPageTempOff(),
1119 * PGMHandlerPhysicalPageAliasMmio2() or PGMHandlerPhysicalPageAliasHC().
1120 *
1121 * @returns VBox status code.
1122 * @param pVM The cross context VM structure.
1123 * @param GCPhys The start address of the handler regions, i.e. what you
1124 * passed to PGMR3HandlerPhysicalRegister(),
1125 * PGMHandlerPhysicalRegisterEx() or
1126 * PGMHandlerPhysicalModify().
1127 */
1128VMMDECL(int) PGMHandlerPhysicalReset(PVMCC pVM, RTGCPHYS GCPhys)
1129{
1130 LogFlow(("PGMHandlerPhysicalReset GCPhys=%RGp\n", GCPhys));
1131 PGM_LOCK_VOID(pVM);
1132
1133 /*
1134 * Find the handler.
1135 */
1136 int rc;
1137 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1138 if (RT_LIKELY(pCur))
1139 {
1140 /*
1141 * Validate kind.
1142 */
1143 PPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
1144 switch (pCurType->enmKind)
1145 {
1146 case PGMPHYSHANDLERKIND_WRITE:
1147 case PGMPHYSHANDLERKIND_ALL:
1148 case PGMPHYSHANDLERKIND_MMIO: /* NOTE: Only use when clearing MMIO ranges with aliased MMIO2 pages! */
1149 {
1150 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerReset)); /** @todo move out of switch */
1151 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1152 Assert(pRam);
1153 Assert(pRam->GCPhys <= pCur->Core.Key);
1154 Assert(pRam->GCPhysLast >= pCur->Core.KeyLast);
1155
1156 if (pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO)
1157 {
1158 /*
1159 * Reset all the PGMPAGETYPE_MMIO2_ALIAS_MMIO pages first and that's it.
1160 * This could probably be optimized a bit wrt flushing, but I'm too lazy
1161 * to do that now...
1162 */
1163 if (pCur->cAliasedPages)
1164 {
1165 PPGMPAGE pPage = &pRam->aPages[(pCur->Core.Key - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
1166 RTGCPHYS GCPhysPage = pCur->Core.Key;
1167 uint32_t cLeft = pCur->cPages;
1168 while (cLeft-- > 0)
1169 {
1170 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
1171 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
1172 {
1173 Assert(pCur->cAliasedPages > 0);
1174 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage, pRam, false /*fDoAccounting*/);
1175 --pCur->cAliasedPages;
1176#ifndef VBOX_STRICT
1177 if (pCur->cAliasedPages == 0)
1178 break;
1179#endif
1180 }
1181 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO);
1182 GCPhysPage += GUEST_PAGE_SIZE;
1183 pPage++;
1184 }
1185 Assert(pCur->cAliasedPages == 0);
1186 }
1187 }
1188 else if (pCur->cTmpOffPages > 0)
1189 {
1190 /*
1191 * Set the flags and flush shadow PT entries.
1192 */
1193 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
1194 }
1195
1196 pCur->cAliasedPages = 0;
1197 pCur->cTmpOffPages = 0;
1198
1199 rc = VINF_SUCCESS;
1200 break;
1201 }
1202
1203 /*
1204 * Invalid.
1205 */
1206 default:
1207 AssertMsgFailed(("Invalid type %d! Corruption!\n", pCurType->enmKind));
1208 rc = VERR_PGM_PHYS_HANDLER_IPE;
1209 break;
1210 }
1211 }
1212 else
1213 {
1214 AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
1215 rc = VERR_PGM_HANDLER_NOT_FOUND;
1216 }
1217
1218 PGM_UNLOCK(pVM);
1219 return rc;
1220}
1221
1222
1223/**
1224 * Special version of PGMHandlerPhysicalReset used by MMIO2 w/ dirty page
1225 * tracking.
1226 *
1227 * @returns VBox status code.
1228 * @param pVM The cross context VM structure.
1229 * @param GCPhys The start address of the handler region.
1230 * @param pvBitmap Dirty bitmap. Caller has cleared this already, only
1231 * dirty bits will be set. Caller also made sure it's big
1232 * enough.
1233 * @param offBitmap Dirty bitmap offset.
1234 * @remarks Caller must own the PGM critical section.
1235 */
1236DECLHIDDEN(int) pgmHandlerPhysicalResetMmio2WithBitmap(PVMCC pVM, RTGCPHYS GCPhys, void *pvBitmap, uint32_t offBitmap)
1237{
1238 LogFlow(("pgmHandlerPhysicalResetMmio2WithBitmap GCPhys=%RGp\n", GCPhys));
1239 PGM_LOCK_ASSERT_OWNER(pVM);
1240
1241 /*
1242 * Find the handler.
1243 */
1244 int rc;
1245 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1246 if (RT_LIKELY(pCur))
1247 {
1248 /*
1249 * Validate kind.
1250 */
1251 PPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
1252 if (pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE)
1253 {
1254 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerReset));
1255
1256 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1257 Assert(pRam);
1258 Assert(pRam->GCPhys <= pCur->Core.Key);
1259 Assert(pRam->GCPhysLast >= pCur->Core.KeyLast);
1260
1261 /*
1262 * Set the flags and flush shadow PT entries.
1263 */
1264 if (pCur->cTmpOffPages > 0)
1265 {
1266 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, pvBitmap, offBitmap);
1267 pCur->cTmpOffPages = 0;
1268 }
1269 else
1270 rc = VINF_SUCCESS;
1271 }
1272 else
1273 {
1274 AssertFailed();
1275 rc = VERR_WRONG_TYPE;
1276 }
1277 }
1278 else
1279 {
1280 AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
1281 rc = VERR_PGM_HANDLER_NOT_FOUND;
1282 }
1283
1284 return rc;
1285}
1286
1287
1288/**
1289 * Temporarily turns off the access monitoring of a page within a monitored
1290 * physical write/all page access handler region.
1291 *
1292 * Use this when no further \#PFs are required for that page. Be aware that
1293 * a page directory sync might reset the flags, and turn on access monitoring
1294 * for the page.
1295 *
1296 * The caller must do required page table modifications.
1297 *
1298 * @returns VBox status code.
1299 * @param pVM The cross context VM structure.
1300 * @param GCPhys The start address of the access handler. This
1301 * must be a fully page aligned range or we risk
1302 * messing up other handlers installed for the
1303 * start and end pages.
1304 * @param GCPhysPage The physical address of the page to turn off
1305 * access monitoring for.
1306 */
1307VMMDECL(int) PGMHandlerPhysicalPageTempOff(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
1308{
1309 LogFlow(("PGMHandlerPhysicalPageTempOff GCPhysPage=%RGp\n", GCPhysPage));
1310 PGM_LOCK_VOID(pVM);
1311
1312 /*
1313 * Validate the range.
1314 */
1315 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1316 if (RT_LIKELY(pCur))
1317 {
1318 if (RT_LIKELY( GCPhysPage >= pCur->Core.Key
1319 && GCPhysPage <= pCur->Core.KeyLast))
1320 {
1321 Assert(!(pCur->Core.Key & GUEST_PAGE_OFFSET_MASK));
1322 Assert((pCur->Core.KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK);
1323
1324 PPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
1325 AssertReturnStmt( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
1326 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL,
1327 PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
1328
1329 /*
1330 * Change the page status.
1331 */
1332 PPGMPAGE pPage;
1333 PPGMRAMRANGE pRam;
1334 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhysPage, &pPage, &pRam);
1335 AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
1336 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
1337 {
1338 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1339 pCur->cTmpOffPages++;
1340
1341#ifdef VBOX_WITH_NATIVE_NEM
1342 /* Tell NEM about the protection change (VGA is using this to track dirty pages). */
1343 if (VM_IS_NEM_ENABLED(pVM))
1344 {
1345 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1346 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1347 NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
1348 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
1349 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1350 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1351 }
1352#endif
1353 }
1354 PGM_UNLOCK(pVM);
1355 return VINF_SUCCESS;
1356 }
1357 PGM_UNLOCK(pVM);
1358 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
1359 GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
1360 return VERR_INVALID_PARAMETER;
1361 }
1362 PGM_UNLOCK(pVM);
1363 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
1364 return VERR_PGM_HANDLER_NOT_FOUND;
1365}
1366
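/*
 * Dirty-tracking pattern built on this API (the shape the VGA device uses;
 * the surrounding names are placeholders):
 *
 *     // In the write handler: the first trap on a page disarms it, so
 *     // subsequent writes to that page go straight through.
 *     rc = PGMHandlerPhysicalPageTempOff(pVM, GCPhysVRam, GCPhysFaultPage);
 *     ...
 *     // Once the frame has been consumed, re-arm the whole range in one go:
 *     rc = PGMHandlerPhysicalReset(pVM, GCPhysVRam);
 */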
1367
1368/**
1369 * Resolves an MMIO2 page.
1370 *
1371 * Caller has taken the PGM lock.
1372 *
1373 * @returns Pointer to the page if valid, NULL otherwise
1374 * @param pVM The cross context VM structure.
1375 * @param pDevIns The device owning it.
1376 * @param hMmio2 The MMIO2 region.
1377 * @param offMmio2Page The offset into the region.
1378 */
1379static PPGMPAGE pgmPhysResolveMmio2PageLocked(PVMCC pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS offMmio2Page)
1380{
1381 /* Only works if the handle is in the handle table! */
1382 AssertReturn(hMmio2 != 0, NULL);
1383 hMmio2--;
1384
1385 /* Must check the first one for PGMREGMMIO2RANGE_F_FIRST_CHUNK. */
1386 AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), NULL);
1387 PPGMREGMMIO2RANGE pCur = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2];
1388 AssertReturn(pCur, NULL);
1389 AssertReturn(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, NULL);
1390
1391 /* Loop thru the sub-ranges till we find the one covering offMmio2Page. */
1392 for (;;)
1393 {
1394#ifdef IN_RING3
1395 AssertReturn(pCur->pDevInsR3 == pDevIns, NULL);
1396#else
1397 AssertReturn(pCur->pDevInsR3 == pDevIns->pDevInsForR3, NULL);
1398#endif
1399
1400 /* Does it match the offset? */
1401 if (offMmio2Page < pCur->cbReal)
1402 return &pCur->RamRange.aPages[offMmio2Page >> GUEST_PAGE_SHIFT];
1403
1404 /* Advance if we can. */
1405 AssertReturn(!(pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK), NULL);
1406 offMmio2Page -= pCur->cbReal;
1407 hMmio2++;
1408 AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), NULL);
1409 pCur = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2];
1410 AssertReturn(pCur, NULL);
1411 }
1412}
1413
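/*
 * Handle encoding example: hMmio2 is a 1-based index into apMmio2Ranges
 * (0 is invalid, hence the decrement above).  With hMmio2=1 the lookup starts
 * at apMmio2Ranges[0]; if offMmio2Page >= cbReal of that chunk, cbReal is
 * subtracted and the walk continues in apMmio2Ranges[1], and so on until the
 * chunk flagged PGMREGMMIO2RANGE_F_LAST_CHUNK.
 */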
1414
1415/**
1416 * Replaces an MMIO page with an MMIO2 page.
1417 *
1418 * This is a worker for IOMMMIOMapMMIO2Page that works in a similar way to
1419 * PGMHandlerPhysicalPageTempOff but for an MMIO page. Since an MMIO page has no
1420 * backing, the caller must provide a replacement page. For various reasons the
1421 * replacement page must be an MMIO2 page.
1422 *
1423 * The caller must do required page table modifications. You can get away
1424 * without making any modifications since it's an MMIO page; the cost is an extra
1425 * \#PF which will then resync the page.
1426 *
1427 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
1428 *
1429 * The caller may still get handler callback even after this call and must be
1430 * able to deal correctly with such calls. The reason for these callbacks is
1431 * either that we're executing in the recompiler (which doesn't know about this
1432 * arrangement) or that we've been restored from saved state (where we won't
1433 * save the change).
1434 *
1435 * @returns VBox status code.
1436 * @param pVM The cross context VM structure.
1437 * @param GCPhys The start address of the access handler. This
1438 * must be a fully page aligned range or we risk
1439 * messing up other handlers installed for the
1440 * start and end pages.
1441 * @param GCPhysPage The physical address of the page to turn off
1442 * access monitoring for and replace with the MMIO2
1443 * page.
1444 * @param pDevIns The device instance owning @a hMmio2.
1445 * @param hMmio2 Handle to the MMIO2 region containing the page
1446 * to remap into the MMIO page at @a GCPhys.
1447 * @param offMmio2PageRemap The offset into @a hMmio2 of the MMIO2 page that
1448 * should serve as backing memory.
1449 *
1450 * @remark May cause a page pool flush if used on a page that is already
1451 * aliased.
1452 *
1453 * @note This trick only works reliably if the two pages are never ever
1454 * mapped in the same page table. If they are the page pool code will
1455 * be confused should either of them be flushed. See the special case
1456 * of zero page aliasing mentioned in #3170.
1457 *
1458 */
1459VMMDECL(int) PGMHandlerPhysicalPageAliasMmio2(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage,
1460 PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS offMmio2PageRemap)
1461{
1462#ifdef VBOX_WITH_PGM_NEM_MODE
1463 AssertReturn(!VM_IS_NEM_ENABLED(pVM) || !pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
1464#endif
1465 PGM_LOCK_VOID(pVM);
1466
1467 /*
1468 * Resolve the MMIO2 reference.
1469 */
1470 PPGMPAGE pPageRemap = pgmPhysResolveMmio2PageLocked(pVM, pDevIns, hMmio2, offMmio2PageRemap);
1471 if (RT_LIKELY(pPageRemap))
1472 AssertMsgReturnStmt(PGM_PAGE_GET_TYPE(pPageRemap) == PGMPAGETYPE_MMIO2,
1473 ("hMmio2=%RU64 offMmio2PageRemap=%RGp %R[pgmpage]\n", hMmio2, offMmio2PageRemap, pPageRemap),
1474 PGM_UNLOCK(pVM), VERR_PGM_PHYS_NOT_MMIO2);
1475 else
1476 {
1477 PGM_UNLOCK(pVM);
1478 return VERR_OUT_OF_RANGE;
1479 }
1480
1481 /*
1482 * Lookup and validate the range.
1483 */
1484 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1485 if (RT_LIKELY(pCur))
1486 {
1487 if (RT_LIKELY( GCPhysPage >= pCur->Core.Key
1488 && GCPhysPage <= pCur->Core.KeyLast))
1489 {
1490 PPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
1491 AssertReturnStmt(pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO, PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
1492 AssertReturnStmt(!(pCur->Core.Key & GUEST_PAGE_OFFSET_MASK), PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1493 AssertReturnStmt((pCur->Core.KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK,
1494 PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1495
1496 /*
1497 * Validate the page.
1498 */
1499 PPGMPAGE pPage;
1500 PPGMRAMRANGE pRam;
1501 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhysPage, &pPage, &pRam);
1502 AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
1503 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1504 {
1505 AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO,
1506 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1507 VERR_PGM_PHYS_NOT_MMIO2);
1508 if (PGM_PAGE_GET_HCPHYS(pPage) == PGM_PAGE_GET_HCPHYS(pPageRemap))
1509 {
1510 PGM_UNLOCK(pVM);
1511 return VINF_PGM_HANDLER_ALREADY_ALIASED;
1512 }
1513
1514 /*
1515 * The page is already mapped as some other page, reset it
1516 * to an MMIO/ZERO page before doing the new mapping.
1517 */
1518 Log(("PGMHandlerPhysicalPageAliasMmio2: GCPhysPage=%RGp (%R[pgmpage]; %RHp -> %RHp\n",
1519 GCPhysPage, pPage, PGM_PAGE_GET_HCPHYS(pPage), PGM_PAGE_GET_HCPHYS(pPageRemap)));
1520 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage, pRam, false /*fDoAccounting*/);
1521 pCur->cAliasedPages--;
1522 }
1523 Assert(PGM_PAGE_IS_ZERO(pPage));
1524
1525 /*
1526 * Do the actual remapping here.
1527 * This page now serves as an alias for the backing memory specified.
1528 */
1529 LogFlow(("PGMHandlerPhysicalPageAliasMmio2: %RGp (%R[pgmpage]) alias for %RU64/%RGp (%R[pgmpage])\n",
1530 GCPhysPage, pPage, hMmio2, offMmio2PageRemap, pPageRemap ));
1531 PGM_PAGE_SET_HCPHYS(pVM, pPage, PGM_PAGE_GET_HCPHYS(pPageRemap));
1532 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
1533 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1534 PGM_PAGE_SET_PAGEID(pVM, pPage, PGM_PAGE_GET_PAGEID(pPageRemap));
1535 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1536 pCur->cAliasedPages++;
1537 Assert(pCur->cAliasedPages <= pCur->cPages);
1538
1539 /* Flush its TLB entry. */
1540 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
1541
1542#ifdef VBOX_WITH_NATIVE_NEM
1543 /* Tell NEM about the backing and protection change. */
1544 if (VM_IS_NEM_ENABLED(pVM))
1545 {
1546 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1547 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, pVM->pgm.s.HCPhysZeroPg, PGM_PAGE_GET_HCPHYS(pPage),
1548 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
1549 pgmPhysPageCalcNemProtection(pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO),
1550 PGMPAGETYPE_MMIO2_ALIAS_MMIO, &u2State);
1551 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1552 }
1553#endif
1554 LogFlow(("PGMHandlerPhysicalPageAliasMmio2: => %R[pgmpage]\n", pPage));
1555 PGM_UNLOCK(pVM);
1556 return VINF_SUCCESS;
1557 }
1558
1559 PGM_UNLOCK(pVM);
1560 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
1561 GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
1562 return VERR_INVALID_PARAMETER;
1563 }
1564
1565 PGM_UNLOCK(pVM);
1566 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
1567 return VERR_PGM_HANDLER_NOT_FOUND;
1568}
1569
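/*
 * Remap/restore sketch (IOMMMIOMapMMIO2Page is the in-tree caller; the names
 * below are placeholders):
 *
 *     rc = PGMHandlerPhysicalPageAliasMmio2(pVM, GCPhysMmioRange, GCPhysFaultPage,
 *                                           pDevIns, hMmio2, offIntoMmio2);
 *     ...
 *     rc = PGMHandlerPhysicalReset(pVM, GCPhysMmioRange);  // undoes the aliasing
 */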
1570
1571/**
1572 * Replaces an MMIO page with an arbitrary HC page in the shadow page tables.
1573 *
1574 * This differs from PGMHandlerPhysicalPageAliasMmio2 in that the page doesn't
1575 * need to be a known MMIO2 page and that only shadow paging may access the
1576 * page. The latter distinction is important because the only use for this
1577 * feature is for mapping the special APIC access page that VT-x uses to detect
1578 * APIC MMIO operations, the page is shared between all guest CPUs and actually
1579 * not written to. At least at the moment.
1580 *
1581 * The caller must do required page table modifications. You can get away
1582 * without making any modifications since it's an MMIO page; the cost is an extra
1583 * \#PF which will then resync the page.
1584 *
1585 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
1586 *
1587 *
1588 * @returns VBox status code.
1589 * @param pVM The cross context VM structure.
1590 * @param GCPhys The start address of the access handler. This
1591 * must be a fully page aligned range or we risk
1592 * messing up other handlers installed for the
1593 * start and end pages.
1594 * @param GCPhysPage The physical address of the page to turn off
1595 * access monitoring for.
1596 * @param HCPhysPageRemap The physical address of the HC page that
1597 * serves as backing memory.
1598 *
1599 * @remark May cause a page pool flush if used on a page that is already
1600 * aliased.
1601 */
1602VMMDECL(int) PGMHandlerPhysicalPageAliasHC(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTHCPHYS HCPhysPageRemap)
1603{
1604/// Assert(!IOMIsLockOwner(pVM)); /* We mustn't own any other locks when calling this */
1605#ifdef VBOX_WITH_PGM_NEM_MODE
1606 AssertReturn(!VM_IS_NEM_ENABLED(pVM) || !pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
1607#endif
1608 PGM_LOCK_VOID(pVM);
1609
1610 /*
1611 * Lookup and validate the range.
1612 */
1613 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1614 if (RT_LIKELY(pCur))
1615 {
1616 if (RT_LIKELY( GCPhysPage >= pCur->Core.Key
1617 && GCPhysPage <= pCur->Core.KeyLast))
1618 {
1619 PPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
1620 AssertReturnStmt(pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO, PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
1621 AssertReturnStmt(!(pCur->Core.Key & GUEST_PAGE_OFFSET_MASK), PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1622 AssertReturnStmt((pCur->Core.KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK,
1623 PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1624
1625 /*
1626 * Get and validate the pages.
1627 */
1628 PPGMPAGE pPage;
1629 int rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage);
1630 AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
1631 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1632 {
1633 PGM_UNLOCK(pVM);
1634 AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO,
1635 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1636 VERR_PGM_PHYS_NOT_MMIO2);
1637 return VINF_PGM_HANDLER_ALREADY_ALIASED;
1638 }
1639 Assert(PGM_PAGE_IS_ZERO(pPage));
1640
1641 /*
1642 * Do the actual remapping here.
1643 * This page now serves as an alias for the backing memory
1644 * specified as far as shadow paging is concerned.
1645 */
1646 LogFlow(("PGMHandlerPhysicalPageAliasHC: %RGp (%R[pgmpage]) alias for %RHp\n",
1647 GCPhysPage, pPage, HCPhysPageRemap));
1648 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhysPageRemap);
1649 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
1650 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1651 PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
1652 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1653 pCur->cAliasedPages++;
1654 Assert(pCur->cAliasedPages <= pCur->cPages);
1655
1656 /* Flush its TLB entry. */
1657 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
1658
1659#ifdef VBOX_WITH_NATIVE_NEM
1660 /* Tell NEM about the backing and protection change. */
1661 if (VM_IS_NEM_ENABLED(pVM))
1662 {
1663 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhysPage);
1664 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1665 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, pVM->pgm.s.HCPhysZeroPg, PGM_PAGE_GET_HCPHYS(pPage),
1666 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
1667 pgmPhysPageCalcNemProtection(pPage, PGMPAGETYPE_SPECIAL_ALIAS_MMIO),
1668 PGMPAGETYPE_SPECIAL_ALIAS_MMIO, &u2State);
1669 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1670 }
1671#endif
1672 LogFlow(("PGMHandlerPhysicalPageAliasHC: => %R[pgmpage]\n", pPage));
1673 PGM_UNLOCK(pVM);
1674 return VINF_SUCCESS;
1675 }
1676 PGM_UNLOCK(pVM);
1677     AssertMsgFailed(("The page %RGp is outside the range %RGp-%RGp\n",
1678                      GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
1679 return VERR_INVALID_PARAMETER;
1680 }
1681 PGM_UNLOCK(pVM);
1682
1683     AssertMsgFailed(("Specified physical handler start address %RGp is invalid.\n", GCPhys));
1684 return VERR_PGM_HANDLER_NOT_FOUND;
1685}
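
/*
 * Minimal usage sketch, assuming a VT-x caller that already has the APIC
 * access page addresses at hand.  The wrapper name and both parameter names
 * are invented for illustration; the real call site in the VMX code may
 * differ.
 */
static int pgmExampleAliasApicAccessPage(PVMCC pVM, RTGCPHYS GCPhysApicAccess, RTHCPHYS HCPhysApicAccess)
{
    /* Redirect the monitored MMIO page to the shared host-side page.  In this
       sketch the handler was registered with GCPhysApicAccess as its start
       address and covers (at least) that one page. */
    int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhysApicAccess, GCPhysApicAccess, HCPhysApicAccess);
    if (RT_SUCCESS(rc))
    {
        /* ... run the guest; shadow paging now resolves the page to the alias ... */

        /* Restore MMIO semantics for the page when done. */
        rc = PGMHandlerPhysicalReset(pVM, GCPhysApicAccess);
    }
    return rc;
}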
1686
1687
1688/**
1689 * Checks if a physical range is handled.
1690 *
1691 * @returns true if the range is handled, false if not.
1692 * @param pVM The cross context VM structure.
1693 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
1694 * @remarks Caller must take the PGM lock...
1695 * @thread EMT.
1696 */
1697VMMDECL(bool) PGMHandlerPhysicalIsRegistered(PVMCC pVM, RTGCPHYS GCPhys)
1698{
1699 /*
1700 * Find the handler.
1701 */
1702 PGM_LOCK_VOID(pVM);
1703 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
1704 if (pCur)
1705 {
1706#ifdef VBOX_STRICT
1707 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
1708 PPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
1709 Assert( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
1710 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL
1711 || pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO);
1712#endif
1713 PGM_UNLOCK(pVM);
1714 return true;
1715 }
1716 PGM_UNLOCK(pVM);
1717 return false;
1718}
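
/*
 * Minimal usage sketch; the wrapper and the log strings are invented for
 * illustration only.
 */
static void pgmExampleLogHandlerCoverage(PVMCC pVM, RTGCPHYS GCPhys)
{
    if (PGMHandlerPhysicalIsRegistered(pVM, GCPhys))
        Log(("pgmExampleLogHandlerCoverage: %RGp is covered by a physical access handler\n", GCPhys));
    else
        Log(("pgmExampleLogHandlerCoverage: %RGp has no physical access handler\n", GCPhys));
}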
1719
1720
1721/**
1722 * Checks if it's a disabled all access handler or write access handler at the
1723 * given address.
1724 *
1725 * @returns true if it's an all access handler, false if it's a write access
1726 * handler.
1727 * @param pVM The cross context VM structure.
1728 * @param GCPhys The address of the page with a disabled handler.
1729 *
1730 * @remarks The caller, PGMR3PhysTlbGCPhys2Ptr, must hold the PGM lock.
1731 */
1732bool pgmHandlerPhysicalIsAll(PVMCC pVM, RTGCPHYS GCPhys)
1733{
1734 PGM_LOCK_VOID(pVM);
1735 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
1736 if (!pCur)
1737 {
1738 PGM_UNLOCK(pVM);
1739 AssertFailed();
1740 return true;
1741 }
1742 PPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
1743 Assert( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
1744 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL
1745 || pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO); /* sanity */
1746 /* Only whole pages can be disabled. */
1747 Assert( pCur->Core.Key <= (GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK)
1748 && pCur->Core.KeyLast >= (GCPhys | GUEST_PAGE_OFFSET_MASK));
1749
1750 bool bRet = pCurType->enmKind != PGMPHYSHANDLERKIND_WRITE;
1751 PGM_UNLOCK(pVM);
1752 return bRet;
1753}
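
/*
 * Caller-side sketch of the helper above, loosely modelled on the
 * PGMR3PhysTlbGCPhys2Ptr use case mentioned in the remarks.  The wrapper is
 * invented for illustration; the PGM lock must be owned when calling it.
 */
static bool pgmExampleMayReadPageDirectly(PVMCC pVM, RTGCPHYS GCPhys)
{
    PGM_LOCK_ASSERT_OWNER(pVM);
    /* A page with a disabled write handler may still be read directly; a
       disabled all-access or MMIO handler forbids that too. */
    return !pgmHandlerPhysicalIsAll(pVM, GCPhys);
}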
1754
1755#ifdef VBOX_STRICT
1756
1757/**
1758 * State structure used by the PGMAssertHandlerAndFlagsInSync() function
1759 * and its AVL enumerators.
1760 */
1761typedef struct PGMAHAFIS
1762{
1763 /** The current physical address. */
1764 RTGCPHYS GCPhys;
1765 /** Number of errors. */
1766 unsigned cErrors;
1767 /** Pointer to the VM. */
1768 PVM pVM;
1769} PGMAHAFIS, *PPGMAHAFIS;
1770
1771
1772/**
1773 * Asserts that the handlers+guest-page-tables == ramrange-flags and
1774 * that the physical addresses associated with virtual handlers are correct.
1775 *
1776 * @returns Number of mismatches.
1777 * @param pVM The cross context VM structure.
1778 */
1779VMMDECL(unsigned) PGMAssertHandlerAndFlagsInSync(PVMCC pVM)
1780{
1781 PPGM pPGM = &pVM->pgm.s;
1782 PGMAHAFIS State;
1783 State.GCPhys = 0;
1784 State.cErrors = 0;
1785 State.pVM = pVM;
1786
1787 PGM_LOCK_ASSERT_OWNER(pVM);
1788
1789 /*
1790 * Check the RAM flags against the handlers.
1791 */
1792 for (PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext))
1793 {
1794 const uint32_t cPages = pRam->cb >> GUEST_PAGE_SHIFT;
1795 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1796 {
1797 PGMPAGE const *pPage = &pRam->aPages[iPage];
1798 if (PGM_PAGE_HAS_ANY_HANDLERS(pPage))
1799 {
1800 State.GCPhys = pRam->GCPhys + (iPage << GUEST_PAGE_SHIFT);
1801
1802 /*
1803 * Physical first - calculate the state based on the handlers
1804 * active on the page, then compare.
1805 */
1806 if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage))
1807 {
1808 /* the first */
1809 PPGMPHYSHANDLER pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pPGM->CTX_SUFF(pTrees)->PhysHandlers, State.GCPhys);
1810 if (!pPhys)
1811 {
1812 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTX_SUFF(pTrees)->PhysHandlers, State.GCPhys, true);
1813 if ( pPhys
1814 && pPhys->Core.Key > (State.GCPhys + GUEST_PAGE_SIZE - 1))
1815 pPhys = NULL;
1816 Assert(!pPhys || pPhys->Core.Key >= State.GCPhys);
1817 }
1818 if (pPhys)
1819 {
1820 PPGMPHYSHANDLERTYPEINT pPhysType = (PPGMPHYSHANDLERTYPEINT)MMHyperHeapOffsetToPtr(pVM, pPhys->hType);
1821 unsigned uState = pPhysType->uState;
1822
1823 /* more? */
1824 while (pPhys->Core.KeyLast < (State.GCPhys | GUEST_PAGE_OFFSET_MASK))
1825 {
1826 PPGMPHYSHANDLER pPhys2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTX_SUFF(pTrees)->PhysHandlers,
1827 pPhys->Core.KeyLast + 1, true);
1828 if ( !pPhys2
1829 || pPhys2->Core.Key > (State.GCPhys | GUEST_PAGE_OFFSET_MASK))
1830 break;
1831 PPGMPHYSHANDLERTYPEINT pPhysType2 = (PPGMPHYSHANDLERTYPEINT)MMHyperHeapOffsetToPtr(pVM, pPhys2->hType);
1832 uState = RT_MAX(uState, pPhysType2->uState);
1833 pPhys = pPhys2;
1834 }
1835
1836                     /* Compare. */
1837 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != uState
1838 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
1839 {
1840 AssertMsgFailed(("ram range vs phys handler flags mismatch. GCPhys=%RGp state=%d expected=%d %s\n",
1841 State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), uState, pPhysType->pszDesc));
1842 State.cErrors++;
1843 }
1844 }
1845 else
1846 {
1847 AssertMsgFailed(("ram range vs phys handler mismatch. no handler for GCPhys=%RGp\n", State.GCPhys));
1848 State.cErrors++;
1849 }
1850 }
1851 }
1852 } /* foreach page in ram range. */
1853 } /* foreach ram range. */
1854
1855 /*
1856 * Do the reverse check for physical handlers.
1857 */
1858 /** @todo */
1859
1860 return State.cErrors;
1861}
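
/*
 * Usage sketch for strict builds; the wrapper is invented for illustration
 * and simply runs the check above while owning the PGM lock.
 */
static void pgmExampleAssertHandlersInSync(PVMCC pVM)
{
    PGM_LOCK_VOID(pVM); /* PGMAssertHandlerAndFlagsInSync asserts lock ownership. */
    unsigned const cErrors = PGMAssertHandlerAndFlagsInSync(pVM);
    AssertMsg(cErrors == 0, ("%u handler/flag mismatches\n", cErrors));
    PGM_UNLOCK(pVM);
}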
1862
1863#endif /* VBOX_STRICT */
1864