source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@ 93635

Last change on this file: r93635, checked in by vboxsync, 3 years ago

VMM/PGM,VMM/PDM,VGA: Consolidate the user parameters of the physical access handlers into a single uint64_t value that shouldn't be a pointer, at least not for ring-0 callbacks. Special hack for devices where it's translated from a ring-0 device instance index into a current context PPDMDEVINS (not really tested yet). bugref:10094
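
For context, the physical access handler callbacks in this file now receive that consolidated value as a plain uint64_t uUser and decode it themselves; the ROM handlers below, for example, use it to carry the PGMROMRANGE::GCPhys lookup key. The following is a minimal illustrative sketch of the pattern only: the handler name is hypothetical, and it reuses the types and the pgmPhysRomLookupByBase() helper defined further down in this file, so it is not meant to build on its own.

    /* Illustrative sketch: a physical access handler decoding its consolidated uint64_t user value. */
    static DECLCALLBACK(VBOXSTRICTRC)
    exampleRomWriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
                           PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
    {
        /* uUser is an opaque 64-bit key chosen at handler registration; the ROM handlers
           in this file interpret it as the base guest-physical address of the ROM range. */
        PPGMROMRANGE const pRom = pgmPhysRomLookupByBase(pVM, uUser);
        AssertReturn(pRom, VERR_INTERNAL_ERROR_3);
        RT_NOREF(pVCpu, GCPhys, pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin);
        return VINF_PGM_HANDLER_DO_DEFAULT; /* let PGM carry out the default read/write */
    }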

1/* $Id: PGMAllPhys.cpp 93635 2022-02-07 10:43:45Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM_PHYS
23#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
24#include <VBox/vmm/pgm.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/iom.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/nem.h>
30#include "PGMInternal.h"
31#include <VBox/vmm/vmcc.h>
32#include "PGMInline.h"
33#include <VBox/param.h>
34#include <VBox/err.h>
35#include <iprt/assert.h>
36#include <iprt/string.h>
37#include <iprt/asm-amd64-x86.h>
38#include <VBox/log.h>
39#ifdef IN_RING3
40# include <iprt/thread.h>
41#endif
42
43
44/*********************************************************************************************************************************
45* Defined Constants And Macros *
46*********************************************************************************************************************************/
47/** Enable the physical TLB. */
48#define PGM_WITH_PHYS_TLB
49
50/** @def PGM_HANDLER_PHYS_IS_VALID_STATUS
51 * Checks if valid physical access handler return code (normal handler, not PF).
52 *
53 * Checks if the given strict status code is one of the expected ones for a
54 * physical access handler in the current context.
55 *
56 * @returns true or false.
57 * @param a_rcStrict The status code.
58 * @param a_fWrite Whether it is a write or read being serviced.
59 *
60 * @remarks We wish to keep the list of statuses here as short as possible.
61 * When changing, please make sure to update the PGMPhysRead,
62 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
63 */
64#ifdef IN_RING3
65# define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
66 ( (a_rcStrict) == VINF_SUCCESS \
67 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
68#elif defined(IN_RING0)
69#define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
70 ( (a_rcStrict) == VINF_SUCCESS \
71 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT \
72 \
73 || (a_rcStrict) == ((a_fWrite) ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ) \
74 || (a_rcStrict) == VINF_IOM_R3_MMIO_READ_WRITE \
75 || ((a_rcStrict) == VINF_IOM_R3_MMIO_COMMIT_WRITE && (a_fWrite)) \
76 \
77 || (a_rcStrict) == VINF_EM_RAW_EMULATE_INSTR \
78 || (a_rcStrict) == VINF_EM_DBG_STOP \
79 || (a_rcStrict) == VINF_EM_DBG_EVENT \
80 || (a_rcStrict) == VINF_EM_DBG_BREAKPOINT \
81 || (a_rcStrict) == VINF_EM_OFF \
82 || (a_rcStrict) == VINF_EM_SUSPEND \
83 || (a_rcStrict) == VINF_EM_RESET \
84 )
85#else
86# error "Context?"
87#endif
88
89/** @def PGM_HANDLER_VIRT_IS_VALID_STATUS
90 * Checks if valid virtual access handler return code (normal handler, not PF).
91 *
92 * Checks if the given strict status code is one of the expected ones for a
93 * virtual access handler in the current context.
94 *
95 * @returns true or false.
96 * @param a_rcStrict The status code.
97 * @param a_fWrite Whether it is a write or read being serviced.
98 *
99 * @remarks We wish to keep the list of statuses here as short as possible.
100 * When changing, please make sure to update the PGMPhysRead,
101 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
102 */
103#ifdef IN_RING3
104# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
105 ( (a_rcStrict) == VINF_SUCCESS \
106 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
107#elif defined(IN_RING0)
108# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
109 (false /* no virtual handlers in ring-0! */ )
110#else
111# error "Context?"
112#endif
113
114
115
116#ifndef IN_RING3
117
118/**
119 * @callback_method_impl{FNPGMPHYSHANDLER,
120 * Dummy for forcing ring-3 handling of the access.}
121 */
122DECLEXPORT(VBOXSTRICTRC)
123pgmPhysHandlerRedirectToHC(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
124 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
125{
126 RT_NOREF(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin, uUser);
127 return VINF_EM_RAW_EMULATE_INSTR;
128}
129
130
131/**
132 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
133 * Dummy for forcing ring-3 handling of the access.}
134 */
135VMMDECL(VBOXSTRICTRC) pgmPhysPfHandlerRedirectToHC(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
136 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
137{
138 RT_NOREF(pVM, pVCpu, uErrorCode, pRegFrame, pvFault, GCPhysFault, uUser);
139 return VINF_EM_RAW_EMULATE_INSTR;
140}
141
142#endif /* !IN_RING3 */
143
144/**
145 * Looks up a ROM range by its PGMROMRANGE::GCPhys value.
146 */
147DECLINLINE(PPGMROMRANGE) pgmPhysRomLookupByBase(PVMCC pVM, RTGCPHYS GCPhys)
148{
149 for (PPGMROMRANGE pRom = pVM->pgm.s.CTX_SUFF(pRomRanges); pRom; pRom = pRom->CTX_SUFF(pNext))
150 if (pRom->GCPhys == GCPhys)
151 return pRom;
152 return NULL;
153}
154
155#ifndef IN_RING3
156
157/**
158 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
159 * \#PF access handler callback for guest ROM range write access.}
160 *
161 * @remarks The @a uUser argument is the PGMROMRANGE::GCPhys value.
162 */
163DECLEXPORT(VBOXSTRICTRC) pgmPhysRomWritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
164 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
165
166{
167 PPGMROMRANGE const pRom = pgmPhysRomLookupByBase(pVM, uUser);
168 AssertReturn(pRom, VINF_EM_RAW_EMULATE_INSTR);
169 uint32_t const iPage = (GCPhysFault - pRom->GCPhys) >> GUEST_PAGE_SHIFT;
170 int rc;
171 RT_NOREF(uErrorCode, pvFault);
172
173 Assert(uErrorCode & X86_TRAP_PF_RW); /* This shall not be used for read access! */
174
175 Assert(iPage < (pRom->cb >> GUEST_PAGE_SHIFT));
176 switch (pRom->aPages[iPage].enmProt)
177 {
178 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
179 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
180 {
181 /*
182 * If it's a simple instruction which doesn't change the cpu state
183 * we will simply skip it. Otherwise we'll have to defer it to REM.
184 */
185 uint32_t cbOp;
186 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
187 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
188 if ( RT_SUCCESS(rc)
189 && pDis->uCpuMode == DISCPUMODE_32BIT /** @todo why does this matter? */
190 && !(pDis->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP | DISPREFIX_SEG)))
191 {
192 switch (pDis->bOpCode)
193 {
194 /** @todo Find other instructions we can safely skip, possibly
195 * adding this kind of detection to DIS or EM. */
196 case OP_MOV:
197 pRegFrame->rip += cbOp;
198 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZGuestROMWriteHandled);
199 return VINF_SUCCESS;
200 }
201 }
202 break;
203 }
204
205 case PGMROMPROT_READ_RAM_WRITE_RAM:
206 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
207 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
208 AssertRC(rc);
209 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
210
211 case PGMROMPROT_READ_ROM_WRITE_RAM:
212 /* Handle it in ring-3 because it's *way* easier there. */
213 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
214 break;
215
216 default:
217 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
218 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
219 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
220 }
221
222 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZGuestROMWriteUnhandled);
223 return VINF_EM_RAW_EMULATE_INSTR;
224}
225
226#endif /* !IN_RING3 */
227
228
229/**
230 * @callback_method_impl{FNPGMPHYSHANDLER,
231 * Access handler callback for ROM write accesses.}
232 *
233 * @remarks The @a uUser argument is the PGMROMRANGE::GCPhys value.
234 */
235PGM_ALL_CB2_DECL(VBOXSTRICTRC)
236pgmPhysRomWriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
237 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
238{
239 PPGMROMRANGE const pRom = pgmPhysRomLookupByBase(pVM, uUser);
240 AssertReturn(pRom, VERR_INTERNAL_ERROR_3);
241 uint32_t const iPage = (GCPhys - pRom->GCPhys) >> GUEST_PAGE_SHIFT;
242 Assert(iPage < (pRom->cb >> GUEST_PAGE_SHIFT));
243 PPGMROMPAGE const pRomPage = &pRom->aPages[iPage];
244
245 Log5(("pgmPhysRomWriteHandler: %d %c %#08RGp %#04zx\n", pRomPage->enmProt, enmAccessType == PGMACCESSTYPE_READ ? 'R' : 'W', GCPhys, cbBuf));
246 RT_NOREF(pVCpu, pvPhys, enmOrigin);
247
248 if (enmAccessType == PGMACCESSTYPE_READ)
249 {
250 switch (pRomPage->enmProt)
251 {
252 /*
253 * Take the default action.
254 */
255 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
256 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
257 case PGMROMPROT_READ_ROM_WRITE_RAM:
258 case PGMROMPROT_READ_RAM_WRITE_RAM:
259 return VINF_PGM_HANDLER_DO_DEFAULT;
260
261 default:
262 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
263 pRom->aPages[iPage].enmProt, iPage, GCPhys),
264 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
265 }
266 }
267 else
268 {
269 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
270 switch (pRomPage->enmProt)
271 {
272 /*
273 * Ignore writes.
274 */
275 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
276 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
277 return VINF_SUCCESS;
278
279 /*
280 * Write to the RAM page.
281 */
282 case PGMROMPROT_READ_ROM_WRITE_RAM:
283 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
284 {
285 /* This should be impossible now, pvPhys doesn't work cross-page any longer. */
286 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> GUEST_PAGE_SHIFT) == iPage);
287
288 /*
289 * Take the lock, do lazy allocation, map the page and copy the data.
290 *
291 * Note that we have to bypass the mapping TLB since it works on
292 * guest physical addresses and entering the shadow page would
293 * kind of screw things up...
294 */
295 PGM_LOCK_VOID(pVM);
296
297 PPGMPAGE pShadowPage = &pRomPage->Shadow;
298 if (!PGMROMPROT_IS_ROM(pRomPage->enmProt))
299 {
300 pShadowPage = pgmPhysGetPage(pVM, GCPhys);
301 AssertLogRelMsgReturnStmt(pShadowPage, ("%RGp\n", GCPhys), PGM_UNLOCK(pVM), VERR_PGM_PHYS_PAGE_GET_IPE);
302 }
303
304 void *pvDstPage;
305 int rc;
306#if defined(VBOX_WITH_PGM_NEM_MODE) && defined(IN_RING3)
307 if (PGM_IS_IN_NEM_MODE(pVM) && PGMROMPROT_IS_ROM(pRomPage->enmProt))
308 {
309 pvDstPage = &pRom->pbR3Alternate[GCPhys - pRom->GCPhys];
310 rc = VINF_SUCCESS;
311 }
312 else
313#endif
314 {
315 rc = pgmPhysPageMakeWritableAndMap(pVM, pShadowPage, GCPhys & X86_PTE_PG_MASK, &pvDstPage);
316 if (RT_SUCCESS(rc))
317 pvDstPage = (uint8_t *)pvDstPage + (GCPhys & GUEST_PAGE_OFFSET_MASK);
318 }
319 if (RT_SUCCESS(rc))
320 {
321 memcpy((uint8_t *)pvDstPage, pvBuf, cbBuf); /* pvDstPage already includes the page offset */
322 pRomPage->LiveSave.fWrittenTo = true;
323
324 AssertMsg( rc == VINF_SUCCESS
325 || ( rc == VINF_PGM_SYNC_CR3
326 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
327 , ("%Rrc\n", rc));
328 rc = VINF_SUCCESS;
329 }
330
331 PGM_UNLOCK(pVM);
332 return rc;
333 }
334
335 default:
336 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
337 pRom->aPages[iPage].enmProt, iPage, GCPhys),
338 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
339 }
340 }
341}
342
343
344/**
345 * Common worker for pgmPhysMmio2WriteHandler and pgmPhysMmio2WritePfHandler.
346 */
347static VBOXSTRICTRC pgmPhysMmio2WriteHandlerCommon(PVMCC pVM, PVMCPUCC pVCpu, uint64_t hMmio2, RTGCPHYS GCPhys, RTGCPTR GCPtr)
348{
349 /*
350 * Get the MMIO2 range.
351 */
352 AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), VERR_INTERNAL_ERROR_3);
353 AssertReturn(hMmio2 != 0, VERR_INTERNAL_ERROR_3);
354 PPGMREGMMIO2RANGE pMmio2 = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2 - 1];
355 Assert(pMmio2->idMmio2 == hMmio2);
356 AssertReturn((pMmio2->fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES) == PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES,
357 VERR_INTERNAL_ERROR_4);
358
359 /*
360 * Get the page and make sure it's an MMIO2 page.
361 */
362 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
363 AssertReturn(pPage, VINF_EM_RAW_EMULATE_INSTR);
364 AssertReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2, VINF_EM_RAW_EMULATE_INSTR);
365
366 /*
367 * Set the dirty flag so we can avoid scanning all the pages when it isn't dirty.
368 * (The PGM_PAGE_HNDL_PHYS_STATE_DISABLED handler state indicates that a single
369 * page is dirty, saving the need for additional storage (bitmap).)
370 */
371 pMmio2->fFlags |= PGMREGMMIO2RANGE_F_IS_DIRTY;
372
373 /*
374 * Disable the handler for this page.
375 */
376 int rc = PGMHandlerPhysicalPageTempOff(pVM, pMmio2->RamRange.GCPhys, GCPhys & X86_PTE_PG_MASK);
377 AssertRC(rc);
378#ifndef IN_RING3
379 if (RT_SUCCESS(rc) && GCPtr != ~(RTGCPTR)0)
380 {
381 rc = PGMShwMakePageWritable(pVCpu, GCPtr, PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT);
382 AssertMsgReturn(rc == VINF_SUCCESS, ("PGMShwModifyPage -> GCPtr=%RGv rc=%d\n", GCPtr, rc), rc);
383 }
384#else
385 RT_NOREF(pVCpu, GCPtr);
386#endif
387 return VINF_SUCCESS;
388}
389
390
391#ifndef IN_RING3
392/**
393 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
394 * \#PF access handler callback for guest MMIO2 dirty page tracing.}
395 *
396 * @remarks The @a uUser is the MMIO2 index.
397 */
398DECLEXPORT(VBOXSTRICTRC) pgmPhysMmio2WritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
399 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
400{
401 RT_NOREF(pVCpu, uErrorCode, pRegFrame);
402 VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
403 if (RT_SUCCESS(rcStrict))
404 {
405 rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, uUser, GCPhysFault, pvFault);
406 PGM_UNLOCK(pVM);
407 }
408 return rcStrict;
409}
410#endif /* !IN_RING3 */
411
412
413/**
414 * @callback_method_impl{FNPGMPHYSHANDLER,
415 * Access handler callback for MMIO2 dirty page tracing.}
416 *
417 * @remarks The @a uUser is the MMIO2 index.
418 */
419PGM_ALL_CB2_DECL(VBOXSTRICTRC)
420pgmPhysMmio2WriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
421 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
422{
423 VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
424 if (RT_SUCCESS(rcStrict))
425 {
426 rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, uUser, GCPhys, ~(RTGCPTR)0);
427 PGM_UNLOCK(pVM);
428 if (rcStrict == VINF_SUCCESS)
429 rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
430 }
431 RT_NOREF(pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin);
432 return rcStrict;
433}
434
435
436/**
437 * Invalidates the RAM range TLBs.
438 *
439 * @param pVM The cross context VM structure.
440 */
441void pgmPhysInvalidRamRangeTlbs(PVMCC pVM)
442{
443 PGM_LOCK_VOID(pVM);
444 RT_ZERO(pVM->pgm.s.apRamRangesTlbR3);
445 RT_ZERO(pVM->pgm.s.apRamRangesTlbR0);
446 PGM_UNLOCK(pVM);
447}
448
449
450/**
451 * Tests if a value of type RTGCPHYS is negative if the type had been signed
452 * instead of unsigned.
453 *
454 * @returns @c true if negative, @c false if positive or zero.
455 * @param a_GCPhys The value to test.
456 * @todo Move me to iprt/types.h.
457 */
458#define RTGCPHYS_IS_NEGATIVE(a_GCPhys) ((a_GCPhys) & ((RTGCPHYS)1 << (sizeof(RTGCPHYS)*8 - 1)))
459
460
461/**
462 * Slow worker for pgmPhysGetRange.
463 *
464 * @copydoc pgmPhysGetRange
465 */
466PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys)
467{
468 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
469
470 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
471 while (pRam)
472 {
473 RTGCPHYS off = GCPhys - pRam->GCPhys;
474 if (off < pRam->cb)
475 {
476 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
477 return pRam;
478 }
479 if (RTGCPHYS_IS_NEGATIVE(off))
480 pRam = pRam->CTX_SUFF(pLeft);
481 else
482 pRam = pRam->CTX_SUFF(pRight);
483 }
484 return NULL;
485}
486
487
488/**
489 * Slow worker for pgmPhysGetRangeAtOrAbove.
490 *
491 * @copydoc pgmPhysGetRangeAtOrAbove
492 */
493PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys)
494{
495 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
496
497 PPGMRAMRANGE pLastLeft = NULL;
498 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
499 while (pRam)
500 {
501 RTGCPHYS off = GCPhys - pRam->GCPhys;
502 if (off < pRam->cb)
503 {
504 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
505 return pRam;
506 }
507 if (RTGCPHYS_IS_NEGATIVE(off))
508 {
509 pLastLeft = pRam;
510 pRam = pRam->CTX_SUFF(pLeft);
511 }
512 else
513 pRam = pRam->CTX_SUFF(pRight);
514 }
515 return pLastLeft;
516}
517
518
519/**
520 * Slow worker for pgmPhysGetPage.
521 *
522 * @copydoc pgmPhysGetPage
523 */
524PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys)
525{
526 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
527
528 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
529 while (pRam)
530 {
531 RTGCPHYS off = GCPhys - pRam->GCPhys;
532 if (off < pRam->cb)
533 {
534 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
535 return &pRam->aPages[off >> GUEST_PAGE_SHIFT];
536 }
537
538 if (RTGCPHYS_IS_NEGATIVE(off))
539 pRam = pRam->CTX_SUFF(pLeft);
540 else
541 pRam = pRam->CTX_SUFF(pRight);
542 }
543 return NULL;
544}
545
546
547/**
548 * Slow worker for pgmPhysGetPageEx.
549 *
550 * @copydoc pgmPhysGetPageEx
551 */
552int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
553{
554 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
555
556 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
557 while (pRam)
558 {
559 RTGCPHYS off = GCPhys - pRam->GCPhys;
560 if (off < pRam->cb)
561 {
562 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
563 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
564 return VINF_SUCCESS;
565 }
566
567 if (RTGCPHYS_IS_NEGATIVE(off))
568 pRam = pRam->CTX_SUFF(pLeft);
569 else
570 pRam = pRam->CTX_SUFF(pRight);
571 }
572
573 *ppPage = NULL;
574 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
575}
576
577
578/**
579 * Slow worker for pgmPhysGetPageAndRangeEx.
580 *
581 * @copydoc pgmPhysGetPageAndRangeEx
582 */
583int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
584{
585 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
586
587 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
588 while (pRam)
589 {
590 RTGCPHYS off = GCPhys - pRam->GCPhys;
591 if (off < pRam->cb)
592 {
593 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
594 *ppRam = pRam;
595 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
596 return VINF_SUCCESS;
597 }
598
599 if (RTGCPHYS_IS_NEGATIVE(off))
600 pRam = pRam->CTX_SUFF(pLeft);
601 else
602 pRam = pRam->CTX_SUFF(pRight);
603 }
604
605 *ppRam = NULL;
606 *ppPage = NULL;
607 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
608}
609
610
611/**
612 * Checks if Address Gate 20 is enabled or not.
613 *
614 * @returns true if enabled.
615 * @returns false if disabled.
616 * @param pVCpu The cross context virtual CPU structure.
617 */
618VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
619{
620 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
621 return pVCpu->pgm.s.fA20Enabled;
622}
623
624
625/**
626 * Validates a GC physical address.
627 *
628 * @returns true if valid.
629 * @returns false if invalid.
630 * @param pVM The cross context VM structure.
631 * @param GCPhys The physical address to validate.
632 */
633VMMDECL(bool) PGMPhysIsGCPhysValid(PVMCC pVM, RTGCPHYS GCPhys)
634{
635 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
636 return pPage != NULL;
637}
638
639
640/**
641 * Checks if a GC physical address is a normal page,
642 * i.e. not ROM, MMIO or reserved.
643 *
644 * @returns true if normal.
645 * @returns false if invalid, ROM, MMIO or reserved page.
646 * @param pVM The cross context VM structure.
647 * @param GCPhys The physical address to check.
648 */
649VMMDECL(bool) PGMPhysIsGCPhysNormal(PVMCC pVM, RTGCPHYS GCPhys)
650{
651 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
652 return pPage
653 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
654}
655
656
657/**
658 * Converts a GC physical address to a HC physical address.
659 *
660 * @returns VINF_SUCCESS on success.
661 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
662 * page but has no physical backing.
663 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
664 * GC physical address.
665 *
666 * @param pVM The cross context VM structure.
667 * @param GCPhys The GC physical address to convert.
668 * @param pHCPhys Where to store the HC physical address on success.
669 */
670VMM_INT_DECL(int) PGMPhysGCPhys2HCPhys(PVMCC pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
671{
672 PGM_LOCK_VOID(pVM);
673 PPGMPAGE pPage;
674 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
675 if (RT_SUCCESS(rc))
676 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & GUEST_PAGE_OFFSET_MASK);
677 PGM_UNLOCK(pVM);
678 return rc;
679}
680
681
682/**
683 * Invalidates all page mapping TLBs.
684 *
685 * @param pVM The cross context VM structure.
686 */
687void pgmPhysInvalidatePageMapTLB(PVMCC pVM)
688{
689 PGM_LOCK_VOID(pVM);
690 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatPageMapTlbFlushes);
691
692 /* Clear the R3 & R0 TLBs completely. */
693 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR0.aEntries); i++)
694 {
695 pVM->pgm.s.PhysTlbR0.aEntries[i].GCPhys = NIL_RTGCPHYS;
696 pVM->pgm.s.PhysTlbR0.aEntries[i].pPage = 0;
697 pVM->pgm.s.PhysTlbR0.aEntries[i].pv = 0;
698 }
699
700 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR3.aEntries); i++)
701 {
702 pVM->pgm.s.PhysTlbR3.aEntries[i].GCPhys = NIL_RTGCPHYS;
703 pVM->pgm.s.PhysTlbR3.aEntries[i].pPage = 0;
704 pVM->pgm.s.PhysTlbR3.aEntries[i].pMap = 0;
705 pVM->pgm.s.PhysTlbR3.aEntries[i].pv = 0;
706 }
707
708 PGM_UNLOCK(pVM);
709}
710
711
712/**
713 * Invalidates a page mapping TLB entry
714 *
715 * @param pVM The cross context VM structure.
716 * @param GCPhys GCPhys entry to flush
717 */
718void pgmPhysInvalidatePageMapTLBEntry(PVMCC pVM, RTGCPHYS GCPhys)
719{
720 PGM_LOCK_ASSERT_OWNER(pVM);
721
722 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatPageMapTlbFlushEntry);
723
724 unsigned const idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
725
726 pVM->pgm.s.PhysTlbR0.aEntries[idx].GCPhys = NIL_RTGCPHYS;
727 pVM->pgm.s.PhysTlbR0.aEntries[idx].pPage = 0;
728 pVM->pgm.s.PhysTlbR0.aEntries[idx].pv = 0;
729
730 pVM->pgm.s.PhysTlbR3.aEntries[idx].GCPhys = NIL_RTGCPHYS;
731 pVM->pgm.s.PhysTlbR3.aEntries[idx].pPage = 0;
732 pVM->pgm.s.PhysTlbR3.aEntries[idx].pMap = 0;
733 pVM->pgm.s.PhysTlbR3.aEntries[idx].pv = 0;
734}
735
736
737/**
738 * Makes sure that there is at least one handy page ready for use.
739 *
740 * This will also take the appropriate actions when reaching water-marks.
741 *
742 * @returns VBox status code.
743 * @retval VINF_SUCCESS on success.
744 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
745 *
746 * @param pVM The cross context VM structure.
747 *
748 * @remarks Must be called from within the PGM critical section. It may
749 * nip back to ring-3/0 in some cases.
750 */
751static int pgmPhysEnsureHandyPage(PVMCC pVM)
752{
753 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
754
755 /*
756 * Do we need to do anything special?
757 */
758#ifdef IN_RING3
759 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
760#else
761 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
762#endif
763 {
764 /*
765 * Allocate pages only if we're out of them, or in ring-3, almost out.
766 */
767#ifdef IN_RING3
768 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
769#else
770 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
771#endif
772 {
773 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
774 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) ));
775#ifdef IN_RING3
776 int rc = PGMR3PhysAllocateHandyPages(pVM);
777#else
778 int rc = pgmR0PhysAllocateHandyPages(pVM, VMMGetCpuId(pVM), false /*fRing3*/);
779#endif
780 if (RT_UNLIKELY(rc != VINF_SUCCESS))
781 {
782 if (RT_FAILURE(rc))
783 return rc;
784 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
785 if (!pVM->pgm.s.cHandyPages)
786 {
787 LogRel(("PGM: no more handy pages!\n"));
788 return VERR_EM_NO_MEMORY;
789 }
790 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
791 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY));
792#ifndef IN_RING3
793 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
794#endif
795 }
796 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
797 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
798 ("%u\n", pVM->pgm.s.cHandyPages),
799 VERR_PGM_HANDY_PAGE_IPE);
800 }
801 else
802 {
803 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
804 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
805#ifndef IN_RING3
806 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
807 {
808 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
809 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
810 }
811#endif
812 }
813 }
814
815 return VINF_SUCCESS;
816}
817
818
819/**
820 * Replace a zero or shared page with new page that we can write to.
821 *
822 * @returns The following VBox status codes.
823 * @retval VINF_SUCCESS on success, pPage is modified.
824 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
825 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
826 *
827 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
828 *
829 * @param pVM The cross context VM structure.
830 * @param pPage The physical page tracking structure. This will
831 * be modified on success.
832 * @param GCPhys The address of the page.
833 *
834 * @remarks Must be called from within the PGM critical section. It may
835 * nip back to ring-3/0 in some cases.
836 *
837 * @remarks This function shouldn't really fail, however if it does
838 * it probably means we've screwed up the size of handy pages and/or
839 * the low-water mark. Or, that some device I/O is causing a lot of
840 * pages to be allocated while the host is in a low-memory
841 * condition. The latter should be handled elsewhere and in a more
842 * controlled manner; it's on the @bugref{3170} todo list...
843 */
844int pgmPhysAllocPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
845{
846 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
847
848 /*
849 * Prereqs.
850 */
851 PGM_LOCK_ASSERT_OWNER(pVM);
852 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
853 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
854
855# ifdef PGM_WITH_LARGE_PAGES
856 /*
857 * Try allocate a large page if applicable.
858 */
859 if ( PGMIsUsingLargePages(pVM)
860 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
861 && !VM_IS_NEM_ENABLED(pVM)) /** @todo NEM: Implement large pages support. */
862 {
863 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
864 PPGMPAGE pBasePage;
865
866 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage);
867 AssertRCReturn(rc, rc); /* paranoia; can't happen. */
868 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE)
869 {
870 rc = pgmPhysAllocLargePage(pVM, GCPhys);
871 if (rc == VINF_SUCCESS)
872 return rc;
873 }
874 /* Mark the base as type page table, so we don't check over and over again. */
875 PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);
876
877 /* fall back to 4KB pages. */
878 }
879# endif
880
881 /*
882 * Flush any shadow page table mappings of the page.
883 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
884 */
885 bool fFlushTLBs = false;
886 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, true /*fFlushTLBs*/, &fFlushTLBs);
887 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
888
889 /*
890 * Ensure that we've got a page handy, take it and use it.
891 */
892 int rc2 = pgmPhysEnsureHandyPage(pVM);
893 if (RT_FAILURE(rc2))
894 {
895 if (fFlushTLBs)
896 PGM_INVL_ALL_VCPU_TLBS(pVM);
897 Assert(rc2 == VERR_EM_NO_MEMORY);
898 return rc2;
899 }
900 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
901 PGM_LOCK_ASSERT_OWNER(pVM);
902 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
903 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
904
905 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
906 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
907 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_GMMPAGEDESC_PHYS);
908 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
909 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
910 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
911
912 /*
913 * There are one or two actions to be taken the next time we allocate handy pages:
914 * - Tell the GMM (global memory manager) what the page is being used for.
915 * (Speeds up replacement operations - sharing and defragmenting.)
916 * - If the current backing is shared, it must be freed.
917 */
918 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
919 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
920
921 void const *pvSharedPage = NULL;
922 if (PGM_PAGE_IS_SHARED(pPage))
923 {
924 /* Mark this shared page for freeing/dereferencing. */
925 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
926 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
927
928 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
929 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
930 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageReplaceShared));
931 pVM->pgm.s.cSharedPages--;
932
933 /* Grab the address of the page so we can make a copy later on. (safe) */
934 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
935 AssertRC(rc);
936 }
937 else
938 {
939 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
940 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatRZPageReplaceZero);
941 pVM->pgm.s.cZeroPages--;
942 }
943
944 /*
945 * Do the PGMPAGE modifications.
946 */
947 pVM->pgm.s.cPrivatePages++;
948 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
949 PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
950 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
951 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
952 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
953
954 /* Copy the shared page contents to the replacement page. */
955 if (pvSharedPage)
956 {
957 /* Get the virtual address of the new page. */
958 PGMPAGEMAPLOCK PgMpLck;
959 void *pvNewPage;
960 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
961 if (RT_SUCCESS(rc))
962 {
963 memcpy(pvNewPage, pvSharedPage, GUEST_PAGE_SIZE); /** @todo write ASMMemCopyPage */
964 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
965 }
966 }
967
968 if ( fFlushTLBs
969 && rc != VINF_PGM_GCPHYS_ALIASED)
970 PGM_INVL_ALL_VCPU_TLBS(pVM);
971
972 /*
973 * Notify NEM about the mapping change for this page.
974 *
975 * Note! Shadow ROM pages are complicated as they can definitely be
976 * allocated while not visible, so play safe.
977 */
978 if (VM_IS_NEM_ENABLED(pVM))
979 {
980 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
981 if ( enmType != PGMPAGETYPE_ROM_SHADOW
982 || pgmPhysGetPage(pVM, GCPhys) == pPage)
983 {
984 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
985 rc2 = NEMHCNotifyPhysPageAllocated(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, HCPhys,
986 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
987 if (RT_SUCCESS(rc2))
988 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
989 else
990 rc = rc2;
991 }
992 }
993
994 return rc;
995}
996
997#ifdef PGM_WITH_LARGE_PAGES
998
999/**
1000 * Replace a 2 MB range of zero pages with new pages that we can write to.
1001 *
1002 * @returns The following VBox status codes.
1003 * @retval VINF_SUCCESS on success, pPage is modified.
1004 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1005 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
1006 *
1007 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
1008 *
1009 * @param pVM The cross context VM structure.
1010 * @param GCPhys The address of the page.
1011 *
1012 * @remarks Must be called from within the PGM critical section. It may block
1013 * on GMM and host mutexes/locks, leaving HM context.
1014 */
1015int pgmPhysAllocLargePage(PVMCC pVM, RTGCPHYS GCPhys)
1016{
1017 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
1018 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
1019 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
1020
1021 /*
1022 * Check Prereqs.
1023 */
1024 PGM_LOCK_ASSERT_OWNER(pVM);
1025 Assert(PGMIsUsingLargePages(pVM));
1026
1027 /*
1028 * All the pages must be unallocated RAM pages, i.e. mapping the ZERO page.
1029 */
1030 PPGMPAGE pFirstPage;
1031 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
1032 if ( RT_SUCCESS(rc)
1033 && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM
1034 && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
1035 {
1036 /*
1037 * Further they should have PDE type set to PGM_PAGE_PDE_TYPE_DONTCARE,
1038 * since they are unallocated.
1039 */
1040 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
1041 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
1042 if (uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE)
1043 {
1044 /*
1045 * Now, make sure all the other pages in the 2 MB range are in the same state.
1046 */
1047 GCPhys = GCPhysBase;
1048 unsigned cLeft = _2M / GUEST_PAGE_SIZE;
1049 while (cLeft-- > 0)
1050 {
1051 PPGMPAGE pSubPage = pgmPhysGetPage(pVM, GCPhys);
1052 if ( pSubPage
1053 && PGM_PAGE_GET_TYPE(pSubPage) == PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
1054 && PGM_PAGE_GET_STATE(pSubPage) == PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
1055 {
1056 Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
1057 GCPhys += GUEST_PAGE_SIZE;
1058 }
1059 else
1060 {
1061 LogFlow(("pgmPhysAllocLargePage: Found page %RGp with wrong attributes (type=%d; state=%d); cancel check.\n",
1062 GCPhys, pSubPage ? PGM_PAGE_GET_TYPE(pSubPage) : -1, pSubPage ? PGM_PAGE_GET_STATE(pSubPage) : -1));
1063
1064 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
1065 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
1066 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
1067 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1068 }
1069 }
1070
1071 /*
1072 * Do the allocation.
1073 */
1074# ifdef IN_RING3
1075 rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_LARGE_PAGE, GCPhysBase, NULL);
1076# elif defined(IN_RING0)
1077 rc = pgmR0PhysAllocateLargePage(pVM, VMMGetCpuId(pVM), GCPhysBase);
1078# else
1079# error "Port me"
1080# endif
1081 if (RT_SUCCESS(rc))
1082 {
1083 Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
1084 pVM->pgm.s.cLargePages++;
1085 return VINF_SUCCESS;
1086 }
1087
1088 /* If we fail once, it most likely means the host's memory is too
1089 fragmented; don't bother trying again. */
1090 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
1091 return rc;
1092 }
1093 }
1094 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1095}
1096
1097
1098/**
1099 * Recheck the entire 2 MB range to see if we can use it again as a large page.
1100 *
1101 * @returns The following VBox status codes.
1102 * @retval VINF_SUCCESS on success, the large page can be used again
1103 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
1104 *
1105 * @param pVM The cross context VM structure.
1106 * @param GCPhys The address of the page.
1107 * @param pLargePage Page structure of the base page
1108 */
1109int pgmPhysRecheckLargePage(PVMCC pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
1110{
1111 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
1112
1113 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
1114
1115 GCPhys &= X86_PDE2M_PAE_PG_MASK;
1116
1117 /* Check the base page. */
1118 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
1119 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
1120 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
1121 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
1122 {
1123 LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
1124 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1125 }
1126
1127 STAM_PROFILE_START(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,IsValidLargePage), a);
1128 /* Check all remaining pages in the 2 MB range. */
1129 unsigned i;
1130 GCPhys += GUEST_PAGE_SIZE;
1131 for (i = 1; i < _2M / GUEST_PAGE_SIZE; i++)
1132 {
1133 PPGMPAGE pPage;
1134 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1135 AssertRCBreak(rc);
1136
1137 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
1138 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
1139 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
1140 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
1141 {
1142 LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
1143 break;
1144 }
1145
1146 GCPhys += GUEST_PAGE_SIZE;
1147 }
1148 STAM_PROFILE_STOP(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,IsValidLargePage), a);
1149
1150 if (i == _2M / GUEST_PAGE_SIZE)
1151 {
1152 PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
1153 pVM->pgm.s.cLargePagesDisabled--;
1154 Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
1155 return VINF_SUCCESS;
1156 }
1157
1158 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1159}
1160
1161#endif /* PGM_WITH_LARGE_PAGES */
1162
1163
1164/**
1165 * Deal with a write monitored page.
1166 *
1167 * @returns VBox strict status code.
1168 *
1169 * @param pVM The cross context VM structure.
1170 * @param pPage The physical page tracking structure.
1171 * @param GCPhys The guest physical address of the page.
1172 * PGMPhysReleasePageMappingLock() passes NIL_RTGCPHYS in a
1173 * very unlikely situation where it is okay that we let NEM
1174 * fix the page access in a lazy fashion.
1175 *
1176 * @remarks Called from within the PGM critical section.
1177 */
1178void pgmPhysPageMakeWriteMonitoredWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1179{
1180 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
1181 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
1182 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1183 Assert(pVM->pgm.s.cMonitoredPages > 0);
1184 pVM->pgm.s.cMonitoredPages--;
1185 pVM->pgm.s.cWrittenToPages++;
1186
1187#ifdef VBOX_WITH_NATIVE_NEM
1188 /*
1189 * Notify NEM about the protection change so we won't spin forever.
1190 *
1191 * Note! NEM needs to be able to lazily correct page protection, as we cannot
1192 * really get it 100% right here, it seems. The page pool does this too.
1193 */
1194 if (VM_IS_NEM_ENABLED(pVM) && GCPhys != NIL_RTGCPHYS)
1195 {
1196 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1197 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1198 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1199 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
1200 pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhys) : NULL,
1201 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1202 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1203 }
1204#else
1205 RT_NOREF(GCPhys);
1206#endif
1207}
1208
1209
1210/**
1211 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
1212 *
1213 * @returns VBox strict status code.
1214 * @retval VINF_SUCCESS on success.
1215 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1216 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1217 *
1218 * @param pVM The cross context VM structure.
1219 * @param pPage The physical page tracking structure.
1220 * @param GCPhys The address of the page.
1221 *
1222 * @remarks Called from within the PGM critical section.
1223 */
1224int pgmPhysPageMakeWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1225{
1226 PGM_LOCK_ASSERT_OWNER(pVM);
1227 switch (PGM_PAGE_GET_STATE(pPage))
1228 {
1229 case PGM_PAGE_STATE_WRITE_MONITORED:
1230 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, GCPhys);
1231 RT_FALL_THRU();
1232 default: /* to shut up GCC */
1233 case PGM_PAGE_STATE_ALLOCATED:
1234 return VINF_SUCCESS;
1235
1236 /*
1237 * Zero pages can be dummy pages for MMIO or reserved memory,
1238 * so we need to check the flags before joining cause with
1239 * shared page replacement.
1240 */
1241 case PGM_PAGE_STATE_ZERO:
1242 if (PGM_PAGE_IS_MMIO(pPage))
1243 return VERR_PGM_PHYS_PAGE_RESERVED;
1244 RT_FALL_THRU();
1245 case PGM_PAGE_STATE_SHARED:
1246 return pgmPhysAllocPage(pVM, pPage, GCPhys);
1247
1248 /* Not allowed to write to ballooned pages. */
1249 case PGM_PAGE_STATE_BALLOONED:
1250 return VERR_PGM_PHYS_PAGE_BALLOONED;
1251 }
1252}
1253
1254
1255/**
1256 * Internal usage: Map the page specified by its GMM ID.
1257 *
1258 * This is similar to pgmPhysPageMap
1259 *
1260 * @returns VBox status code.
1261 *
1262 * @param pVM The cross context VM structure.
1263 * @param idPage The Page ID.
1264 * @param HCPhys The physical address (for SUPR0HCPhysToVirt).
1265 * @param ppv Where to store the mapping address.
1266 *
1267 * @remarks Called from within the PGM critical section. The mapping is only
1268 * valid while you are inside this section.
1269 */
1270int pgmPhysPageMapByPageID(PVMCC pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
1271{
1272 /*
1273 * Validation.
1274 */
1275 PGM_LOCK_ASSERT_OWNER(pVM);
1276 AssertReturn(HCPhys && !(HCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1277 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
1278 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
1279
1280#ifdef IN_RING0
1281# ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
1282 return SUPR0HCPhysToVirt(HCPhys & ~(RTHCPHYS)GUEST_PAGE_OFFSET_MASK, ppv);
1283# else
1284 return GMMR0PageIdToVirt(pVM, idPage, ppv);
1285# endif
1286
1287#else
1288 /*
1289 * Find/make Chunk TLB entry for the mapping chunk.
1290 */
1291 PPGMCHUNKR3MAP pMap;
1292 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1293 if (pTlbe->idChunk == idChunk)
1294 {
1295 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1296 pMap = pTlbe->pChunk;
1297 }
1298 else
1299 {
1300 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1301
1302 /*
1303 * Find the chunk, map it if necessary.
1304 */
1305 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1306 if (pMap)
1307 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1308 else
1309 {
1310 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1311 if (RT_FAILURE(rc))
1312 return rc;
1313 }
1314
1315 /*
1316 * Enter it into the Chunk TLB.
1317 */
1318 pTlbe->idChunk = idChunk;
1319 pTlbe->pChunk = pMap;
1320 }
1321
1322 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << GUEST_PAGE_SHIFT);
1323 return VINF_SUCCESS;
1324#endif
1325}
1326
1327
1328/**
1329 * Maps a page into the current virtual address space so it can be accessed.
1330 *
1331 * @returns VBox status code.
1332 * @retval VINF_SUCCESS on success.
1333 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1334 *
1335 * @param pVM The cross context VM structure.
1336 * @param pPage The physical page tracking structure.
1337 * @param GCPhys The address of the page.
1338 * @param ppMap Where to store the address of the mapping tracking structure.
1339 * @param ppv Where to store the mapping address of the page. The page
1340 * offset is masked off!
1341 *
1342 * @remarks Called from within the PGM critical section.
1343 */
1344static int pgmPhysPageMapCommon(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
1345{
1346 PGM_LOCK_ASSERT_OWNER(pVM);
1347 NOREF(GCPhys);
1348
1349 /*
1350 * Special cases: MMIO2, ZERO and specially aliased MMIO pages.
1351 */
1352 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2
1353 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
1354 {
1355 /* Decode the page id to a page in a MMIO2 ram range. */
1356 uint8_t idMmio2 = PGM_MMIO2_PAGEID_GET_MMIO2_ID(PGM_PAGE_GET_PAGEID(pPage));
1357 uint32_t iPage = PGM_MMIO2_PAGEID_GET_IDX(PGM_PAGE_GET_PAGEID(pPage));
1358 AssertLogRelMsgReturn((uint8_t)(idMmio2 - 1U) < RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)),
1359 ("idMmio2=%u size=%u type=%u GCPHys=%#RGp Id=%u State=%u", idMmio2,
1360 RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)), PGM_PAGE_GET_TYPE(pPage), GCPhys,
1361 pPage->s.idPage, pPage->s.uStateY),
1362 VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1363 PPGMREGMMIO2RANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1];
1364 AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1365 AssertLogRelReturn(pMmio2Range->idMmio2 == idMmio2, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1366 AssertLogRelReturn(iPage < (pMmio2Range->RamRange.cb >> GUEST_PAGE_SHIFT), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1367 *ppMap = NULL;
1368# if defined(IN_RING0) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
1369 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
1370# elif defined(IN_RING0)
1371 *ppv = (uint8_t *)pMmio2Range->pvR0 + ((uintptr_t)iPage << GUEST_PAGE_SHIFT);
1372 return VINF_SUCCESS;
1373# else
1374 *ppv = (uint8_t *)pMmio2Range->RamRange.pvR3 + ((uintptr_t)iPage << GUEST_PAGE_SHIFT);
1375 return VINF_SUCCESS;
1376# endif
1377 }
1378
1379# ifdef VBOX_WITH_PGM_NEM_MODE
1380 if (pVM->pgm.s.fNemMode)
1381 {
1382# ifdef IN_RING3
1383 /*
1384 * Find the corresponding RAM range and use that to locate the mapping address.
1385 */
1386 /** @todo Use the page ID for some kind of indexing as we do with MMIO2 above. */
1387 PPGMRAMRANGE const pRam = pgmPhysGetRange(pVM, GCPhys);
1388 AssertLogRelMsgReturn(pRam, ("%RGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
1389 size_t const idxPage = (GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT;
1390 Assert(pPage == &pRam->aPages[idxPage]);
1391 *ppMap = NULL;
1392 *ppv = (uint8_t *)pRam->pvR3 + (idxPage << GUEST_PAGE_SHIFT);
1393 return VINF_SUCCESS;
1394# else
1395 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
1396# endif
1397 }
1398# endif
1399
1400 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
1401 if (idChunk == NIL_GMM_CHUNKID)
1402 {
1403 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage),
1404 VERR_PGM_PHYS_PAGE_MAP_IPE_1);
1405 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
1406 {
1407 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage),
1408 VERR_PGM_PHYS_PAGE_MAP_IPE_3);
1409 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("pPage=%R[pgmpage]\n", pPage),
1410 VERR_PGM_PHYS_PAGE_MAP_IPE_4);
1411 *ppv = pVM->pgm.s.abZeroPg;
1412 }
1413 else
1414 *ppv = pVM->pgm.s.abZeroPg;
1415 *ppMap = NULL;
1416 return VINF_SUCCESS;
1417 }
1418
1419# if defined(IN_RING0) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
1420 /*
1421 * Just use the physical address.
1422 */
1423 *ppMap = NULL;
1424 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
1425
1426# elif defined(IN_RING0)
1427 /*
1428 * Go by page ID thru GMMR0.
1429 */
1430 *ppMap = NULL;
1431 return GMMR0PageIdToVirt(pVM, PGM_PAGE_GET_PAGEID(pPage), ppv);
1432
1433# else
1434 /*
1435 * Find/make Chunk TLB entry for the mapping chunk.
1436 */
1437 PPGMCHUNKR3MAP pMap;
1438 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1439 if (pTlbe->idChunk == idChunk)
1440 {
1441 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1442 pMap = pTlbe->pChunk;
1443 AssertPtr(pMap->pv);
1444 }
1445 else
1446 {
1447 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1448
1449 /*
1450 * Find the chunk, map it if necessary.
1451 */
1452 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1453 if (pMap)
1454 {
1455 AssertPtr(pMap->pv);
1456 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1457 }
1458 else
1459 {
1460 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1461 if (RT_FAILURE(rc))
1462 return rc;
1463 AssertPtr(pMap->pv);
1464 }
1465
1466 /*
1467 * Enter it into the Chunk TLB.
1468 */
1469 pTlbe->idChunk = idChunk;
1470 pTlbe->pChunk = pMap;
1471 }
1472
1473 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << GUEST_PAGE_SHIFT);
1474 *ppMap = pMap;
1475 return VINF_SUCCESS;
1476# endif /* !IN_RING0 */
1477}
1478
1479
1480/**
1481 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
1482 *
1483 * This is typically used in paths where we cannot use the TLB methods (like ROM
1484 * pages) or where there is no point in using them since we won't get many hits.
1485 *
1486 * @returns VBox strict status code.
1487 * @retval VINF_SUCCESS on success.
1488 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1489 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1490 *
1491 * @param pVM The cross context VM structure.
1492 * @param pPage The physical page tracking structure.
1493 * @param GCPhys The address of the page.
1494 * @param ppv Where to store the mapping address of the page. The page
1495 * offset is masked off!
1496 *
1497 * @remarks Called from within the PGM critical section. The mapping is only
1498 * valid while you are inside this section.
1499 */
1500int pgmPhysPageMakeWritableAndMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1501{
1502 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1503 if (RT_SUCCESS(rc))
1504 {
1505 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
1506 PPGMPAGEMAP pMapIgnore;
1507 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1508 if (RT_FAILURE(rc2)) /* preserve rc */
1509 rc = rc2;
1510 }
1511 return rc;
1512}
1513
1514
1515/**
1516 * Maps a page into the current virtual address space so it can be accessed for
1517 * both writing and reading.
1518 *
1519 * This is typically used in paths where we cannot use the TLB methods (like ROM
1520 * pages) or where there is no point in using them since we won't get many hits.
1521 *
1522 * @returns VBox status code.
1523 * @retval VINF_SUCCESS on success.
1524 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1525 *
1526 * @param pVM The cross context VM structure.
1527 * @param pPage The physical page tracking structure. Must be in the
1528 * allocated state.
1529 * @param GCPhys The address of the page.
1530 * @param ppv Where to store the mapping address of the page. The page
1531 * offset is masked off!
1532 *
1533 * @remarks Called from within the PGM critical section. The mapping is only
1534 * valid while you are inside this section.
1535 */
1536int pgmPhysPageMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1537{
1538 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
1539 PPGMPAGEMAP pMapIgnore;
1540 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1541}
1542
1543
1544/**
1545 * Maps a page into the current virtual address space so it can be accessed for
1546 * reading.
1547 *
1548 * This is typically used in paths where we cannot use the TLB methods (like ROM
1549 * pages) or where there is no point in using them since we won't get many hits.
1550 *
1551 * @returns VBox status code.
1552 * @retval VINF_SUCCESS on success.
1553 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1554 *
1555 * @param pVM The cross context VM structure.
1556 * @param pPage The physical page tracking structure.
1557 * @param GCPhys The address of the page.
1558 * @param ppv Where to store the mapping address of the page. The page
1559 * offset is masked off!
1560 *
1561 * @remarks Called from within the PGM critical section. The mapping is only
1562 * valid while you are inside this section.
1563 */
1564int pgmPhysPageMapReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
1565{
1566 PPGMPAGEMAP pMapIgnore;
1567 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
1568}
1569
1570
1571/**
1572 * Load a guest page into the ring-3 physical TLB.
1573 *
1574 * @returns VBox status code.
1575 * @retval VINF_SUCCESS on success
1576 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1577 * @param pVM The cross context VM structure.
1578 * @param GCPhys The guest physical address in question.
1579 */
1580int pgmPhysPageLoadIntoTlb(PVMCC pVM, RTGCPHYS GCPhys)
1581{
1582 PGM_LOCK_ASSERT_OWNER(pVM);
1583
1584 /*
1585 * Find the ram range and page and hand it over to the with-page function.
1586 * 99.8% of requests are expected to be in the first range.
1587 */
1588 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1589 if (!pPage)
1590 {
1591 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbMisses));
1592 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1593 }
1594
1595 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
1596}
1597
1598
1599/**
1600 * Load a guest page into the ring-3 physical TLB.
1601 *
1602 * @returns VBox status code.
1603 * @retval VINF_SUCCESS on success
1604 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1605 *
1606 * @param pVM The cross context VM structure.
1607 * @param pPage Pointer to the PGMPAGE structure corresponding to
1608 * GCPhys.
1609 * @param GCPhys The guest physical address in question.
1610 */
1611int pgmPhysPageLoadIntoTlbWithPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1612{
1613 PGM_LOCK_ASSERT_OWNER(pVM);
1614 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbMisses));
1615
1616 /*
1617 * Map the page.
1618 * Make a special case for the zero page as it is kind of special.
1619 */
1620 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1621 if ( !PGM_PAGE_IS_ZERO(pPage)
1622 && !PGM_PAGE_IS_BALLOONED(pPage))
1623 {
1624 void *pv;
1625 PPGMPAGEMAP pMap;
1626 int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv);
1627 if (RT_FAILURE(rc))
1628 return rc;
1629# ifndef IN_RING0
1630 pTlbe->pMap = pMap;
1631# endif
1632 pTlbe->pv = pv;
1633 Assert(!((uintptr_t)pTlbe->pv & GUEST_PAGE_OFFSET_MASK));
1634 }
1635 else
1636 {
1637 AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
1638# ifndef IN_RING0
1639 pTlbe->pMap = NULL;
1640# endif
1641 pTlbe->pv = pVM->pgm.s.abZeroPg;
1642 }
1643# ifdef PGM_WITH_PHYS_TLB
1644 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
1645 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
1646 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1647 else
1648 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
1649# else
1650 pTlbe->GCPhys = NIL_RTGCPHYS;
1651# endif
1652 pTlbe->pPage = pPage;
1653 return VINF_SUCCESS;
1654}
1655
1656
1657/**
1658 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1659 * own the PGM lock and therefore not need to lock the mapped page.
1660 *
1661 * @returns VBox status code.
1662 * @retval VINF_SUCCESS on success.
1663 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1664 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1665 *
1666 * @param pVM The cross context VM structure.
1667 * @param GCPhys The guest physical address of the page that should be mapped.
1668 * @param pPage Pointer to the PGMPAGE structure for the page.
1669 * @param ppv Where to store the address corresponding to GCPhys.
1670 *
1671 * @internal
1672 * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
1673 */
1674int pgmPhysGCPhys2CCPtrInternalDepr(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1675{
1676 int rc;
1677 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1678 PGM_LOCK_ASSERT_OWNER(pVM);
1679 pVM->pgm.s.cDeprecatedPageLocks++;
1680
1681 /*
1682 * Make sure the page is writable.
1683 */
1684 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1685 {
1686 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1687 if (RT_FAILURE(rc))
1688 return rc;
1689 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1690 }
1691 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1692
1693 /*
1694 * Get the mapping address.
1695 */
1696 PPGMPAGEMAPTLBE pTlbe;
1697 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1698 if (RT_FAILURE(rc))
1699 return rc;
1700 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1701 return VINF_SUCCESS;
1702}
1703
1704
1705/**
1706 * Locks a page mapping for writing.
1707 *
1708 * @param pVM The cross context VM structure.
1709 * @param pPage The page.
1710 * @param pTlbe The mapping TLB entry for the page.
1711 * @param pLock The lock structure (output).
1712 */
1713DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1714{
1715# ifndef IN_RING0
1716 PPGMPAGEMAP pMap = pTlbe->pMap;
1717 if (pMap)
1718 pMap->cRefs++;
1719# else
1720 RT_NOREF(pTlbe);
1721# endif
1722
1723 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1724 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1725 {
1726 if (cLocks == 0)
1727 pVM->pgm.s.cWriteLockedPages++;
1728 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1729 }
1730 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1731 {
1732 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1733 AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
1734# ifndef IN_RING0
1735 if (pMap)
1736 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1737# endif
1738 }
1739
1740 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1741# ifndef IN_RING0
1742 pLock->pvMap = pMap;
1743# else
1744 pLock->pvMap = NULL;
1745# endif
1746}
1747
1748/**
1749 * Locks a page mapping for reading.
1750 *
1751 * @param pVM The cross context VM structure.
1752 * @param pPage The page.
1753 * @param pTlbe The mapping TLB entry for the page.
1754 * @param pLock The lock structure (output).
1755 */
1756DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1757{
1758# ifndef IN_RING0
1759 PPGMPAGEMAP pMap = pTlbe->pMap;
1760 if (pMap)
1761 pMap->cRefs++;
1762# else
1763 RT_NOREF(pTlbe);
1764# endif
1765
1766 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1767 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1768 {
1769 if (cLocks == 0)
1770 pVM->pgm.s.cReadLockedPages++;
1771 PGM_PAGE_INC_READ_LOCKS(pPage);
1772 }
1773 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1774 {
1775 PGM_PAGE_INC_READ_LOCKS(pPage);
1776 AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
1777# ifndef IN_RING0
1778 if (pMap)
1779 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1780# endif
1781 }
1782
1783 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1784# ifndef IN_RING0
1785 pLock->pvMap = pMap;
1786# else
1787 pLock->pvMap = NULL;
1788# endif
1789}
1790
1791
1792/**
1793 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1794 * own the PGM lock and have access to the page structure.
1795 *
1796 * @returns VBox status code.
1797 * @retval VINF_SUCCESS on success.
1798 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1799 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1800 *
1801 * @param pVM The cross context VM structure.
1802 * @param GCPhys The guest physical address of the page that should be mapped.
1803 * @param pPage Pointer to the PGMPAGE structure for the page.
1804 * @param ppv Where to store the address corresponding to GCPhys.
1805 * @param pLock Where to store the lock information that
1806 * pgmPhysReleaseInternalPageMappingLock needs.
1807 *
1808 * @internal
1809 */
1810int pgmPhysGCPhys2CCPtrInternal(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1811{
1812 int rc;
1813 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1814 PGM_LOCK_ASSERT_OWNER(pVM);
1815
1816 /*
1817 * Make sure the page is writable.
1818 */
1819 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1820 {
1821 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1822 if (RT_FAILURE(rc))
1823 return rc;
1824 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1825 }
1826 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1827
1828 /*
1829 * Do the job.
1830 */
1831 PPGMPAGEMAPTLBE pTlbe;
1832 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1833 if (RT_FAILURE(rc))
1834 return rc;
1835 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1836 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1837 return VINF_SUCCESS;
1838}
1839
1840
1841/**
1842 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1843 * own the PGM lock and have access to the page structure.
1844 *
1845 * @returns VBox status code.
1846 * @retval VINF_SUCCESS on success.
1847 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1848 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1849 *
1850 * @param pVM The cross context VM structure.
1851 * @param GCPhys The guest physical address of the page that should be mapped.
1852 * @param pPage Pointer to the PGMPAGE structure for the page.
1853 * @param ppv Where to store the address corresponding to GCPhys.
1854 * @param pLock Where to store the lock information that
1855 * pgmPhysReleaseInternalPageMappingLock needs.
1856 *
1857 * @internal
1858 */
1859int pgmPhysGCPhys2CCPtrInternalReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
1860{
1861 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1862 PGM_LOCK_ASSERT_OWNER(pVM);
1863 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1864
1865 /*
1866 * Do the job.
1867 */
1868 PPGMPAGEMAPTLBE pTlbe;
1869 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1870 if (RT_FAILURE(rc))
1871 return rc;
1872 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1873 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1874 return VINF_SUCCESS;
1875}
1876
1877
1878/**
1879 * Requests the mapping of a guest page into the current context.
1880 *
1881 * This API should only be used for very short-term mappings, as it will consume
1882 * scarce resources (R0 and GC) in the mapping cache. When you're done with the page,
1883 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1884 *
1885 * This API will assume your intention is to write to the page, and will
1886 * therefore replace shared and zero pages. If you do not intend to modify
1887 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1888 *
1889 * @returns VBox status code.
1890 * @retval VINF_SUCCESS on success.
1891 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1892 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1893 *
1894 * @param pVM The cross context VM structure.
1895 * @param GCPhys The guest physical address of the page that should be
1896 * mapped.
1897 * @param ppv Where to store the address corresponding to GCPhys.
1898 * @param pLock Where to store the lock information that
1899 * PGMPhysReleasePageMappingLock needs.
1900 *
1901 * @remarks The caller is responsible for dealing with access handlers.
1902 * @todo Add an informational return code for pages with access handlers?
1903 *
1904 * @remark Avoid calling this API from within critical sections (other than
1905 * the PGM one) because of the deadlock risk. External threads may
1906 * need to delegate jobs to the EMTs.
1907 * @remarks Only one page is mapped! Make no assumption about what's after or
1908 * before the returned page!
1909 * @thread Any thread.
1910 */
1911VMM_INT_DECL(int) PGMPhysGCPhys2CCPtr(PVMCC pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1912{
1913 int rc = PGM_LOCK(pVM);
1914 AssertRCReturn(rc, rc);
1915
1916 /*
1917 * Query the Physical TLB entry for the page (may fail).
1918 */
1919 PPGMPAGEMAPTLBE pTlbe;
1920 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1921 if (RT_SUCCESS(rc))
1922 {
1923 /*
1924 * If the page is shared, the zero page, or being write monitored
1925 * it must be converted to a page that's writable if possible.
1926 */
1927 PPGMPAGE pPage = pTlbe->pPage;
1928 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1929 {
1930 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1931 if (RT_SUCCESS(rc))
1932 {
1933 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1934 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1935 }
1936 }
1937 if (RT_SUCCESS(rc))
1938 {
1939 /*
1940 * Now, just perform the locking and calculate the return address.
1941 */
1942 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1943 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1944 }
1945 }
1946
1947 PGM_UNLOCK(pVM);
1948 return rc;
1949}
1950
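/* Illustrative usage sketch (not part of the build): a caller might map a page
   for writing with PGMPhysGCPhys2CCPtr, patch a byte, and drop the mapping lock
   again as soon as possible. The function name and the patch value are made up
   for the example; the returned pointer already includes the page offset. */
#if 0
static int pgmPhysExamplePatchByte(PVMCC pVM, RTGCPHYS GCPhysPatch)
{
    void           *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysPatch, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *(uint8_t *)pv = 0x90;                      /* pv corresponds to GCPhysPatch itself. */
        PGMPhysReleasePageMappingLock(pVM, &Lock);  /* Release ASAP, the cache is scarce. */
    }
    return rc;
}
#endif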
1951
1952/**
1953 * Requests the mapping of a guest page into the current context.
1954 *
1955 * This API should only be used for very short-term mappings, as it will consume
1956 * scarce resources (R0 and GC) in the mapping cache. When you're done with the page,
1957 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1958 *
1959 * @returns VBox status code.
1960 * @retval VINF_SUCCESS on success.
1961 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1962 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1963 *
1964 * @param pVM The cross context VM structure.
1965 * @param GCPhys The guest physical address of the page that should be
1966 * mapped.
1967 * @param ppv Where to store the address corresponding to GCPhys.
1968 * @param pLock Where to store the lock information that
1969 * PGMPhysReleasePageMappingLock needs.
1970 *
1971 * @remarks The caller is responsible for dealing with access handlers.
1972 * @todo Add an informational return code for pages with access handlers?
1973 *
1974 * @remarks Avoid calling this API from within critical sections (other than
1975 * the PGM one) because of the deadlock risk.
1976 * @remarks Only one page is mapped! Make no assumption about what's after or
1977 * before the returned page!
1978 * @thread Any thread.
1979 */
1980VMM_INT_DECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVMCC pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1981{
1982 int rc = PGM_LOCK(pVM);
1983 AssertRCReturn(rc, rc);
1984
1985 /*
1986 * Query the Physical TLB entry for the page (may fail).
1987 */
1988 PPGMPAGEMAPTLBE pTlbe;
1989 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1990 if (RT_SUCCESS(rc))
1991 {
1992 /* MMIO pages don't have any readable backing. */
1993 PPGMPAGE pPage = pTlbe->pPage;
1994 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
1995 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1996 else
1997 {
1998 /*
1999 * Now, just perform the locking and calculate the return address.
2000 */
2001 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
2002 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
2003 }
2004 }
2005
2006 PGM_UNLOCK(pVM);
2007 return rc;
2008}
2009
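/* Illustrative usage sketch (not part of the build): peeking at guest RAM via
   the read-only variant. MMIO and special alias pages have no readable backing,
   so VERR_PGM_PHYS_PAGE_RESERVED is simply propagated here. The helper name and
   the single-page restriction are assumptions made for the example. */
#if 0
static int pgmPhysExamplePeek(PVMCC pVM, RTGCPHYS GCPhys, void *pvDst, size_t cb)
{
    /* Only one page is mapped, so the copy must not cross the page boundary. */
    AssertReturn(cb <= GUEST_PAGE_SIZE - (GCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);

    void const     *pvSrc;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
    if (RT_SUCCESS(rc))
    {
        memcpy(pvDst, pvSrc, cb);
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    return rc;
}
#endif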
2010
2011/**
2012 * Requests the mapping of a guest page given by virtual address into the current context.
2013 *
2014 * This API should only be used for very short-term mappings, as it will consume
2015 * scarce resources (R0 and GC) in the mapping cache. When you're done
2016 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
2017 *
2018 * This API will assume your intention is to write to the page, and will
2019 * therefore replace shared and zero pages. If you do not intend to modify
2020 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
2021 *
2022 * @returns VBox status code.
2023 * @retval VINF_SUCCESS on success.
2024 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
2025 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
2026 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2027 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2028 *
2029 * @param pVCpu The cross context virtual CPU structure.
2030 * @param GCPtr The guest virtual address of the page that should be
2031 * mapped.
2032 * @param ppv Where to store the address corresponding to GCPtr.
2033 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
2034 *
2035 * @remark Avoid calling this API from within critical sections (other than
2036 * the PGM one) because of the deadlock risk.
2037 * @thread EMT
2038 */
2039VMM_INT_DECL(int) PGMPhysGCPtr2CCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
2040{
2041 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
2042 RTGCPHYS GCPhys;
2043 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
2044 if (RT_SUCCESS(rc))
2045 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
2046 return rc;
2047}
2048
2049
2050/**
2051 * Requests the mapping of a guest page given by virtual address into the current context.
2052 *
2053 * This API should only be used for very short-term mappings, as it will consume
2054 * scarce resources (R0 and GC) in the mapping cache. When you're done
2055 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
2056 *
2057 * @returns VBox status code.
2058 * @retval VINF_SUCCESS on success.
2059 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
2060 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
2061 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2062 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2063 *
2064 * @param pVCpu The cross context virtual CPU structure.
2065 * @param GCPtr The guest virtual address of the page that should be
2066 * mapped.
2067 * @param ppv Where to store the address corresponding to GCPtr.
2068 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
2069 *
2070 * @remark Avoid calling this API from within critical sections (other than
2071 * the PGM one) because of the deadlock risk.
2072 * @thread EMT
2073 */
2074VMM_INT_DECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPUCC pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
2075{
2076 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
2077 RTGCPHYS GCPhys;
2078 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
2079 if (RT_SUCCESS(rc))
2080 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
2081 return rc;
2082}
2083
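/* Illustrative usage sketch (not part of the build): reading a 32-bit value
   through a guest virtual address on the EMT. The translation uses the current
   CR3/CR0/CR4, and the example assumes the value does not cross a page
   boundary; the function name is made up. */
#if 0
static int pgmPhysExampleReadU32AtGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t *pu32Dst)
{
    void const     *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtr, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *pu32Dst = *(uint32_t const *)pv;           /* Assumes no page crossing. */
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &Lock);
    }
    return rc;
}
#endif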
2084
2085/**
2086 * Release the mapping of a guest page.
2087 *
2088 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
2089 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
2090 *
2091 * @param pVM The cross context VM structure.
2092 * @param pLock The lock structure initialized by the mapping function.
2093 */
2094VMMDECL(void) PGMPhysReleasePageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
2095{
2096# ifndef IN_RING0
2097 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
2098# endif
2099 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2100 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
2101
2102 pLock->uPageAndType = 0;
2103 pLock->pvMap = NULL;
2104
2105 PGM_LOCK_VOID(pVM);
2106 if (fWriteLock)
2107 {
2108 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
2109 Assert(cLocks > 0);
2110 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2111 {
2112 if (cLocks == 1)
2113 {
2114 Assert(pVM->pgm.s.cWriteLockedPages > 0);
2115 pVM->pgm.s.cWriteLockedPages--;
2116 }
2117 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
2118 }
2119
2120 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
2121 { /* probably extremely likely */ }
2122 else
2123 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
2124 }
2125 else
2126 {
2127 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
2128 Assert(cLocks > 0);
2129 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2130 {
2131 if (cLocks == 1)
2132 {
2133 Assert(pVM->pgm.s.cReadLockedPages > 0);
2134 pVM->pgm.s.cReadLockedPages--;
2135 }
2136 PGM_PAGE_DEC_READ_LOCKS(pPage);
2137 }
2138 }
2139
2140# ifndef IN_RING0
2141 if (pMap)
2142 {
2143 Assert(pMap->cRefs >= 1);
2144 pMap->cRefs--;
2145 }
2146# endif
2147 PGM_UNLOCK(pVM);
2148}
2149
2150
2151#ifdef IN_RING3
2152/**
2153 * Release the mapping of multiple guest pages.
2154 *
2155 * This is the counterpart to PGMR3PhysBulkGCPhys2CCPtrExternal() and
2156 * PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal().
2157 *
2158 * @param pVM The cross context VM structure.
2159 * @param cPages Number of pages to unlock.
2160 * @param paLocks Array of lock structures initialized by the mapping
2161 * function.
2162 */
2163VMMDECL(void) PGMPhysBulkReleasePageMappingLocks(PVMCC pVM, uint32_t cPages, PPGMPAGEMAPLOCK paLocks)
2164{
2165 Assert(cPages > 0);
2166 bool const fWriteLock = (paLocks[0].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
2167#ifdef VBOX_STRICT
2168 for (uint32_t i = 1; i < cPages; i++)
2169 {
2170 Assert(fWriteLock == ((paLocks[i].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE));
2171 AssertPtr(paLocks[i].uPageAndType);
2172 }
2173#endif
2174
2175 PGM_LOCK_VOID(pVM);
2176 if (fWriteLock)
2177 {
2178 /*
2179 * Write locks:
2180 */
2181 for (uint32_t i = 0; i < cPages; i++)
2182 {
2183 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2184 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
2185 Assert(cLocks > 0);
2186 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2187 {
2188 if (cLocks == 1)
2189 {
2190 Assert(pVM->pgm.s.cWriteLockedPages > 0);
2191 pVM->pgm.s.cWriteLockedPages--;
2192 }
2193 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
2194 }
2195
2196 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
2197 { /* probably extremely likely */ }
2198 else
2199 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
2200
2201 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
2202 if (pMap)
2203 {
2204 Assert(pMap->cRefs >= 1);
2205 pMap->cRefs--;
2206 }
2207
2208 /* Yield the lock: */
2209 if ((i & 1023) == 1023 && i + 1 < cPages)
2210 {
2211 PGM_UNLOCK(pVM);
2212 PGM_LOCK_VOID(pVM);
2213 }
2214 }
2215 }
2216 else
2217 {
2218 /*
2219 * Read locks:
2220 */
2221 for (uint32_t i = 0; i < cPages; i++)
2222 {
2223 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2224 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
2225 Assert(cLocks > 0);
2226 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2227 {
2228 if (cLocks == 1)
2229 {
2230 Assert(pVM->pgm.s.cReadLockedPages > 0);
2231 pVM->pgm.s.cReadLockedPages--;
2232 }
2233 PGM_PAGE_DEC_READ_LOCKS(pPage);
2234 }
2235
2236 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
2237 if (pMap)
2238 {
2239 Assert(pMap->cRefs >= 1);
2240 pMap->cRefs--;
2241 }
2242
2243 /* Yield the lock: */
2244 if ((i & 1023) == 1023 && i + 1 < cPages)
2245 {
2246 PGM_UNLOCK(pVM);
2247 PGM_LOCK_VOID(pVM);
2248 }
2249 }
2250 }
2251 PGM_UNLOCK(pVM);
2252
2253 RT_BZERO(paLocks, sizeof(paLocks[0]) * cPages);
2254}
2255#endif /* IN_RING3 */
2256
2257
2258/**
2259 * Release the internal mapping of a guest page.
2260 *
2261 * This is the counterpart of pgmPhysGCPhys2CCPtrInternalEx and
2262 * pgmPhysGCPhys2CCPtrInternalReadOnly.
2263 *
2264 * @param pVM The cross context VM structure.
2265 * @param pLock The lock structure initialized by the mapping function.
2266 *
2267 * @remarks Caller must hold the PGM lock.
2268 */
2269void pgmPhysReleaseInternalPageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
2270{
2271 PGM_LOCK_ASSERT_OWNER(pVM);
2272 PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
2273}
2274
2275
2276/**
2277 * Converts a GC physical address to a HC ring-3 pointer.
2278 *
2279 * @returns VINF_SUCCESS on success.
2280 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
2281 * page but has no physical backing.
2282 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
2283 * GC physical address.
2284 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
2285 * a dynamic ram chunk boundary
2286 *
2287 * @param pVM The cross context VM structure.
2288 * @param GCPhys The GC physical address to convert.
2289 * @param pR3Ptr Where to store the R3 pointer on success.
2290 *
2291 * @deprecated Avoid when possible!
2292 */
2293int pgmPhysGCPhys2R3Ptr(PVMCC pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
2294{
2295/** @todo this is kind of hacky and needs some more work. */
2296#ifndef DEBUG_sandervl
2297 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
2298#endif
2299
2300 Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): dont use this API!\n", GCPhys)); /** @todo eliminate this API! */
2301 PGM_LOCK_VOID(pVM);
2302
2303 PPGMRAMRANGE pRam;
2304 PPGMPAGE pPage;
2305 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
2306 if (RT_SUCCESS(rc))
2307 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
2308
2309 PGM_UNLOCK(pVM);
2310 Assert(rc <= VINF_SUCCESS);
2311 return rc;
2312}
2313
2314
2315/**
2316 * Converts a guest pointer to a GC physical address.
2317 *
2318 * This uses the current CR3/CR0/CR4 of the guest.
2319 *
2320 * @returns VBox status code.
2321 * @param pVCpu The cross context virtual CPU structure.
2322 * @param GCPtr The guest pointer to convert.
2323 * @param pGCPhys Where to store the GC physical address.
2324 */
2325VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
2326{
2327 PGMPTWALK Walk;
2328 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
2329 if (pGCPhys && RT_SUCCESS(rc))
2330 *pGCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtr & GUEST_PAGE_OFFSET_MASK);
2331 return rc;
2332}
2333
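/* Illustrative usage sketch (not part of the build): translating a guest
   virtual address into a guest physical one. Failures like
   VERR_PAGE_TABLE_NOT_PRESENT are passed straight back to the caller; the
   function name is made up. */
#if 0
static int pgmPhysExampleTranslate(PVMCPUCC pVCpu, RTGCPTR GCPtr)
{
    RTGCPHYS GCPhys;
    int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
    if (RT_SUCCESS(rc))
        Log(("example: %RGv -> %RGp\n", GCPtr, GCPhys));
    return rc;
}
#endif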
2334
2335/**
2336 * Converts a guest pointer to a HC physical address.
2337 *
2338 * This uses the current CR3/CR0/CR4 of the guest.
2339 *
2340 * @returns VBox status code.
2341 * @param pVCpu The cross context virtual CPU structure.
2342 * @param GCPtr The guest pointer to convert.
2343 * @param pHCPhys Where to store the HC physical address.
2344 */
2345VMM_INT_DECL(int) PGMPhysGCPtr2HCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
2346{
2347 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2348 PGMPTWALK Walk;
2349 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
2350 if (RT_SUCCESS(rc))
2351 rc = PGMPhysGCPhys2HCPhys(pVM, Walk.GCPhys | ((RTGCUINTPTR)GCPtr & GUEST_PAGE_OFFSET_MASK), pHCPhys);
2352 return rc;
2353}
2354
2355
2356
2357#undef LOG_GROUP
2358#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
2359
2360
2361#if defined(IN_RING3) && defined(SOME_UNUSED_FUNCTION)
2362/**
2363 * Cache PGMPhys memory access
2364 *
2365 * @param pVM The cross context VM structure.
2366 * @param pCache Cache structure pointer
2367 * @param GCPhys GC physical address
2368 * @param pbR3 Ring-3 pointer corresponding to the physical page
2369 *
2370 * @thread EMT.
2371 */
2372static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
2373{
2374 uint32_t iCacheIndex;
2375
2376 Assert(VM_IS_EMT(pVM));
2377
2378 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
2379 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
2380
2381 iCacheIndex = ((GCPhys >> GUEST_PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
2382
2383 ASMBitSet(&pCache->aEntries, iCacheIndex);
2384
2385 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
2386 pCache->Entry[iCacheIndex].pbR3 = pbR3;
2387}
2388#endif /* IN_RING3 */
2389
2390
2391/**
2392 * Deals with reading from a page with one or more ALL access handlers.
2393 *
2394 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2395 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
2396 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
2397 *
2398 * @param pVM The cross context VM structure.
2399 * @param pPage The page descriptor.
2400 * @param GCPhys The physical address to start reading at.
2401 * @param pvBuf Where to put the bits we read.
2402 * @param cb How much to read - less or equal to a page.
2403 * @param enmOrigin The origin of this call.
2404 */
2405static VBOXSTRICTRC pgmPhysReadHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb,
2406 PGMACCESSORIGIN enmOrigin)
2407{
2408 /*
2409 * The most frequent accesses here are MMIO and shadowed ROM.
2410 * The current code ASSUMES all these access handlers cover full pages!
2411 */
2412
2413 /*
2414 * Whatever we do we need the source page, map it first.
2415 */
2416 PGMPAGEMAPLOCK PgMpLck;
2417 const void *pvSrc = NULL;
2418 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
2419/** @todo Check how this can work for MMIO pages? */
2420 if (RT_FAILURE(rc))
2421 {
2422 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2423 GCPhys, pPage, rc));
2424 memset(pvBuf, 0xff, cb);
2425 return VINF_SUCCESS;
2426 }
2427
2428 VBOXSTRICTRC rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
2429
2430 /*
2431 * Deal with any physical handlers.
2432 */
2433 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2434 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL
2435 || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2436 {
2437 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2438 AssertReleaseMsg(pCur, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2439 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
2440 Assert((pCur->Core.Key & GUEST_PAGE_OFFSET_MASK) == 0);
2441 Assert((pCur->Core.KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK);
2442#ifndef IN_RING3
2443 if (enmOrigin != PGMACCESSORIGIN_IEM)
2444 {
2445 /* Cannot reliably handle informational status codes in this context */
2446 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2447 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2448 }
2449#endif
2450 PPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
2451 PFNPGMPHYSHANDLER const pfnHandler = pCurType->CTX_SUFF(pfnHandler); Assert(pfnHandler);
2452 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pCur->uUser
2453 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pCur->uUser);
2454
2455 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pCur->pszDesc) ));
2456 STAM_PROFILE_START(&pCur->Stat, h);
2457 PGM_LOCK_ASSERT_OWNER(pVM);
2458
2459 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2460 PGM_UNLOCK(pVM);
2461 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, enmOrigin, uUser);
2462 PGM_LOCK_VOID(pVM);
2463
2464#ifdef VBOX_WITH_STATISTICS
2465 pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2466 if (pCur)
2467 STAM_PROFILE_STOP(&pCur->Stat, h);
2468#else
2469 pCur = NULL; /* might not be valid anymore. */
2470#endif
2471 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, false),
2472 ("rcStrict=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys));
2473 if ( rcStrict != VINF_PGM_HANDLER_DO_DEFAULT
2474 && !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2475 {
2476 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2477 return rcStrict;
2478 }
2479 }
2480
2481 /*
2482 * Take the default action.
2483 */
2484 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2485 {
2486 memcpy(pvBuf, pvSrc, cb);
2487 rcStrict = VINF_SUCCESS;
2488 }
2489 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2490 return rcStrict;
2491}
2492
2493
2494/**
2495 * Read physical memory.
2496 *
2497 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
2498 * want to ignore those.
2499 *
2500 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
2501 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
2502 * @retval VINF_SUCCESS in all context - read completed.
2503 *
2504 * @retval VINF_EM_OFF in RC and R0 - read completed.
2505 * @retval VINF_EM_SUSPEND in RC and R0 - read completed.
2506 * @retval VINF_EM_RESET in RC and R0 - read completed.
2507 * @retval VINF_EM_HALT in RC and R0 - read completed.
2508 * @retval VINF_SELM_SYNC_GDT in RC only - read completed.
2509 *
2510 * @retval VINF_EM_DBG_STOP in RC and R0 - read completed.
2511 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - read completed.
2512 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
2513 *
2514 * @retval VINF_IOM_R3_MMIO_READ in RC and R0.
2515 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
2516 *
2517 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
2518 *
2519 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
2520 * haven't been cleared for strict status codes yet.
2521 *
2522 * @param pVM The cross context VM structure.
2523 * @param GCPhys Physical address start reading from.
2524 * @param pvBuf Where to put the read bits.
2525 * @param cbRead How many bytes to read.
2526 * @param enmOrigin The origin of this call.
2527 */
2528VMMDECL(VBOXSTRICTRC) PGMPhysRead(PVMCC pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin)
2529{
2530 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
2531 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
2532
2533 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysRead));
2534 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysReadBytes), cbRead);
2535
2536 PGM_LOCK_VOID(pVM);
2537
2538 /*
2539 * Copy loop on ram ranges.
2540 */
2541 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2542 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2543 for (;;)
2544 {
2545 /* Inside range or not? */
2546 if (pRam && GCPhys >= pRam->GCPhys)
2547 {
2548 /*
2549 * Must work our way thru this page by page.
2550 */
2551 RTGCPHYS off = GCPhys - pRam->GCPhys;
2552 while (off < pRam->cb)
2553 {
2554 unsigned iPage = off >> GUEST_PAGE_SHIFT;
2555 PPGMPAGE pPage = &pRam->aPages[iPage];
2556 size_t cb = GUEST_PAGE_SIZE - (off & GUEST_PAGE_OFFSET_MASK);
2557 if (cb > cbRead)
2558 cb = cbRead;
2559
2560 /*
2561 * Normal page? Get the pointer to it.
2562 */
2563 if ( !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
2564 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
2565 {
2566 /*
2567 * Get the pointer to the page.
2568 */
2569 PGMPAGEMAPLOCK PgMpLck;
2570 const void *pvSrc;
2571 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
2572 if (RT_SUCCESS(rc))
2573 {
2574 memcpy(pvBuf, pvSrc, cb);
2575 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2576 }
2577 else
2578 {
2579 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2580 pRam->GCPhys + off, pPage, rc));
2581 memset(pvBuf, 0xff, cb);
2582 }
2583 }
2584 /*
2585 * Have ALL/MMIO access handlers.
2586 */
2587 else
2588 {
2589 VBOXSTRICTRC rcStrict2 = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
2590 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2591 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2592 else
2593 {
2594 memset(pvBuf, 0xff, cb);
2595 PGM_UNLOCK(pVM);
2596 return rcStrict2;
2597 }
2598 }
2599
2600 /* next page */
2601 if (cb >= cbRead)
2602 {
2603 PGM_UNLOCK(pVM);
2604 return rcStrict;
2605 }
2606 cbRead -= cb;
2607 off += cb;
2608 pvBuf = (char *)pvBuf + cb;
2609 } /* walk pages in ram range. */
2610
2611 GCPhys = pRam->GCPhysLast + 1;
2612 }
2613 else
2614 {
2615 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
2616
2617 /*
2618 * Unassigned address space.
2619 */
2620 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
2621 if (cb >= cbRead)
2622 {
2623 memset(pvBuf, 0xff, cbRead);
2624 break;
2625 }
2626 memset(pvBuf, 0xff, cb);
2627
2628 cbRead -= cb;
2629 pvBuf = (char *)pvBuf + cb;
2630 GCPhys += cb;
2631 }
2632
2633 /* Advance range if necessary. */
2634 while (pRam && GCPhys > pRam->GCPhysLast)
2635 pRam = pRam->CTX_SUFF(pNext);
2636 } /* Ram range walk */
2637
2638 PGM_UNLOCK(pVM);
2639 return rcStrict;
2640}
2641
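/* Illustrative usage sketch (not part of the build): reading guest physical
   memory with access handlers respected. The strict status may carry an
   informational code that ring-0 callers have to propagate, which is what
   PGM_PHYS_RW_IS_SUCCESS distinguishes from real failures. Buffer size and
   access origin are example values. */
#if 0
static VBOXSTRICTRC pgmPhysExampleHandledRead(PVMCC pVM, RTGCPHYS GCPhys)
{
    uint8_t abBuf[64];
    VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, abBuf, sizeof(abBuf), PGMACCESSORIGIN_IEM);
    if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    {
        /* abBuf is valid here; rcStrict may still be an informational status
           that must be returned to the caller rather than discarded. */
    }
    return rcStrict;
}
#endif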
2642
2643/**
2644 * Deals with writing to a page with one or more WRITE or ALL access handlers.
2645 *
2646 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2647 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
2648 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
2649 *
2650 * @param pVM The cross context VM structure.
2651 * @param pPage The page descriptor.
2652 * @param GCPhys The physical address to start writing at.
2653 * @param pvBuf What to write.
2654 * @param cbWrite How much to write - less or equal to a page.
2655 * @param enmOrigin The origin of this call.
2656 */
2657static VBOXSTRICTRC pgmPhysWriteHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite,
2658 PGMACCESSORIGIN enmOrigin)
2659{
2660 PGMPAGEMAPLOCK PgMpLck;
2661 void *pvDst = NULL;
2662 VBOXSTRICTRC rcStrict;
2663
2664 /*
2665 * Give priority to physical handlers (like #PF does).
2666 *
2667 * Hope for a lonely physical handler first that covers the whole
2668 * write area. This should be a pretty frequent case with MMIO and
2669 * the heavy usage of full page handlers in the page pool.
2670 */
2671 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2672 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2673 if (pCur)
2674 {
2675 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
2676#ifndef IN_RING3
2677 if (enmOrigin != PGMACCESSORIGIN_IEM)
2678 /* Cannot reliably handle informational status codes in this context */
2679 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2680#endif
2681 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
2682 if (cbRange > cbWrite)
2683 cbRange = cbWrite;
2684
2685 Assert(PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->CTX_SUFF(pfnHandler));
2686 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n",
2687 GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2688 if (!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2689 rcStrict = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2690 else
2691 rcStrict = VINF_SUCCESS;
2692 if (RT_SUCCESS(rcStrict))
2693 {
2694 PPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
2695 PFNPGMPHYSHANDLER const pfnHandler = pCurType->CTX_SUFF(pfnHandler);
2696 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pCur->uUser
2697 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pCur->uUser);
2698 STAM_PROFILE_START(&pCur->Stat, h);
2699
2700 /* Most handlers will want to release the PGM lock for deadlock prevention
2701 (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
2702 dirty page trackers will want to keep it for performance reasons. */
2703 PGM_LOCK_ASSERT_OWNER(pVM);
2704 if (pCurType->fKeepPgmLock)
2705 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2706 else
2707 {
2708 PGM_UNLOCK(pVM);
2709 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2710 PGM_LOCK_VOID(pVM);
2711 }
2712
2713#ifdef VBOX_WITH_STATISTICS
2714 pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2715 if (pCur)
2716 STAM_PROFILE_STOP(&pCur->Stat, h);
2717#else
2718 pCur = NULL; /* might not be valid anymore. */
2719#endif
2720 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2721 {
2722 if (pvDst)
2723 memcpy(pvDst, pvBuf, cbRange);
2724 rcStrict = VINF_SUCCESS;
2725 }
2726 else
2727 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, true),
2728 ("rcStrict=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n",
2729 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pCur ? R3STRING(pCur->pszDesc) : ""));
2730 }
2731 else
2732 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2733 GCPhys, pPage, VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
2734 if (RT_LIKELY(cbRange == cbWrite) || !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2735 {
2736 if (pvDst)
2737 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2738 return rcStrict;
2739 }
2740
2741 /* more fun to be had below */
2742 cbWrite -= cbRange;
2743 GCPhys += cbRange;
2744 pvBuf = (uint8_t *)pvBuf + cbRange;
2745 pvDst = (uint8_t *)pvDst + cbRange;
2746 }
2747 else /* The handler is somewhere else in the page, deal with it below. */
2748 rcStrict = VINF_SUCCESS;
2749 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
2750
2751 /*
2752 * Deal with all the odd ends (used to be deal with virt+phys).
2753 */
2754 Assert(rcStrict != VINF_PGM_HANDLER_DO_DEFAULT);
2755
2756 /* We need a writable destination page. */
2757 if (!pvDst)
2758 {
2759 int rc2 = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2760 AssertLogRelMsgReturn(RT_SUCCESS(rc2),
2761 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n", GCPhys, pPage, rc2),
2762 rc2);
2763 }
2764
2765 /* The loop state (big + ugly). */
2766 PPGMPHYSHANDLER pPhys = NULL;
2767 uint32_t offPhys = GUEST_PAGE_SIZE;
2768 uint32_t offPhysLast = GUEST_PAGE_SIZE;
2769 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2770
2771 /* The loop. */
2772 for (;;)
2773 {
2774 if (fMorePhys && !pPhys)
2775 {
2776 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2777 if (pPhys)
2778 {
2779 offPhys = 0;
2780 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2781 }
2782 else
2783 {
2784 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
2785 GCPhys, true /* fAbove */);
2786 if ( pPhys
2787 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
2788 {
2789 offPhys = pPhys->Core.Key - GCPhys;
2790 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2791 }
2792 else
2793 {
2794 pPhys = NULL;
2795 fMorePhys = false;
2796 offPhys = offPhysLast = GUEST_PAGE_SIZE;
2797 }
2798 }
2799 }
2800
2801 /*
2802 * Handle access to space without handlers (that's easy).
2803 */
2804 VBOXSTRICTRC rcStrict2 = VINF_PGM_HANDLER_DO_DEFAULT;
2805 uint32_t cbRange = (uint32_t)cbWrite;
2806
2807 /*
2808 * Physical handler.
2809 */
2810 if (!offPhys)
2811 {
2812#ifndef IN_RING3
2813 if (enmOrigin != PGMACCESSORIGIN_IEM)
2814 /* Cannot reliably handle informational status codes in this context */
2815 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2816#endif
2817 if (cbRange > offPhysLast + 1)
2818 cbRange = offPhysLast + 1;
2819
2820 PPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys);
2821 PFNPGMPHYSHANDLER const pfnHandler = pCurType->CTX_SUFF(pfnHandler);
2822 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pPhys->uUser
2823 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pPhys->uUser);
2824
2825 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2826 STAM_PROFILE_START(&pPhys->Stat, h);
2827
2828 /* Most handlers will want to release the PGM lock for deadlock prevention
2829 (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
2830 dirty page trackers will want to keep it for performance reasons. */
2831 PGM_LOCK_ASSERT_OWNER(pVM);
2832 if (pCurType->fKeepPgmLock)
2833 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2834 else
2835 {
2836 PGM_UNLOCK(pVM);
2837 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2838 PGM_LOCK_VOID(pVM);
2839 }
2840
2841#ifdef VBOX_WITH_STATISTICS
2842 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2843 if (pPhys)
2844 STAM_PROFILE_STOP(&pPhys->Stat, h);
2845#else
2846 pPhys = NULL; /* might not be valid anymore. */
2847#endif
2848 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict2, true),
2849 ("rcStrict2=%Rrc (rcStrict=%Rrc) GCPhys=%RGp pPage=%R[pgmpage] %s\n", VBOXSTRICTRC_VAL(rcStrict2),
2850 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pPhys ? R3STRING(pPhys->pszDesc) : ""));
2851 }
2852
2853 /*
2854 * Execute the default action and merge the status codes.
2855 */
2856 if (rcStrict2 == VINF_PGM_HANDLER_DO_DEFAULT)
2857 {
2858 memcpy(pvDst, pvBuf, cbRange);
2859 rcStrict2 = VINF_SUCCESS;
2860 }
2861 else if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2862 {
2863 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2864 return rcStrict2;
2865 }
2866 else
2867 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2868
2869 /*
2870 * Advance if we've got more stuff to do.
2871 */
2872 if (cbRange >= cbWrite)
2873 {
2874 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2875 return rcStrict;
2876 }
2877
2878
2879 cbWrite -= cbRange;
2880 GCPhys += cbRange;
2881 pvBuf = (uint8_t *)pvBuf + cbRange;
2882 pvDst = (uint8_t *)pvDst + cbRange;
2883
2884 offPhys -= cbRange;
2885 offPhysLast -= cbRange;
2886 }
2887}
2888
2889
2890/**
2891 * Write to physical memory.
2892 *
2893 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2894 * want to ignore those.
2895 *
2896 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
2897 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
2898 * @retval VINF_SUCCESS in all context - write completed.
2899 *
2900 * @retval VINF_EM_OFF in RC and R0 - write completed.
2901 * @retval VINF_EM_SUSPEND in RC and R0 - write completed.
2902 * @retval VINF_EM_RESET in RC and R0 - write completed.
2903 * @retval VINF_EM_HALT in RC and R0 - write completed.
2904 * @retval VINF_SELM_SYNC_GDT in RC only - write completed.
2905 *
2906 * @retval VINF_EM_DBG_STOP in RC and R0 - write completed.
2907 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - write completed.
2908 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
2909 *
2910 * @retval VINF_IOM_R3_MMIO_WRITE in RC and R0.
2911 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
2912 * @retval VINF_IOM_R3_MMIO_COMMIT_WRITE in RC and R0.
2913 *
2914 * @retval VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT in RC only - write completed.
2915 * @retval VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT in RC only.
2916 * @retval VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT in RC only.
2917 * @retval VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT in RC only.
2918 * @retval VINF_CSAM_PENDING_ACTION in RC only.
2919 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
2920 *
2921 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
2922 * haven't been cleared for strict status codes yet.
2923 *
2924 *
2925 * @param pVM The cross context VM structure.
2926 * @param GCPhys Physical address to write to.
2927 * @param pvBuf What to write.
2928 * @param cbWrite How many bytes to write.
2929 * @param enmOrigin Who is calling.
2930 */
2931VMMDECL(VBOXSTRICTRC) PGMPhysWrite(PVMCC pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin)
2932{
2933 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()! enmOrigin=%d\n", enmOrigin));
2934 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2935 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2936
2937 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysWrite));
2938 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2939
2940 PGM_LOCK_VOID(pVM);
2941
2942 /*
2943 * Copy loop on ram ranges.
2944 */
2945 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2946 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2947 for (;;)
2948 {
2949 /* Inside range or not? */
2950 if (pRam && GCPhys >= pRam->GCPhys)
2951 {
2952 /*
2953 * Must work our way thru this page by page.
2954 */
2955 RTGCPTR off = GCPhys - pRam->GCPhys;
2956 while (off < pRam->cb)
2957 {
2958 RTGCPTR iPage = off >> GUEST_PAGE_SHIFT;
2959 PPGMPAGE pPage = &pRam->aPages[iPage];
2960 size_t cb = GUEST_PAGE_SIZE - (off & GUEST_PAGE_OFFSET_MASK);
2961 if (cb > cbWrite)
2962 cb = cbWrite;
2963
2964 /*
2965 * Normal page? Get the pointer to it.
2966 */
2967 if ( !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
2968 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
2969 {
2970 PGMPAGEMAPLOCK PgMpLck;
2971 void *pvDst;
2972 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
2973 if (RT_SUCCESS(rc))
2974 {
2975 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
2976 memcpy(pvDst, pvBuf, cb);
2977 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2978 }
2979 /* Ignore writes to ballooned pages. */
2980 else if (!PGM_PAGE_IS_BALLOONED(pPage))
2981 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2982 pRam->GCPhys + off, pPage, rc));
2983 }
2984 /*
2985 * Active WRITE or ALL access handlers.
2986 */
2987 else
2988 {
2989 VBOXSTRICTRC rcStrict2 = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
2990 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2991 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2992 else
2993 {
2994 PGM_UNLOCK(pVM);
2995 return rcStrict2;
2996 }
2997 }
2998
2999 /* next page */
3000 if (cb >= cbWrite)
3001 {
3002 PGM_UNLOCK(pVM);
3003 return rcStrict;
3004 }
3005
3006 cbWrite -= cb;
3007 off += cb;
3008 pvBuf = (const char *)pvBuf + cb;
3009 } /* walk pages in ram range */
3010
3011 GCPhys = pRam->GCPhysLast + 1;
3012 }
3013 else
3014 {
3015 /*
3016 * Unassigned address space, skip it.
3017 */
3018 if (!pRam)
3019 break;
3020 size_t cb = pRam->GCPhys - GCPhys;
3021 if (cb >= cbWrite)
3022 break;
3023 cbWrite -= cb;
3024 pvBuf = (const char *)pvBuf + cb;
3025 GCPhys += cb;
3026 }
3027
3028 /* Advance range if necessary. */
3029 while (pRam && GCPhys > pRam->GCPhysLast)
3030 pRam = pRam->CTX_SUFF(pNext);
3031 } /* Ram range walk */
3032
3033 PGM_UNLOCK(pVM);
3034 return rcStrict;
3035}
3036
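/* Illustrative usage sketch (not part of the build): writing guest physical
   memory with WRITE/ALL handlers respected. Outside ring-3, origins that are
   not prepared for strict statuses can get VERR_PGM_PHYS_WR_HIT_HANDLER back
   and have to retry in ring-3. The value written is made up. */
#if 0
static VBOXSTRICTRC pgmPhysExampleHandledWrite(PVMCC pVM, RTGCPHYS GCPhys)
{
    uint32_t const u32Value = UINT32_C(0xfeedface);
    return PGMPhysWrite(pVM, GCPhys, &u32Value, sizeof(u32Value), PGMACCESSORIGIN_IEM);
}
#endif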
3037
3038/**
3039 * Read from guest physical memory by GC physical address, bypassing
3040 * MMIO and access handlers.
3041 *
3042 * @returns VBox status code.
3043 * @param pVM The cross context VM structure.
3044 * @param pvDst The destination address.
3045 * @param GCPhysSrc The source address (GC physical address).
3046 * @param cb The number of bytes to read.
3047 */
3048VMMDECL(int) PGMPhysSimpleReadGCPhys(PVMCC pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
3049{
3050 /*
3051 * Treat the first page as a special case.
3052 */
3053 if (!cb)
3054 return VINF_SUCCESS;
3055
3056 /* map the 1st page */
3057 void const *pvSrc;
3058 PGMPAGEMAPLOCK Lock;
3059 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3060 if (RT_FAILURE(rc))
3061 return rc;
3062
3063 /* optimize for the case where access is completely within the first page. */
3064 size_t cbPage = GUEST_PAGE_SIZE - (GCPhysSrc & GUEST_PAGE_OFFSET_MASK);
3065 if (RT_LIKELY(cb <= cbPage))
3066 {
3067 memcpy(pvDst, pvSrc, cb);
3068 PGMPhysReleasePageMappingLock(pVM, &Lock);
3069 return VINF_SUCCESS;
3070 }
3071
3072 /* copy to the end of the page. */
3073 memcpy(pvDst, pvSrc, cbPage);
3074 PGMPhysReleasePageMappingLock(pVM, &Lock);
3075 GCPhysSrc += cbPage;
3076 pvDst = (uint8_t *)pvDst + cbPage;
3077 cb -= cbPage;
3078
3079 /*
3080 * Page by page.
3081 */
3082 for (;;)
3083 {
3084 /* map the page */
3085 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3086 if (RT_FAILURE(rc))
3087 return rc;
3088
3089 /* last page? */
3090 if (cb <= GUEST_PAGE_SIZE)
3091 {
3092 memcpy(pvDst, pvSrc, cb);
3093 PGMPhysReleasePageMappingLock(pVM, &Lock);
3094 return VINF_SUCCESS;
3095 }
3096
3097 /* copy the entire page and advance */
3098 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3099 PGMPhysReleasePageMappingLock(pVM, &Lock);
3100 GCPhysSrc += GUEST_PAGE_SIZE;
3101 pvDst = (uint8_t *)pvDst + GUEST_PAGE_SIZE;
3102 cb -= GUEST_PAGE_SIZE;
3103 }
3104 /* won't ever get here. */
3105}
3106
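/* Illustrative usage sketch (not part of the build): pulling a small structure
   out of guest RAM while bypassing MMIO and access handlers, e.g. when
   triggering a handler would be wrong. The structure and function name are
   made up; the API itself copes with copies that cross page boundaries. */
#if 0
typedef struct EXAMPLEGUESTHDR
{
    uint32_t u32Magic;
    uint32_t cbPayload;
} EXAMPLEGUESTHDR;

static int pgmPhysExampleReadHdr(PVMCC pVM, RTGCPHYS GCPhysHdr, EXAMPLEGUESTHDR *pHdr)
{
    return PGMPhysSimpleReadGCPhys(pVM, pHdr, GCPhysHdr, sizeof(*pHdr));
}
#endif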
3107
3108/**
3109 * Write to guest physical memory by GC physical address.
3111 *
3112 * This will bypass MMIO and access handlers.
3113 *
3114 * @returns VBox status code.
3115 * @param pVM The cross context VM structure.
3116 * @param GCPhysDst The GC physical address of the destination.
3117 * @param pvSrc The source buffer.
3118 * @param cb The number of bytes to write.
3119 */
3120VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVMCC pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
3121{
3122 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
3123
3124 /*
3125 * Treat the first page as a special case.
3126 */
3127 if (!cb)
3128 return VINF_SUCCESS;
3129
3130 /* map the 1st page */
3131 void *pvDst;
3132 PGMPAGEMAPLOCK Lock;
3133 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3134 if (RT_FAILURE(rc))
3135 return rc;
3136
3137 /* optimize for the case where access is completely within the first page. */
3138 size_t cbPage = GUEST_PAGE_SIZE - (GCPhysDst & GUEST_PAGE_OFFSET_MASK);
3139 if (RT_LIKELY(cb <= cbPage))
3140 {
3141 memcpy(pvDst, pvSrc, cb);
3142 PGMPhysReleasePageMappingLock(pVM, &Lock);
3143 return VINF_SUCCESS;
3144 }
3145
3146 /* copy to the end of the page. */
3147 memcpy(pvDst, pvSrc, cbPage);
3148 PGMPhysReleasePageMappingLock(pVM, &Lock);
3149 GCPhysDst += cbPage;
3150 pvSrc = (const uint8_t *)pvSrc + cbPage;
3151 cb -= cbPage;
3152
3153 /*
3154 * Page by page.
3155 */
3156 for (;;)
3157 {
3158 /* map the page */
3159 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3160 if (RT_FAILURE(rc))
3161 return rc;
3162
3163 /* last page? */
3164 if (cb <= GUEST_PAGE_SIZE)
3165 {
3166 memcpy(pvDst, pvSrc, cb);
3167 PGMPhysReleasePageMappingLock(pVM, &Lock);
3168 return VINF_SUCCESS;
3169 }
3170
3171 /* copy the entire page and advance */
3172 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3173 PGMPhysReleasePageMappingLock(pVM, &Lock);
3174 GCPhysDst += GUEST_PAGE_SIZE;
3175 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
3176 cb -= GUEST_PAGE_SIZE;
3177 }
3178 /* won't ever get here. */
3179}
3180
3181
3182/**
3183 * Read from guest physical memory referenced by GC pointer.
3184 *
3185 * This function uses the current CR3/CR0/CR4 of the guest and will
3186 * bypass access handlers and not set any accessed bits.
3187 *
3188 * @returns VBox status code.
3189 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3190 * @param pvDst The destination address.
3191 * @param GCPtrSrc The source address (GC pointer).
3192 * @param cb The number of bytes to read.
3193 */
3194VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3195{
3196 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3197/** @todo fix the macro / state handling: VMCPU_ASSERT_EMT_OR_GURU(pVCpu); */
3198
3199 /*
3200 * Treat the first page as a special case.
3201 */
3202 if (!cb)
3203 return VINF_SUCCESS;
3204
3205 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleRead));
3206 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
3207
3208 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
3209 * when many VCPUs are fighting for the lock.
3210 */
3211 PGM_LOCK_VOID(pVM);
3212
3213 /* map the 1st page */
3214 void const *pvSrc;
3215 PGMPAGEMAPLOCK Lock;
3216 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3217 if (RT_FAILURE(rc))
3218 {
3219 PGM_UNLOCK(pVM);
3220 return rc;
3221 }
3222
3223 /* optimize for the case where access is completely within the first page. */
3224 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3225 if (RT_LIKELY(cb <= cbPage))
3226 {
3227 memcpy(pvDst, pvSrc, cb);
3228 PGMPhysReleasePageMappingLock(pVM, &Lock);
3229 PGM_UNLOCK(pVM);
3230 return VINF_SUCCESS;
3231 }
3232
3233 /* copy to the end of the page. */
3234 memcpy(pvDst, pvSrc, cbPage);
3235 PGMPhysReleasePageMappingLock(pVM, &Lock);
3236 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
3237 pvDst = (uint8_t *)pvDst + cbPage;
3238 cb -= cbPage;
3239
3240 /*
3241 * Page by page.
3242 */
3243 for (;;)
3244 {
3245 /* map the page */
3246 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3247 if (RT_FAILURE(rc))
3248 {
3249 PGM_UNLOCK(pVM);
3250 return rc;
3251 }
3252
3253 /* last page? */
3254 if (cb <= GUEST_PAGE_SIZE)
3255 {
3256 memcpy(pvDst, pvSrc, cb);
3257 PGMPhysReleasePageMappingLock(pVM, &Lock);
3258 PGM_UNLOCK(pVM);
3259 return VINF_SUCCESS;
3260 }
3261
3262 /* copy the entire page and advance */
3263 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3264 PGMPhysReleasePageMappingLock(pVM, &Lock);
3265 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + GUEST_PAGE_SIZE);
3266 pvDst = (uint8_t *)pvDst + GUEST_PAGE_SIZE;
3267 cb -= GUEST_PAGE_SIZE;
3268 }
3269 /* won't ever get here. */
3270}
3271
3272
3273/**
3274 * Write to guest physical memory referenced by GC pointer.
3275 *
3276 * This function uses the current CR3/CR0/CR4 of the guest and will
3277 * bypass access handlers and not set dirty or accessed bits.
3278 *
3279 * @returns VBox status code.
3280 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3281 * @param GCPtrDst The destination address (GC pointer).
3282 * @param pvSrc The source address.
3283 * @param cb The number of bytes to write.
3284 */
3285VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3286{
3287 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3288 VMCPU_ASSERT_EMT(pVCpu);
3289
3290 /*
3291 * Treat the first page as a special case.
3292 */
3293 if (!cb)
3294 return VINF_SUCCESS;
3295
3296 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleWrite));
3297 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
3298
3299 /* map the 1st page */
3300 void *pvDst;
3301 PGMPAGEMAPLOCK Lock;
3302 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3303 if (RT_FAILURE(rc))
3304 return rc;
3305
3306 /* optimize for the case where access is completely within the first page. */
3307 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3308 if (RT_LIKELY(cb <= cbPage))
3309 {
3310 memcpy(pvDst, pvSrc, cb);
3311 PGMPhysReleasePageMappingLock(pVM, &Lock);
3312 return VINF_SUCCESS;
3313 }
3314
3315 /* copy to the end of the page. */
3316 memcpy(pvDst, pvSrc, cbPage);
3317 PGMPhysReleasePageMappingLock(pVM, &Lock);
3318 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3319 pvSrc = (const uint8_t *)pvSrc + cbPage;
3320 cb -= cbPage;
3321
3322 /*
3323 * Page by page.
3324 */
3325 for (;;)
3326 {
3327 /* map the page */
3328 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3329 if (RT_FAILURE(rc))
3330 return rc;
3331
3332 /* last page? */
3333 if (cb <= GUEST_PAGE_SIZE)
3334 {
3335 memcpy(pvDst, pvSrc, cb);
3336 PGMPhysReleasePageMappingLock(pVM, &Lock);
3337 return VINF_SUCCESS;
3338 }
3339
3340 /* copy the entire page and advance */
3341 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3342 PGMPhysReleasePageMappingLock(pVM, &Lock);
3343 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + GUEST_PAGE_SIZE);
3344 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
3345 cb -= GUEST_PAGE_SIZE;
3346 }
3347 /* won't ever get here. */
3348}
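
/*
 * Illustrative usage sketch (not part of the build): how an EMT caller might push a
 * small buffer to a guest-virtual address with PGMPhysSimpleWriteGCPtr. The buffer
 * and the GCPtrShared address are invented for the example; only the API above is used.
 *
 * @code
 *      uint8_t abPayload[64];
 *      RT_ZERO(abPayload);                                     // whatever the caller wants to write
 *      int rc = PGMPhysSimpleWriteGCPtr(pVCpu, GCPtrShared, abPayload, sizeof(abPayload));
 *      if (RT_FAILURE(rc))                                     // e.g. nothing mapped at GCPtrShared
 *          return rc;                                          // no handlers run, no A/D bits touched
 * @endcode
 */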
3349
3350
3351/**
3352 * Write to guest physical memory referenced by GC pointer and update the PTE.
3353 *
3354 * This function uses the current CR3/CR0/CR4 of the guest and will
3355 * bypass access handlers but will set any dirty and accessed bits in the PTE.
3356 *
3357 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
3358 *
3359 * @returns VBox status code.
3360 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3361 * @param GCPtrDst The destination address (GC pointer).
3362 * @param pvSrc The source address.
3363 * @param cb The number of bytes to write.
3364 */
3365VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3366{
3367 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3368 VMCPU_ASSERT_EMT(pVCpu);
3369
3370 /*
3371 * Treat the first page as a special case.
3372 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage calls.
3373 */
3374 if (!cb)
3375 return VINF_SUCCESS;
3376
3377 /* map the 1st page */
3378 void *pvDst;
3379 PGMPAGEMAPLOCK Lock;
3380 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3381 if (RT_FAILURE(rc))
3382 return rc;
3383
3384 /* optimize for the case where access is completely within the first page. */
3385 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3386 if (RT_LIKELY(cb <= cbPage))
3387 {
3388 memcpy(pvDst, pvSrc, cb);
3389 PGMPhysReleasePageMappingLock(pVM, &Lock);
3390 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3391 return VINF_SUCCESS;
3392 }
3393
3394 /* copy to the end of the page. */
3395 memcpy(pvDst, pvSrc, cbPage);
3396 PGMPhysReleasePageMappingLock(pVM, &Lock);
3397 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3398 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3399 pvSrc = (const uint8_t *)pvSrc + cbPage;
3400 cb -= cbPage;
3401
3402 /*
3403 * Page by page.
3404 */
3405 for (;;)
3406 {
3407 /* map the page */
3408 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3409 if (RT_FAILURE(rc))
3410 return rc;
3411
3412 /* last page? */
3413 if (cb <= GUEST_PAGE_SIZE)
3414 {
3415 memcpy(pvDst, pvSrc, cb);
3416 PGMPhysReleasePageMappingLock(pVM, &Lock);
3417 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3418 return VINF_SUCCESS;
3419 }
3420
3421 /* copy the entire page and advance */
3422 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3423 PGMPhysReleasePageMappingLock(pVM, &Lock);
3424 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3425 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + GUEST_PAGE_SIZE);
3426 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
3427 cb -= GUEST_PAGE_SIZE;
3428 }
3429 /* won't ever get here. */
3430}
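
/*
 * Illustrative usage sketch (not part of the build): same call shape as
 * PGMPhysSimpleWriteGCPtr, but used when the guest PTEs should end up accessed and
 * dirty, e.g. when emulating an instruction that architecturally wrote the memory.
 * GCPtrStack and uValue are invented names.
 *
 * @code
 *      int rc = PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrStack, &uValue, sizeof(uValue));
 *      AssertRCReturn(rc, rc);                                 // A/D bits set, handlers still bypassed
 * @endcode
 */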
3431
3432
3433/**
3434 * Read from guest physical memory referenced by GC pointer.
3435 *
3436 * This function uses the current CR3/CR0/CR4 of the guest and will
3437 * respect access handlers and set accessed bits.
3438 *
3439 * @returns Strict VBox status, see PGMPhysRead for details.
3440 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
3441 * specified virtual address.
3442 *
3443 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3444 * @param pvDst The destination address.
3445 * @param GCPtrSrc The source address (GC pointer).
3446 * @param cb The number of bytes to read.
3447 * @param enmOrigin Who is calling.
3448 * @thread EMT(pVCpu)
3449 */
3450VMMDECL(VBOXSTRICTRC) PGMPhysReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3451{
3452 int rc;
3453 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3454 VMCPU_ASSERT_EMT(pVCpu);
3455
3456 /*
3457 * Anything to do?
3458 */
3459 if (!cb)
3460 return VINF_SUCCESS;
3461
3462 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
3463
3464 /*
3465 * Optimize reads within a single page.
3466 */
3467 if (((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK) + cb <= GUEST_PAGE_SIZE)
3468 {
3469 /* Convert virtual to physical address + flags */
3470 PGMPTWALK Walk;
3471 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk);
3472 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3473 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3474
3475 /* mark the guest page as accessed. */
3476 if (!(Walk.fEffective & X86_PTE_A))
3477 {
3478 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3479 AssertRC(rc);
3480 }
3481
3482 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3483 }
3484
3485 /*
3486 * Page by page.
3487 */
3488 for (;;)
3489 {
3490 /* Convert virtual to physical address + flags */
3491 PGMPTWALK Walk;
3492 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk);
3493 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3494 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3495
3496 /* mark the guest page as accessed. */
3497 if (!(Walk.fEffective & X86_PTE_A))
3498 {
3499 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3500 AssertRC(rc);
3501 }
3502
3503 /* copy */
3504 size_t cbRead = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3505 if (cbRead < cb)
3506 {
3507 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pvDst, cbRead, enmOrigin);
3508 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3509 { /* likely */ }
3510 else
3511 return rcStrict;
3512 }
3513 else /* Last page (cbRead is GUEST_PAGE_SIZE, we only need cb!) */
3514 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3515
3516 /* next */
3517 Assert(cb > cbRead);
3518 cb -= cbRead;
3519 pvDst = (uint8_t *)pvDst + cbRead;
3520 GCPtrSrc += cbRead;
3521 }
3522}
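
/*
 * Illustrative usage sketch (not part of the build): reading through the guest page
 * tables with access handlers respected. PGMACCESSORIGIN_DEBUGGER is assumed here
 * merely as a plausible example origin; callers pass whatever origin describes them.
 *
 * @code
 *      uint64_t u64Val = 0;
 *      VBOXSTRICTRC rcStrict = PGMPhysReadGCPtr(pVCpu, &u64Val, GCPtrSrc, sizeof(u64Val),
 *                                               PGMACCESSORIGIN_DEBUGGER);
 *      if (rcStrict != VINF_SUCCESS)                           // informational statuses are possible
 *          return rcStrict;                                    // see PGMPhysRead for the full list
 * @endcode
 */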
3523
3524
3525/**
3526 * Write to guest physical memory referenced by GC pointer.
3527 *
3528 * This function uses the current CR3/CR0/CR4 of the guest and will
3529 * respect access handlers and set dirty and accessed bits.
3530 *
3531 * @returns Strict VBox status, see PGMPhysWrite for details.
3532 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
3533 * specified virtual address.
3534 *
3535 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3536 * @param GCPtrDst The destination address (GC pointer).
3537 * @param pvSrc The source address.
3538 * @param cb The number of bytes to write.
3539 * @param enmOrigin Who is calling.
3540 */
3541VMMDECL(VBOXSTRICTRC) PGMPhysWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3542{
3543 int rc;
3544 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3545 VMCPU_ASSERT_EMT(pVCpu);
3546
3547 /*
3548 * Anything to do?
3549 */
3550 if (!cb)
3551 return VINF_SUCCESS;
3552
3553 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
3554
3555 /*
3556 * Optimize writes within a single page.
3557 */
3558 if (((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK) + cb <= GUEST_PAGE_SIZE)
3559 {
3560 /* Convert virtual to physical address + flags */
3561 PGMPTWALK Walk;
3562 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk);
3563 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3564 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3565
3566 /* Mention when we ignore X86_PTE_RW... */
3567 if (!(Walk.fEffective & X86_PTE_RW))
3568 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3569
3570 /* Mark the guest page as accessed and dirty if necessary. */
3571 if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3572 {
3573 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3574 AssertRC(rc);
3575 }
3576
3577 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3578 }
3579
3580 /*
3581 * Page by page.
3582 */
3583 for (;;)
3584 {
3585 /* Convert virtual to physical address + flags */
3586 PGMPTWALK Walk;
3587 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk);
3588 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3589 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3590
3591 /* Mention when we ignore X86_PTE_RW... */
3592 if (!(Walk.fEffective & X86_PTE_RW))
3593 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3594
3595 /* Mark the guest page as accessed and dirty if necessary. */
3596 if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3597 {
3598 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3599 AssertRC(rc);
3600 }
3601
3602 /* copy */
3603 size_t cbWrite = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3604 if (cbWrite < cb)
3605 {
3606 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite, enmOrigin);
3607 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3608 { /* likely */ }
3609 else
3610 return rcStrict;
3611 }
3612 else /* Last page (cbWrite is GUEST_PAGE_SIZE, we only need cb!) */
3613 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3614
3615 /* next */
3616 Assert(cb > cbWrite);
3617 cb -= cbWrite;
3618 pvSrc = (uint8_t *)pvSrc + cbWrite;
3619 GCPtrDst += cbWrite;
3620 }
3621}
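
/*
 * Illustrative usage sketch (not part of the build): the write counterpart, which also
 * sets the accessed and dirty bits in the guest PTEs. The origin value and the
 * pvData/cbData names are assumptions for the example.
 *
 * @code
 *      VBOXSTRICTRC rcStrict = PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvData, cbData,
 *                                                PGMACCESSORIGIN_DEBUGGER);
 *      if (rcStrict != VINF_SUCCESS)                           // VERR_PAGE_TABLE_NOT_PRESENT, handler statuses, ...
 *          return rcStrict;
 * @endcode
 */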
3622
3623
3624/**
3625 * Return the page type of the specified physical address.
3626 *
3627 * @returns The page type.
3628 * @param pVM The cross context VM structure.
3629 * @param GCPhys Guest physical address
3630 */
3631VMM_INT_DECL(PGMPAGETYPE) PGMPhysGetPageType(PVMCC pVM, RTGCPHYS GCPhys)
3632{
3633 PGM_LOCK_VOID(pVM);
3634 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
3635 PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
3636 PGM_UNLOCK(pVM);
3637
3638 return enmPgType;
3639}
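
/*
 * Illustrative usage sketch (not part of the build): classifying an address before
 * deciding how to treat it, e.g. skipping unmapped or pure-MMIO pages when scanning
 * guest RAM. The early return is just an example of consuming the returned type.
 *
 * @code
 *      PGMPAGETYPE enmType = PGMPhysGetPageType(pVM, GCPhys);
 *      if (   enmType == PGMPAGETYPE_INVALID                   // nothing mapped at GCPhys
 *          || enmType == PGMPAGETYPE_MMIO)                     // pure MMIO, don't touch directly
 *          return VINF_SUCCESS;
 * @endcode
 */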
3640
3641
3642/**
3643 * Converts a GC physical address to a HC ring-3 pointer, with some
3644 * additional checks.
3645 *
3646 * @returns VBox status code (no informational statuses).
3647 *
3648 * @param pVM The cross context VM structure.
3649 * @param pVCpu The cross context virtual CPU structure of the
3650 * calling EMT.
3651 * @param GCPhys The GC physical address to convert. This API masks
3652 * the A20 line when necessary.
3653 * @param puTlbPhysRev Where to read the physical TLB revision. Needs to
3654 * be done while holding the PGM lock.
3655 * @param ppb Where to store the pointer corresponding to GCPhys
3656 * on success.
3657 * @param pfTlb The TLB flags and revision. We only add stuff.
3658 *
3659 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr and
3660 * PGMPhysIemGCPhys2Ptr.
3661 *
3662 * @thread EMT(pVCpu).
3663 */
3664VMM_INT_DECL(int) PGMPhysIemGCPhys2PtrNoLock(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint64_t const volatile *puTlbPhysRev,
3665 R3R0PTRTYPE(uint8_t *) *ppb,
3666 uint64_t *pfTlb)
3667{
3668 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
3669 Assert(!(GCPhys & X86_PAGE_OFFSET_MASK));
3670
3671 PGM_LOCK_VOID(pVM);
3672
3673 PPGMRAMRANGE pRam;
3674 PPGMPAGE pPage;
3675 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3676 if (RT_SUCCESS(rc))
3677 {
3678 if (!PGM_PAGE_IS_BALLOONED(pPage))
3679 {
3680 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3681 {
3682 if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
3683 {
3684 /*
3685 * No access handler.
3686 */
3687 switch (PGM_PAGE_GET_STATE(pPage))
3688 {
3689 case PGM_PAGE_STATE_ALLOCATED:
3690 *pfTlb |= *puTlbPhysRev;
3691 break;
3692 case PGM_PAGE_STATE_BALLOONED:
3693 AssertFailed();
3694 RT_FALL_THRU();
3695 case PGM_PAGE_STATE_ZERO:
3696 case PGM_PAGE_STATE_SHARED:
3697 case PGM_PAGE_STATE_WRITE_MONITORED:
3698 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
3699 break;
3700 }
3701
3702 PPGMPAGEMAPTLBE pTlbe;
3703 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3704 AssertLogRelRCReturn(rc, rc);
3705 *ppb = (uint8_t *)pTlbe->pv;
3706 }
3707 else if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
3708 {
3709 /*
3710 * MMIO or similar all access handler: Catch all access.
3711 */
3712 *pfTlb |= *puTlbPhysRev
3713 | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3714 *ppb = NULL;
3715 }
3716 else
3717 {
3718 /*
3719 * Write access handler: Catch write accesses if active.
3720 */
3721 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
3722 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
3723 else
3724 switch (PGM_PAGE_GET_STATE(pPage))
3725 {
3726 case PGM_PAGE_STATE_ALLOCATED:
3727 *pfTlb |= *puTlbPhysRev;
3728 break;
3729 case PGM_PAGE_STATE_BALLOONED:
3730 AssertFailed();
3731 RT_FALL_THRU();
3732 case PGM_PAGE_STATE_ZERO:
3733 case PGM_PAGE_STATE_SHARED:
3734 case PGM_PAGE_STATE_WRITE_MONITORED:
3735 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
3736 break;
3737 }
3738
3739 PPGMPAGEMAPTLBE pTlbe;
3740 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3741 AssertLogRelRCReturn(rc, rc);
3742 *ppb = (uint8_t *)pTlbe->pv;
3743 }
3744 }
3745 else
3746 {
3747 /* Alias MMIO: For now, we catch all access. */
3748 *pfTlb |= *puTlbPhysRev
3749 | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3750 *ppb = NULL;
3751 }
3752 }
3753 else
3754 {
3755 /* Ballooned: Shouldn't get here, but we read the zero page via PGMPhysRead and writes go to /dev/null. */
3756 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3757 *ppb = NULL;
3758 }
3759 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 pPage=%R[pgmpage]\n", GCPhys, *ppb, *pfTlb, pPage));
3760 }
3761 else
3762 {
3763 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3764 *ppb = NULL;
3765 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 (rc=%Rrc)\n", GCPhys, *ppb, *pfTlb, rc));
3766 }
3767
3768 PGM_UNLOCK(pVM);
3769 return VINF_SUCCESS;
3770}
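
/*
 * Illustrative usage sketch (not part of the build): roughly how an IEM-style TLB
 * caller could consume the two outputs. GCPhysPage and uTlbPhysRev stand in for the
 * caller's page address and physical TLB revision counter; only the flag names used
 * above are assumed.
 *
 * @code
 *      uint8_t *pbMapping    = NULL;
 *      uint64_t fFlagsAndRev = 0;
 *      int rc = PGMPhysIemGCPhys2PtrNoLock(pVM, pVCpu, GCPhysPage, &uTlbPhysRev,
 *                                          &pbMapping, &fFlagsAndRev);
 *      AssertRCReturn(rc, rc);
 *      if (!(fFlagsAndRev & (PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3)))
 *      {
 *          // pbMapping may be read directly; revalidate against the revision before reuse
 *      }
 * @endcode
 */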
3771
3772
3773/**
3774 * Converts a GC physical address to a HC ring-3 pointer, with some
3775 * additional checks.
3776 *
3777 * @returns VBox status code (no informational statuses).
3778 * @retval VINF_SUCCESS on success.
3779 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
3780 * access handler of some kind.
3781 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
3782 * accesses or is odd in any way.
3783 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
3784 *
3785 * @param pVM The cross context VM structure.
3786 * @param pVCpu The cross context virtual CPU structure of the
3787 * calling EMT.
3788 * @param GCPhys The GC physical address to convert. This API masks
3789 * the A20 line when necessary.
3790 * @param fWritable Whether write access is required.
3791 * @param fByPassHandlers Whether to bypass access handlers.
3792 * @param ppv Where to store the pointer corresponding to GCPhys
3793 * on success.
3794 * @param pLock Where to store the page mapping lock; release it with PGMPhysReleasePageMappingLock when done.
3795 *
3796 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr.
3797 * @thread EMT(pVCpu).
3798 */
3799VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers,
3800 void **ppv, PPGMPAGEMAPLOCK pLock)
3801{
3802 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
3803
3804 PGM_LOCK_VOID(pVM);
3805
3806 PPGMRAMRANGE pRam;
3807 PPGMPAGE pPage;
3808 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3809 if (RT_SUCCESS(rc))
3810 {
3811 if (PGM_PAGE_IS_BALLOONED(pPage))
3812 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3813 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3814 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3815 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
3816 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
3817 rc = VINF_SUCCESS;
3818 else
3819 {
3820 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
3821 {
3822 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
3823 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3824 }
3825 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
3826 {
3827 Assert(!fByPassHandlers);
3828 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3829 }
3830 }
3831 if (RT_SUCCESS(rc))
3832 {
3833 int rc2;
3834
3835 /* Make sure what we return is writable. */
3836 if (fWritable)
3837 switch (PGM_PAGE_GET_STATE(pPage))
3838 {
3839 case PGM_PAGE_STATE_ALLOCATED:
3840 break;
3841 case PGM_PAGE_STATE_BALLOONED:
3842 AssertFailed();
3843 break;
3844 case PGM_PAGE_STATE_ZERO:
3845 case PGM_PAGE_STATE_SHARED:
3846 case PGM_PAGE_STATE_WRITE_MONITORED:
3847 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK);
3848 AssertLogRelRCReturn(rc2, rc2);
3849 break;
3850 }
3851
3852 /* Get a ring-3 mapping of the address. */
3853 PPGMPAGEMAPTLBE pTlbe;
3854 rc2 = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3855 AssertLogRelRCReturn(rc2, rc2);
3856
3857 /* Lock it and calculate the address. */
3858 if (fWritable)
3859 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
3860 else
3861 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
3862 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
3863
3864 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
3865 }
3866 else
3867 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
3868
3869 /* else: handler catching all access, no pointer returned. */
3870 }
3871 else
3872 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
3873
3874 PGM_UNLOCK(pVM);
3875 return rc;
3876}
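
/*
 * Illustrative usage sketch (not part of the build): mapping a page for direct writing
 * and releasing the lock afterwards, falling back to the handler-aware APIs when a
 * catch-status is returned. Variable names are invented.
 *
 * @code
 *      void          *pvPage = NULL;
 *      PGMPAGEMAPLOCK Lock;
 *      int rc = PGMPhysIemGCPhys2Ptr(pVM, pVCpu, GCPhys, true, false, &pvPage, &Lock); // fWritable, !fByPassHandlers
 *      if (RT_SUCCESS(rc))
 *      {
 *          // access the page through pvPage ...
 *          PGMPhysReleasePageMappingLock(pVM, &Lock);
 *      }
 *      else
 *      {
 *          // VERR_PGM_PHYS_TLB_CATCH_WRITE / _CATCH_ALL / _UNASSIGNED: use PGMPhysRead/PGMPhysWrite instead
 *      }
 * @endcode
 */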
3877
3878
3879/**
3880 * Checks if the given GCPhys page requires special handling for the given access
3881 * because it's MMIO or otherwise monitored.
3882 *
3883 * @returns VBox status code (no informational statuses).
3884 * @retval VINF_SUCCESS on success.
3885 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
3886 * access handler of some kind.
3887 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
3888 * accesses or is odd in any way.
3889 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
3890 *
3891 * @param pVM The cross context VM structure.
3892 * @param GCPhys The GC physical address to convert. Since this is
3893 * only used for filling the REM TLB, the A20 mask must
3894 * be applied before calling this API.
3895 * @param fWritable Whether write access is required.
3896 * @param fByPassHandlers Whether to bypass access handlers.
3897 *
3898 * @remarks This is a watered down version of PGMPhysIemGCPhys2Ptr and really just
3899 * a stop-gap thing that should be removed once there is a better TLB
3900 * for virtual address accesses.
3901 */
3902VMM_INT_DECL(int) PGMPhysIemQueryAccess(PVMCC pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers)
3903{
3904 PGM_LOCK_VOID(pVM);
3905 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
3906
3907 PPGMRAMRANGE pRam;
3908 PPGMPAGE pPage;
3909 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3910 if (RT_SUCCESS(rc))
3911 {
3912 if (PGM_PAGE_IS_BALLOONED(pPage))
3913 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3914 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3915 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3916 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
3917 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
3918 rc = VINF_SUCCESS;
3919 else
3920 {
3921 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
3922 {
3923 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
3924 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3925 }
3926 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
3927 {
3928 Assert(!fByPassHandlers);
3929 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3930 }
3931 }
3932 }
3933
3934 PGM_UNLOCK(pVM);
3935 return rc;
3936}
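
/*
 * Illustrative usage sketch (not part of the build): probing whether a direct mapping
 * would be safe before setting one up, purely as an example of interpreting the status
 * codes documented above.
 *
 * @code
 *      int rc = PGMPhysIemQueryAccess(pVM, GCPhys, fWritable, false);  // don't bypass handlers
 *      if (rc == VINF_SUCCESS)
 *      {
 *          // plain RAM: a direct mapping is fine
 *      }
 *      else if (rc == VERR_PGM_PHYS_TLB_CATCH_WRITE || rc == VERR_PGM_PHYS_TLB_CATCH_ALL)
 *      {
 *          // monitored or MMIO page: go through PGMPhysRead/PGMPhysWrite instead
 *      }
 * @endcode
 */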
3937
3938#ifdef VBOX_WITH_NATIVE_NEM
3939
3940/**
3941 * Interface used by NEM to check what to do on a memory access exit.
3942 *
3943 * @returns VBox status code.
3944 * @param pVM The cross context VM structure.
3945 * @param pVCpu The cross context per virtual CPU structure.
3946 * Optional.
3947 * @param GCPhys The guest physical address.
3948 * @param fMakeWritable Whether to try to make the page writable or not. If it
3949 * cannot be made writable, NEM_PAGE_PROT_WRITE won't
3950 * be returned and the return code will be unaffected.
3951 * @param pInfo Where to return the page information. This is
3952 * initialized even on failure.
3953 * @param pfnChecker Page in-sync checker callback. Optional.
3954 * @param pvUser User argument to pass to pfnChecker.
3955 */
3956VMM_INT_DECL(int) PGMPhysNemPageInfoChecker(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fMakeWritable, PPGMPHYSNEMPAGEINFO pInfo,
3957 PFNPGMPHYSNEMCHECKPAGE pfnChecker, void *pvUser)
3958{
3959 PGM_LOCK_VOID(pVM);
3960
3961 PPGMPAGE pPage;
3962 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
3963 if (RT_SUCCESS(rc))
3964 {
3965 /* Try make it writable if requested. */
3966 pInfo->u2OldNemState = PGM_PAGE_GET_NEM_STATE(pPage);
3967 if (fMakeWritable)
3968 switch (PGM_PAGE_GET_STATE(pPage))
3969 {
3970 case PGM_PAGE_STATE_SHARED:
3971 case PGM_PAGE_STATE_WRITE_MONITORED:
3972 case PGM_PAGE_STATE_ZERO:
3973 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
3974 if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
3975 rc = VINF_SUCCESS;
3976 break;
3977 }
3978
3979 /* Fill in the info. */
3980 pInfo->HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
3981 pInfo->u2NemState = PGM_PAGE_GET_NEM_STATE(pPage);
3982 pInfo->fHasHandlers = PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) ? 1 : 0;
3983 PGMPAGETYPE const enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
3984 pInfo->enmType = enmType;
3985 pInfo->fNemProt = pgmPhysPageCalcNemProtection(pPage, enmType);
3986 switch (PGM_PAGE_GET_STATE(pPage))
3987 {
3988 case PGM_PAGE_STATE_ALLOCATED:
3989 pInfo->fZeroPage = 0;
3990 break;
3991
3992 case PGM_PAGE_STATE_ZERO:
3993 pInfo->fZeroPage = 1;
3994 break;
3995
3996 case PGM_PAGE_STATE_WRITE_MONITORED:
3997 pInfo->fZeroPage = 0;
3998 break;
3999
4000 case PGM_PAGE_STATE_SHARED:
4001 pInfo->fZeroPage = 0;
4002 break;
4003
4004 case PGM_PAGE_STATE_BALLOONED:
4005 pInfo->fZeroPage = 1;
4006 break;
4007
4008 default:
4009 pInfo->fZeroPage = 1;
4010 AssertFailedStmt(rc = VERR_PGM_PHYS_PAGE_GET_IPE);
4011 }
4012
4013 /* Call the checker and update NEM state. */
4014 if (pfnChecker)
4015 {
4016 rc = pfnChecker(pVM, pVCpu, GCPhys, pInfo, pvUser);
4017 PGM_PAGE_SET_NEM_STATE(pPage, pInfo->u2NemState);
4018 }
4019
4020 /* Done. */
4021 PGM_UNLOCK(pVM);
4022 }
4023 else
4024 {
4025 PGM_UNLOCK(pVM);
4026
4027 pInfo->HCPhys = NIL_RTHCPHYS;
4028 pInfo->fNemProt = NEM_PAGE_PROT_NONE;
4029 pInfo->u2NemState = 0;
4030 pInfo->fHasHandlers = 0;
4031 pInfo->fZeroPage = 0;
4032 pInfo->enmType = PGMPAGETYPE_INVALID;
4033 }
4034
4035 return rc;
4036}
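
/*
 * Illustrative usage sketch (not part of the build): how a NEM backend might query a
 * page on a memory access exit and react to the protection info. GCPhysFault and
 * fIsWrite are invented names; the remapping step is only hinted at in a comment.
 *
 * @code
 *      PGMPHYSNEMPAGEINFO PageInfo;
 *      int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhysFault, fIsWrite, &PageInfo,
 *                                         NULL, NULL);         // no in-sync checker callback
 *      if (RT_SUCCESS(rc) && (PageInfo.fNemProt & NEM_PAGE_PROT_WRITE))
 *      {
 *          // remap the page writable in the hypervisor and resume the guest
 *      }
 * @endcode
 */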
4037
4038
4039/**
4040 * NEM helper that performs @a pfnCallback on pages with NEM state @a uMinState
4041 * or higher.
4042 *
4043 * @returns VBox status code from callback.
4044 * @param pVM The cross context VM structure.
4045 * @param pVCpu The cross context per CPU structure. This is
4046 * optional as it's only for passing to the callback.
4047 * @param uMinState The minimum NEM state value to call on.
4048 * @param pfnCallback The callback function.
4049 * @param pvUser User argument for the callback.
4050 */
4051VMM_INT_DECL(int) PGMPhysNemEnumPagesByState(PVMCC pVM, PVMCPUCC pVCpu, uint8_t uMinState,
4052 PFNPGMPHYSNEMENUMCALLBACK pfnCallback, void *pvUser)
4053{
4054 /*
4055 * Just brute force this problem.
4056 */
4057 PGM_LOCK_VOID(pVM);
4058 int rc = VINF_SUCCESS;
4059 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext))
4060 {
4061 uint32_t const cPages = pRam->cb >> X86_PAGE_SHIFT;
4062 for (uint32_t iPage = 0; iPage < cPages; iPage++)
4063 {
4064 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(&pRam->aPages[iPage]);
4065 if (u2State < uMinState)
4066 { /* likely */ }
4067 else
4068 {
4069 rc = pfnCallback(pVM, pVCpu, pRam->GCPhys + ((RTGCPHYS)iPage << X86_PAGE_SHIFT), &u2State, pvUser);
4070 if (RT_SUCCESS(rc))
4071 PGM_PAGE_SET_NEM_STATE(&pRam->aPages[iPage], u2State);
4072 else
4073 break;
4074 }
4075 }
4076 }
4077 PGM_UNLOCK(pVM);
4078
4079 return rc;
4080}
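
/*
 * Illustrative usage sketch (not part of the build): a NEM backend walking all pages at
 * or above a given NEM state. The callback body, its name and the state values are made
 * up; the callback signature is inferred from the invocation in the loop above.
 *
 * @code
 *      static DECLCALLBACK(int) nemExampleUnmapPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys,
 *                                                   uint8_t *pu2State, void *pvUser)
 *      {
 *          RT_NOREF(pVM, pVCpu, pvUser);
 *          // hypothetical: tell the hypervisor to unmap GCPhys, then record the new state
 *          *pu2State = 0;
 *          return VINF_SUCCESS;
 *      }
 *
 *      // ... later, from the backend:
 *      int rc = PGMPhysNemEnumPagesByState(pVM, pVCpu, 1, nemExampleUnmapPage, NULL);   // uMinState = 1
 * @endcode
 */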
4081
4082
4083/**
4084 * Helper for setting the NEM state for a range of pages.
4085 *
4086 * @param paPages Array of pages to modify.
4087 * @param cPages How many pages to modify.
4088 * @param u2State The new state value.
4089 */
4090void pgmPhysSetNemStateForPages(PPGMPAGE paPages, RTGCPHYS cPages, uint8_t u2State)
4091{
4092 PPGMPAGE pPage = paPages;
4093 while (cPages-- > 0)
4094 {
4095 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
4096 pPage++;
4097 }
4098}
4099
4100#endif /* VBOX_WITH_NATIVE_NEM */
4101