VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@ 39034

Last change on this file was r39034, checked in by vboxsync, 14 years ago:

VMM,INTNET: Addressing unused variable warnings.

1/* $Id: PGMAllPhys.cpp 39034 2011-10-19 11:43:52Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PGM_PHYS
22#include <VBox/vmm/pgm.h>
23#include <VBox/vmm/trpm.h>
24#include <VBox/vmm/vmm.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/em.h>
27#include <VBox/vmm/rem.h>
28#include "PGMInternal.h"
29#include <VBox/vmm/vm.h>
30#include "PGMInline.h"
31#include <VBox/param.h>
32#include <VBox/err.h>
33#include <iprt/assert.h>
34#include <iprt/string.h>
35#include <iprt/asm-amd64-x86.h>
36#include <VBox/log.h>
37#ifdef IN_RING3
38# include <iprt/thread.h>
39#endif
40
41
42/*******************************************************************************
43* Defined Constants And Macros *
44*******************************************************************************/
45/** Enable the physical TLB. */
46#define PGM_WITH_PHYS_TLB
47
48
49
50#ifndef IN_RING3
51
52/**
53 * \#PF Handler callback for physical memory accesses without a RC/R0 handler.
54 * This simply pushes everything to the HC handler.
55 *
56 * @returns VBox status code (appropriate for trap handling and GC return).
57 * @param pVM VM Handle.
58 * @param uErrorCode CPU Error code.
59 * @param pRegFrame Trap register frame.
60 * @param pvFault The fault address (cr2).
61 * @param GCPhysFault The GC physical address corresponding to pvFault.
62 * @param pvUser User argument.
63 */
64VMMDECL(int) pgmPhysHandlerRedirectToHC(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
65{
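    /* Nothing is handled here: write faults are forwarded to IOM as MMIO writes and
       everything else as MMIO reads, so the access ends up in the ring-3 (HC) handler. */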
66 return (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
67}
68
69
70/**
71 * \#PF Handler callback for Guest ROM range write access.
72 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
73 *
74 * @returns VBox status code (appropriate for trap handling and GC return).
75 * @param pVM VM Handle.
76 * @param uErrorCode CPU Error code.
77 * @param pRegFrame Trap register frame.
78 * @param pvFault The fault address (cr2).
79 * @param GCPhysFault The GC physical address corresponding to pvFault.
80 * @param pvUser User argument. Pointer to the ROM range structure.
81 */
82VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
83{
84 int rc;
85 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
86 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
87 PVMCPU pVCpu = VMMGetCpu(pVM);
88
89 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
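    /* The action depends on the page's current ROM protection mode: the write is
       either ignored, redirected to the shadow RAM copy, or deferred to ring-3. */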
90 switch (pRom->aPages[iPage].enmProt)
91 {
92 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
93 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
94 {
95 /*
96 * If it's a simple instruction which doesn't change the cpu state
97 * we will simply skip it. Otherwise we'll have to defer it to REM.
98 */
99 uint32_t cbOp;
100 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
101 rc = EMInterpretDisasOne(pVM, pVCpu, pRegFrame, pDis, &cbOp);
102 if ( RT_SUCCESS(rc)
103 && pDis->mode == CPUMODE_32BIT /** @todo why does this matter? */
104 && !(pDis->prefix & (PREFIX_REPNE | PREFIX_REP | PREFIX_SEG)))
105 {
106 switch (pDis->opcode)
107 {
108 /** @todo Find other instructions we can safely skip, possibly
109 * adding this kind of detection to DIS or EM. */
110 case OP_MOV:
111 pRegFrame->rip += cbOp;
112 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteHandled);
113 return VINF_SUCCESS;
114 }
115 }
116 else if (RT_UNLIKELY(rc == VERR_INTERNAL_ERROR))
117 return rc;
118 break;
119 }
120
121 case PGMROMPROT_READ_RAM_WRITE_RAM:
122 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
123 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
124 AssertRC(rc);
125 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
126
127 case PGMROMPROT_READ_ROM_WRITE_RAM:
128 /* Handle it in ring-3 because it's *way* easier there. */
129 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
130 break;
131
132 default:
133 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
134 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
135 VERR_INTERNAL_ERROR);
136 }
137
138 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteUnhandled);
139 return VINF_EM_RAW_EMULATE_INSTR;
140}
141
142#endif /* !IN_RING3 */
143
144/**
145 * Invalidates the RAM range TLBs.
146 *
147 * @param pVM The VM handle.
148 */
149void pgmPhysInvalidRamRangeTlbs(PVM pVM)
150{
151 pgmLock(pVM);
152 for (uint32_t i = 0; i < PGM_RAMRANGE_TLB_ENTRIES; i++)
153 {
154 pVM->pgm.s.apRamRangesTlbR3[i] = NIL_RTR3PTR;
155 pVM->pgm.s.apRamRangesTlbR0[i] = NIL_RTR0PTR;
156 pVM->pgm.s.apRamRangesTlbRC[i] = NIL_RTRCPTR;
157 }
158 pgmUnlock(pVM);
159}
160
161
162/**
163 * Tests whether a value of type RTGCPHYS would be negative if the type had been
164 * signed instead of unsigned.
165 *
166 * @returns @c true if negative, @c false if positive or zero.
167 * @param a_GCPhys The value to test.
168 * @todo Move me to iprt/types.h.
169 */
170#define RTGCPHYS_IS_NEGATIVE(a_GCPhys) ((a_GCPhys) & ((RTGCPHYS)1 << (sizeof(RTGCPHYS)*8 - 1)))
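/* Note: the RAM range lookups below rely on this for their tree descent. Since
   'off = GCPhys - pRam->GCPhys' is unsigned, it wraps around when GCPhys lies below
   the range start, setting the top bit; that tells the walker to descend into the
   left subtree, otherwise it goes right. */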
171
172
173/**
174 * Slow worker for pgmPhysGetRange.
175 *
176 * @copydoc pgmPhysGetRange
177 */
178PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys)
179{
180 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
181
182 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
183 while (pRam)
184 {
185 RTGCPHYS off = GCPhys - pRam->GCPhys;
186 if (off < pRam->cb)
187 {
188 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
189 return pRam;
190 }
191 if (RTGCPHYS_IS_NEGATIVE(off))
192 pRam = pRam->CTX_SUFF(pLeft);
193 else
194 pRam = pRam->CTX_SUFF(pRight);
195 }
196 return NULL;
197}
198
199
200/**
201 * Slow worker for pgmPhysGetRangeAtOrAbove.
202 *
203 * @copydoc pgmPhysGetRangeAtOrAbove
204 */
205PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys)
206{
207 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
208
209 PPGMRAMRANGE pLastLeft = NULL;
210 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
211 while (pRam)
212 {
213 RTGCPHYS off = GCPhys - pRam->GCPhys;
214 if (off < pRam->cb)
215 {
216 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
217 return pRam;
218 }
219 if (RTGCPHYS_IS_NEGATIVE(off))
220 {
221 pLastLeft = pRam;
222 pRam = pRam->CTX_SUFF(pLeft);
223 }
224 else
225 pRam = pRam->CTX_SUFF(pRight);
226 }
227 return pLastLeft;
228}
229
230
231/**
232 * Slow worker for pgmPhysGetPage.
233 *
234 * @copydoc pgmPhysGetPage
235 */
236PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys)
237{
238 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
239
240 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
241 while (pRam)
242 {
243 RTGCPHYS off = GCPhys - pRam->GCPhys;
244 if (off < pRam->cb)
245 {
246 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
247 return &pRam->aPages[off >> PAGE_SHIFT];
248 }
249
250 if (RTGCPHYS_IS_NEGATIVE(off))
251 pRam = pRam->CTX_SUFF(pLeft);
252 else
253 pRam = pRam->CTX_SUFF(pRight);
254 }
255 return NULL;
256}
257
258
259/**
260 * Slow worker for pgmPhysGetPageEx.
261 *
262 * @copydoc pgmPhysGetPageEx
263 */
264int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
265{
266 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
267
268 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
269 while (pRam)
270 {
271 RTGCPHYS off = GCPhys - pRam->GCPhys;
272 if (off < pRam->cb)
273 {
274 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
275 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
276 return VINF_SUCCESS;
277 }
278
279 if (RTGCPHYS_IS_NEGATIVE(off))
280 pRam = pRam->CTX_SUFF(pLeft);
281 else
282 pRam = pRam->CTX_SUFF(pRight);
283 }
284
285 *ppPage = NULL;
286 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
287}
288
289
290/**
291 * Slow worker for pgmPhysGetPageAndRangeEx.
292 *
293 * @copydoc pgmPhysGetPageAndRangeEx
294 */
295int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
296{
297 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
298
299 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
300 while (pRam)
301 {
302 RTGCPHYS off = GCPhys - pRam->GCPhys;
303 if (off < pRam->cb)
304 {
305 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
306 *ppRam = pRam;
307 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
308 return VINF_SUCCESS;
309 }
310
311 if (RTGCPHYS_IS_NEGATIVE(off))
312 pRam = pRam->CTX_SUFF(pLeft);
313 else
314 pRam = pRam->CTX_SUFF(pRight);
315 }
316
317 *ppRam = NULL;
318 *ppPage = NULL;
319 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
320}
321
322
323/**
324 * Checks if Address Gate 20 is enabled or not.
325 *
326 * @returns true if enabled.
327 * @returns false if disabled.
328 * @param pVCpu VMCPU handle.
329 */
330VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
331{
332 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
333 return pVCpu->pgm.s.fA20Enabled;
334}
335
336
337/**
338 * Validates a GC physical address.
339 *
340 * @returns true if valid.
341 * @returns false if invalid.
342 * @param pVM The VM handle.
343 * @param GCPhys The physical address to validate.
344 */
345VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
346{
347 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
348 return pPage != NULL;
349}
350
351
352/**
353 * Checks if a GC physical address is a normal page,
354 * i.e. not ROM, MMIO or reserved.
355 *
356 * @returns true if normal.
357 * @returns false if invalid, ROM, MMIO or reserved page.
358 * @param pVM The VM handle.
359 * @param GCPhys The physical address to check.
360 */
361VMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
362{
363 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
364 return pPage
365 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
366}
367
368
369/**
370 * Converts a GC physical address to a HC physical address.
371 *
372 * @returns VINF_SUCCESS on success.
373 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
374 * page but has no physical backing.
375 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
376 * GC physical address.
377 *
378 * @param pVM The VM handle.
379 * @param GCPhys The GC physical address to convert.
380 * @param pHCPhys Where to store the HC physical address on success.
381 */
382VMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
383{
384 pgmLock(pVM);
385 PPGMPAGE pPage;
386 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
387 if (RT_SUCCESS(rc))
388 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
389 pgmUnlock(pVM);
390 return rc;
391}
392
393
394/**
395 * Invalidates all page mapping TLBs.
396 *
397 * @param pVM The VM handle.
398 */
399void pgmPhysInvalidatePageMapTLB(PVM pVM)
400{
401 pgmLock(pVM);
402 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushes);
403
404 /* Clear the shared R0/R3 TLB completely. */
405 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
406 {
407 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
408 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
409 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
410 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
411 }
412
413 /** @todo clear the RC TLB whenever we add it. */
414
415 pgmUnlock(pVM);
416}
417
418
419/**
420 * Invalidates a page mapping TLB entry.
421 *
422 * @param pVM The VM handle.
423 * @param GCPhys The GCPhys entry to flush.
424 */
425void pgmPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
426{
427 PGM_LOCK_ASSERT_OWNER(pVM);
428
429 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushEntry);
430
431#ifdef IN_RC
432 unsigned idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
433 pVM->pgm.s.PhysTlbHC.aEntries[idx].GCPhys = NIL_RTGCPHYS;
434 pVM->pgm.s.PhysTlbHC.aEntries[idx].pPage = 0;
435 pVM->pgm.s.PhysTlbHC.aEntries[idx].pMap = 0;
436 pVM->pgm.s.PhysTlbHC.aEntries[idx].pv = 0;
437#else
438 /* Clear the shared R0/R3 TLB entry. */
439 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
440 pTlbe->GCPhys = NIL_RTGCPHYS;
441 pTlbe->pPage = 0;
442 pTlbe->pMap = 0;
443 pTlbe->pv = 0;
444#endif
445
446 /** @todo clear the RC TLB whenever we add it. */
447}
448
449/**
450 * Makes sure that there is at least one handy page ready for use.
451 *
452 * This will also take the appropriate actions when reaching water-marks.
453 *
454 * @returns VBox status code.
455 * @retval VINF_SUCCESS on success.
456 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
457 *
458 * @param pVM The VM handle.
459 *
460 * @remarks Must be called from within the PGM critical section. It may
461 * nip back to ring-3/0 in some cases.
462 */
463static int pgmPhysEnsureHandyPage(PVM pVM)
464{
465 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
466
467 /*
468 * Do we need to do anything special?
469 */
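    /* Two kinds of watermarks are used below: PGM_HANDY_PAGES_SET_FF only raises the
       VM_FF_PGM_NEED_HANDY_PAGES force-action flag (and in RZ schedules a return to
       ring-3), whereas the *_ALLOC thresholds trigger an actual refill of the handy
       page array (PGMR3PhysAllocateHandyPages, done in ring-3). */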
470#ifdef IN_RING3
471 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
472#else
473 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
474#endif
475 {
476 /*
477 * Allocate pages only if we're out of them, or in ring-3, almost out.
478 */
479#ifdef IN_RING3
480 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
481#else
482 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
483#endif
484 {
485 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
486 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY) ));
487#ifdef IN_RING3
488 int rc = PGMR3PhysAllocateHandyPages(pVM);
489#else
490 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES, 0);
491#endif
492 if (RT_UNLIKELY(rc != VINF_SUCCESS))
493 {
494 if (RT_FAILURE(rc))
495 return rc;
496 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
497 if (!pVM->pgm.s.cHandyPages)
498 {
499 LogRel(("PGM: no more handy pages!\n"));
500 return VERR_EM_NO_MEMORY;
501 }
502 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
503 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY));
504#ifdef IN_RING3
505 REMR3NotifyFF(pVM);
506#else
507 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
508#endif
509 }
510 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
511 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
512 ("%u\n", pVM->pgm.s.cHandyPages),
513 VERR_INTERNAL_ERROR);
514 }
515 else
516 {
517 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
518 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
519#ifndef IN_RING3
520 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
521 {
522 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
523 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
524 }
525#endif
526 }
527 }
528
529 return VINF_SUCCESS;
530}
531
532
533/**
534 * Replace a zero or shared page with a new page that we can write to.
535 *
536 * @returns The following VBox status codes.
537 * @retval VINF_SUCCESS on success, pPage is modified.
538 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
539 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
540 *
541 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
542 *
543 * @param pVM The VM address.
544 * @param pPage The physical page tracking structure. This will
545 * be modified on success.
546 * @param GCPhys The address of the page.
547 *
548 * @remarks Must be called from within the PGM critical section. It may
549 * nip back to ring-3/0 in some cases.
550 *
551 * @remarks This function shouldn't really fail, however if it does
552 * it probably means we've screwed up the size of handy pages and/or
553 * the low-water mark. Or, that some device I/O is causing a lot of
554 * pages to be allocated while the host is in a low-memory
555 * condition. This latter should be handled elsewhere and in a more
556 * controlled manner, it's on the @bugref{3170} todo list...
557 */
558int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
559{
560 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
561
562 /*
563 * Prereqs.
564 */
565 PGM_LOCK_ASSERT_OWNER(pVM);
566 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
567 Assert(!PGM_PAGE_IS_MMIO(pPage));
568
569# ifdef PGM_WITH_LARGE_PAGES
570 /*
571 * Try allocate a large page if applicable.
572 */
573 if ( PGMIsUsingLargePages(pVM)
574 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
575 {
576 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
577 PPGMPAGE pBasePage;
578
579 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage);
580 AssertRCReturn(rc, rc); /* paranoia; can't happen. */
581 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE)
582 {
583 rc = pgmPhysAllocLargePage(pVM, GCPhys);
584 if (rc == VINF_SUCCESS)
585 return rc;
586 }
587 /* Mark the base as type page table, so we don't check over and over again. */
588 PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);
589
590 /* fall back to 4KB pages. */
591 }
592# endif
593
594 /*
595 * Flush any shadow page table mappings of the page.
596 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
597 */
598 bool fFlushTLBs = false;
599 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, true /*fFlushTLBs*/, &fFlushTLBs);
600 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
601
602 /*
603 * Ensure that we've got a page handy, take it and use it.
604 */
605 int rc2 = pgmPhysEnsureHandyPage(pVM);
606 if (RT_FAILURE(rc2))
607 {
608 if (fFlushTLBs)
609 PGM_INVL_ALL_VCPU_TLBS(pVM);
610 Assert(rc2 == VERR_EM_NO_MEMORY);
611 return rc2;
612 }
613 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
614 PGM_LOCK_ASSERT_OWNER(pVM);
615 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
616 Assert(!PGM_PAGE_IS_MMIO(pPage));
617
618 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
619 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
620 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
621 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
622 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
623 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
624
625 /*
626 * There are one or two actions to be taken the next time we allocate handy pages:
627 * - Tell the GMM (global memory manager) what the page is being used for.
628 * (Speeds up replacement operations - sharing and defragmenting.)
629 * - If the current backing is shared, it must be freed.
630 */
631 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
632 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
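    /* Note: the HCPhysGCPhys field of the consumed entry is reused here to tell GMM,
       on the next handy page allocation round trip, which guest physical page this
       handy page now backs (and idSharedPage below which shared page to free). */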
633
634 void *pvSharedPage = NULL;
635 if (PGM_PAGE_IS_SHARED(pPage))
636 {
637 /* Mark this shared page for freeing/dereferencing. */
638 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
639 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
640
641 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
642 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
643 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageReplaceShared));
644 pVM->pgm.s.cSharedPages--;
645
646 /* Grab the address of the page so we can make a copy later on. (safe) */
647 rc = pgmPhysPageMap(pVM, pPage, GCPhys, &pvSharedPage);
648 AssertRC(rc);
649 }
650 else
651 {
652 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
653 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRZPageReplaceZero);
654 pVM->pgm.s.cZeroPages--;
655 }
656
657 /*
658 * Do the PGMPAGE modifications.
659 */
660 pVM->pgm.s.cPrivatePages++;
661 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
662 PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
663 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
664 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
665 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
666
667 /* Copy the shared page contents to the replacement page. */
668 if (pvSharedPage)
669 {
670 /* Get the virtual address of the new page. */
671 PGMPAGEMAPLOCK PgMpLck;
672 void *pvNewPage;
673 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
674 if (RT_SUCCESS(rc))
675 {
676 memcpy(pvNewPage, pvSharedPage, PAGE_SIZE); /** @todo write ASMMemCopyPage */
677 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
678 }
679 }
680
681 if ( fFlushTLBs
682 && rc != VINF_PGM_GCPHYS_ALIASED)
683 PGM_INVL_ALL_VCPU_TLBS(pVM);
684 return rc;
685}
686
687#ifdef PGM_WITH_LARGE_PAGES
688
689/**
690 * Replace a 2 MB range of zero pages with new pages that we can write to.
691 *
692 * @returns The following VBox status codes.
693 * @retval VINF_SUCCESS on success, pPage is modified.
694 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
695 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
696 *
697 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
698 *
699 * @param pVM The VM address.
700 * @param GCPhys The address of the page.
701 *
702 * @remarks Must be called from within the PGM critical section. It may
703 * nip back to ring-3/0 in some cases.
704 */
705int pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys)
706{
707 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
708 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
709
710 /*
711 * Prereqs.
712 */
713 PGM_LOCK_ASSERT_OWNER(pVM);
714 Assert(PGMIsUsingLargePages(pVM));
715
716 PPGMPAGE pFirstPage;
717 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
718 if ( RT_SUCCESS(rc)
719 && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM)
720 {
721 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
722
723 /* Don't call this function for already allocated pages. */
724 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
725
726 if ( uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
727 && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
728 {
729 /* Lazy approach: check all pages in the 2 MB range.
730 * The whole range must be ram and unallocated. */
731 GCPhys = GCPhysBase;
732 unsigned iPage;
733 for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
734 {
735 PPGMPAGE pSubPage;
736 rc = pgmPhysGetPageEx(pVM, GCPhys, &pSubPage);
737 if ( RT_FAILURE(rc)
738 || PGM_PAGE_GET_TYPE(pSubPage) != PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
739 || PGM_PAGE_GET_STATE(pSubPage) != PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
740 {
741 LogFlow(("Found page %RGp with wrong attributes (type=%d; state=%d); cancel check. rc=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pSubPage), PGM_PAGE_GET_STATE(pSubPage), rc));
742 break;
743 }
744 Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
745 GCPhys += PAGE_SIZE;
746 }
747 if (iPage != _2M/PAGE_SIZE)
748 {
749 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
750 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
751 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
752 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
753 }
754
755 /*
756 * Do the allocation.
757 */
758# ifdef IN_RING3
759 rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
760# else
761 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhysBase);
762# endif
763 if (RT_SUCCESS(rc))
764 {
765 Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
766 pVM->pgm.s.cLargePages++;
767 return VINF_SUCCESS;
768 }
769
770 /* If we fail once, it most likely means the host's memory is too
771 fragmented; don't bother trying again. */
772 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
773 PGMSetLargePageUsage(pVM, false);
774 return rc;
775 }
776 }
777 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
778}
779
780
781/**
782 * Recheck the entire 2 MB range to see if we can use it again as a large page.
783 *
784 * @returns The following VBox status codes.
785 * @retval VINF_SUCCESS on success, the large page can be used again
786 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
787 *
788 * @param pVM The VM address.
789 * @param GCPhys The address of the page.
790 * @param pLargePage Page structure of the base page
791 */
792int pgmPhysRecheckLargePage(PVM pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
793{
794 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
795
796 GCPhys &= X86_PDE2M_PAE_PG_MASK;
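    /* Align down to the 2 MB boundary; the checks below start at the base page. */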
797
798 /* Check the base page. */
799 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
800 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
801 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
802 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
803 {
804 LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
805 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
806 }
807
808 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
809 /* Check all remaining pages in the 2 MB range. */
810 unsigned i;
811 GCPhys += PAGE_SIZE;
812 for (i = 1; i < _2M/PAGE_SIZE; i++)
813 {
814 PPGMPAGE pPage;
815 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
816 AssertRCBreak(rc);
817
818 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
819 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
820 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
821 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
822 {
823 LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
824 break;
825 }
826
827 GCPhys += PAGE_SIZE;
828 }
829 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
830
831 if (i == _2M/PAGE_SIZE)
832 {
833 PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
834 pVM->pgm.s.cLargePagesDisabled--;
835 Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
836 return VINF_SUCCESS;
837 }
838
839 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
840}
841
842#endif /* PGM_WITH_LARGE_PAGES */
843
844/**
845 * Deal with a write monitored page.
846 *
847 * The page is marked as written to and switched back to the ALLOCATED state.
848 *
849 * @param pVM The VM address.
850 * @param pPage The physical page tracking structure.
851 *
852 * @remarks Called from within the PGM critical section.
853 */
854void pgmPhysPageMakeWriteMonitoredWritable(PVM pVM, PPGMPAGE pPage)
855{
856 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
857 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
858 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
859 Assert(pVM->pgm.s.cMonitoredPages > 0);
860 pVM->pgm.s.cMonitoredPages--;
861 pVM->pgm.s.cWrittenToPages++;
862}
863
864
865/**
866 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
867 *
868 * @returns VBox strict status code.
869 * @retval VINF_SUCCESS on success.
870 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
871 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
872 *
873 * @param pVM The VM address.
874 * @param pPage The physical page tracking structure.
875 * @param GCPhys The address of the page.
876 *
877 * @remarks Called from within the PGM critical section.
878 */
879int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
880{
881 PGM_LOCK_ASSERT_OWNER(pVM);
882 switch (PGM_PAGE_GET_STATE(pPage))
883 {
884 case PGM_PAGE_STATE_WRITE_MONITORED:
885 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage);
886 /* fall thru */
887 default: /* to shut up GCC */
888 case PGM_PAGE_STATE_ALLOCATED:
889 return VINF_SUCCESS;
890
891 /*
892 * Zero pages can be dummy pages for MMIO or reserved memory,
893 * so we need to check the flags before joining cause with
894 * shared page replacement.
895 */
896 case PGM_PAGE_STATE_ZERO:
897 if (PGM_PAGE_IS_MMIO(pPage))
898 return VERR_PGM_PHYS_PAGE_RESERVED;
899 /* fall thru */
900 case PGM_PAGE_STATE_SHARED:
901 return pgmPhysAllocPage(pVM, pPage, GCPhys);
902
903 /* Not allowed to write to ballooned pages. */
904 case PGM_PAGE_STATE_BALLOONED:
905 return VERR_PGM_PHYS_PAGE_BALLOONED;
906 }
907}
908
909
910/**
911 * Internal usage: Map the page specified by its GMM ID.
912 *
913 * This is similar to pgmPhysPageMap, but for a page identified by its GMM page ID.
914 *
915 * @returns VBox status code.
916 *
917 * @param pVM The VM handle.
918 * @param idPage The Page ID.
919 * @param HCPhys The physical address (for RC).
920 * @param ppv Where to store the mapping address.
921 *
922 * @remarks Called from within the PGM critical section. The mapping is only
923 * valid while you are inside this section.
924 */
925int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
926{
927 /*
928 * Validation.
929 */
930 PGM_LOCK_ASSERT_OWNER(pVM);
931 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
932 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
933 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
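    /* A GMM page ID encodes the allocation chunk ID in the upper bits and the page's
       index within that chunk in the lower bits, so we locate (and if necessary map)
       the chunk and then index into it. */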
934
935#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
936 /*
937 * Map it by HCPhys.
938 */
939 return pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
940
941#else
942 /*
943 * Find/make Chunk TLB entry for the mapping chunk.
944 */
945 PPGMCHUNKR3MAP pMap;
946 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
947 if (pTlbe->idChunk == idChunk)
948 {
949 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
950 pMap = pTlbe->pChunk;
951 }
952 else
953 {
954 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
955
956 /*
957 * Find the chunk, map it if necessary.
958 */
959 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
960 if (pMap)
961 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
962 else
963 {
964# ifdef IN_RING0
965 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
966 AssertRCReturn(rc, rc);
967 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
968 Assert(pMap);
969# else
970 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
971 if (RT_FAILURE(rc))
972 return rc;
973# endif
974 }
975
976 /*
977 * Enter it into the Chunk TLB.
978 */
979 pTlbe->idChunk = idChunk;
980 pTlbe->pChunk = pMap;
981 }
982
983 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
984 return VINF_SUCCESS;
985#endif
986}
987
988
989/**
990 * Maps a page into the current virtual address space so it can be accessed.
991 *
992 * @returns VBox status code.
993 * @retval VINF_SUCCESS on success.
994 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
995 *
996 * @param pVM The VM address.
997 * @param pPage The physical page tracking structure.
998 * @param GCPhys The address of the page.
999 * @param ppMap Where to store the address of the mapping tracking structure.
1000 * @param ppv Where to store the mapping address of the page. The page
1001 * offset is masked off!
1002 *
1003 * @remarks Called from within the PGM critical section.
1004 */
1005static int pgmPhysPageMapCommon(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
1006{
1007 PGM_LOCK_ASSERT_OWNER(pVM);
1008
1009#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1010 /*
1011 * Just some sketchy GC/R0-darwin code.
1012 */
1013 *ppMap = NULL;
1014 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
1015 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
1016 pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
1017 return VINF_SUCCESS;
1018
1019#else /* IN_RING3 || IN_RING0 */
1020
1021
1022 /*
1023 * Special case: ZERO and MMIO2 pages.
1024 */
1025 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
1026 if (idChunk == NIL_GMM_CHUNKID)
1027 {
1028 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
1029 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2)
1030 {
1031 /* Lookup the MMIO2 range and use pvR3 to calc the address. */
1032 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1033 AssertMsgReturn(pRam && pRam->pvR3, ("pRam=%p pPage=%R[pgmpage]\n", pRam, pPage), VERR_INTERNAL_ERROR_2);
1034 *ppv = (void *)((uintptr_t)pRam->pvR3 + (uintptr_t)((GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK) - pRam->GCPhys));
1035 }
1036 else if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
1037 {
1038 /** @todo deal with aliased MMIO2 pages somehow...
1039 * One solution would be to seed MMIO2 pages to GMM and get unique Page IDs for
1040 * them, that would also avoid this mess. It would actually be kind of
1041 * elegant... */
1042 AssertLogRelMsgFailedReturn(("%RGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
1043 }
1044 else
1045 {
1046 /** @todo handle MMIO2 */
1047 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
1048 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg,
1049 ("pPage=%R[pgmpage]\n", pPage),
1050 VERR_INTERNAL_ERROR_2);
1051 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1052 }
1053 *ppMap = NULL;
1054 return VINF_SUCCESS;
1055 }
1056
1057 /*
1058 * Find/make Chunk TLB entry for the mapping chunk.
1059 */
1060 PPGMCHUNKR3MAP pMap;
1061 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1062 if (pTlbe->idChunk == idChunk)
1063 {
1064 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1065 pMap = pTlbe->pChunk;
1066 AssertPtr(pMap->pv);
1067 }
1068 else
1069 {
1070 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1071
1072 /*
1073 * Find the chunk, map it if necessary.
1074 */
1075 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1076 if (pMap)
1077 {
1078 AssertPtr(pMap->pv);
1079 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1080 }
1081 else
1082 {
1083#ifdef IN_RING0
1084 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
1085 AssertRCReturn(rc, rc);
1086 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1087 Assert(pMap);
1088#else
1089 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1090 if (RT_FAILURE(rc))
1091 return rc;
1092#endif
1093 AssertPtr(pMap->pv);
1094 }
1095
1096 /*
1097 * Enter it into the Chunk TLB.
1098 */
1099 pTlbe->idChunk = idChunk;
1100 pTlbe->pChunk = pMap;
1101 }
1102
1103 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
1104 *ppMap = pMap;
1105 return VINF_SUCCESS;
1106#endif /* IN_RING3 */
1107}
1108
1109
1110/**
1111 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
1112 *
1113 * This is typically used in paths where we cannot use the TLB methods (like ROM
1114 * pages) or where there is no point in using them since we won't get many hits.
1115 *
1116 * @returns VBox strict status code.
1117 * @retval VINF_SUCCESS on success.
1118 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1119 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1120 *
1121 * @param pVM The VM address.
1122 * @param pPage The physical page tracking structure.
1123 * @param GCPhys The address of the page.
1124 * @param ppv Where to store the mapping address of the page. The page
1125 * offset is masked off!
1126 *
1127 * @remarks Called from within the PGM critical section. The mapping is only
1128 * valid while you are inside this section.
1129 */
1130int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1131{
1132 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1133 if (RT_SUCCESS(rc))
1134 {
1135 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
1136 PPGMPAGEMAP pMapIgnore;
1137 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1138 if (RT_FAILURE(rc2)) /* preserve rc */
1139 rc = rc2;
1140 }
1141 return rc;
1142}
1143
1144
1145/**
1146 * Maps a page into the current virtual address space so it can be accessed for
1147 * both writing and reading.
1148 *
1149 * This is typically used in paths where we cannot use the TLB methods (like ROM
1150 * pages) or where there is no point in using them since we won't get many hits.
1151 *
1152 * @returns VBox status code.
1153 * @retval VINF_SUCCESS on success.
1154 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1155 *
1156 * @param pVM The VM address.
1157 * @param pPage The physical page tracking structure. Must be in the
1158 * allocated state.
1159 * @param GCPhys The address of the page.
1160 * @param ppv Where to store the mapping address of the page. The page
1161 * offset is masked off!
1162 *
1163 * @remarks Called from within the PGM critical section. The mapping is only
1164 * valid while you are inside this section.
1165 */
1166int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1167{
1168 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
1169 PPGMPAGEMAP pMapIgnore;
1170 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1171}
1172
1173
1174/**
1175 * Maps a page into the current virtual address space so it can be accessed for
1176 * reading.
1177 *
1178 * This is typically used in paths where we cannot use the TLB methods (like ROM
1179 * pages) or where there is no point in using them since we won't get many hits.
1180 *
1181 * @returns VBox status code.
1182 * @retval VINF_SUCCESS on success.
1183 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1184 *
1185 * @param pVM The VM address.
1186 * @param pPage The physical page tracking structure.
1187 * @param GCPhys The address of the page.
1188 * @param ppv Where to store the mapping address of the page. The page
1189 * offset is masked off!
1190 *
1191 * @remarks Called from within the PGM critical section. The mapping is only
1192 * valid while you are inside this section.
1193 */
1194int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
1195{
1196 PPGMPAGEMAP pMapIgnore;
1197 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
1198}
1199
1200#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1201
1202/**
1203 * Load a guest page into the ring-3 physical TLB.
1204 *
1205 * @returns VBox status code.
1206 * @retval VINF_SUCCESS on success
1207 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1208 * @param pVM The VM handle.
1209 * @param GCPhys The guest physical address in question.
1210 */
1211int pgmPhysPageLoadIntoTlb(PVM pVM, RTGCPHYS GCPhys)
1212{
1213 PGM_LOCK_ASSERT_OWNER(pVM);
1214
1215 /*
1216 * Find the ram range and page and hand it over to the with-page function.
1217 * 99.8% of requests are expected to be in the first range.
1218 */
1219 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1220 if (!pPage)
1221 {
1222 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1223 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1224 }
1225
1226 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
1227}
1228
1229
1230/**
1231 * Load a guest page into the ring-3 physical TLB.
1232 *
1233 * @returns VBox status code.
1234 * @retval VINF_SUCCESS on success
1235 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1236 *
1237 * @param pVM The VM handle.
1238 * @param pPage Pointer to the PGMPAGE structure corresponding to
1239 * GCPhys.
1240 * @param GCPhys The guest physical address in question.
1241 */
1242int pgmPhysPageLoadIntoTlbWithPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1243{
1244 PGM_LOCK_ASSERT_OWNER(pVM);
1245 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1246
1247 /*
1248 * Map the page.
1249 * Make a special case for the zero page as it is kind of special.
1250 */
1251 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1252 if ( !PGM_PAGE_IS_ZERO(pPage)
1253 && !PGM_PAGE_IS_BALLOONED(pPage))
1254 {
1255 void *pv;
1256 PPGMPAGEMAP pMap;
1257 int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv);
1258 if (RT_FAILURE(rc))
1259 return rc;
1260 pTlbe->pMap = pMap;
1261 pTlbe->pv = pv;
1262 Assert(!((uintptr_t)pTlbe->pv & PAGE_OFFSET_MASK));
1263 }
1264 else
1265 {
1266 AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
1267 pTlbe->pMap = NULL;
1268 pTlbe->pv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1269 }
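    /* Tag the entry with the page-aligned guest physical address so later lookups
       within the same page hit; ROM and ROM shadow pages are not cached because two
       physical pages can back the same GCPhys there. */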
1270#ifdef PGM_WITH_PHYS_TLB
1271 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
1272 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
1273 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1274 else
1275 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
1276#else
1277 pTlbe->GCPhys = NIL_RTGCPHYS;
1278#endif
1279 pTlbe->pPage = pPage;
1280 return VINF_SUCCESS;
1281}
1282
1283#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1284
1285/**
1286 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1287 * own the PGM lock and therefore not need to lock the mapped page.
1288 *
1289 * @returns VBox status code.
1290 * @retval VINF_SUCCESS on success.
1291 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1292 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1293 *
1294 * @param pVM The VM handle.
1295 * @param GCPhys The guest physical address of the page that should be mapped.
1296 * @param pPage Pointer to the PGMPAGE structure for the page.
1297 * @param ppv Where to store the address corresponding to GCPhys.
1298 *
1299 * @internal
1300 * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
1301 */
1302int pgmPhysGCPhys2CCPtrInternalDepr(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1303{
1304 int rc;
1305 AssertReturn(pPage, VERR_INTERNAL_ERROR);
1306 PGM_LOCK_ASSERT_OWNER(pVM);
1307 pVM->pgm.s.cDeprecatedPageLocks++;
1308
1309 /*
1310 * Make sure the page is writable.
1311 */
1312 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1313 {
1314 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1315 if (RT_FAILURE(rc))
1316 return rc;
1317 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1318 }
1319 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1320
1321 /*
1322 * Get the mapping address.
1323 */
1324#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1325 void *pv;
1326 rc = pgmRZDynMapHCPageInlined(VMMGetCpu(pVM),
1327 PGM_PAGE_GET_HCPHYS(pPage),
1328 &pv
1329 RTLOG_COMMA_SRC_POS);
1330 if (RT_FAILURE(rc))
1331 return rc;
1332 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1333#else
1334 PPGMPAGEMAPTLBE pTlbe;
1335 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1336 if (RT_FAILURE(rc))
1337 return rc;
1338 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1339#endif
1340 return VINF_SUCCESS;
1341}
1342
1343#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1344
1345/**
1346 * Locks a page mapping for writing.
1347 *
1348 * @param pVM The VM handle.
1349 * @param pPage The page.
1350 * @param pTlbe The mapping TLB entry for the page.
1351 * @param pLock The lock structure (output).
1352 */
1353DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1354{
1355 PPGMPAGEMAP pMap = pTlbe->pMap;
1356 if (pMap)
1357 pMap->cRefs++;
1358
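    /* Bump the per-page write lock count. If the counter is about to saturate at
       PGM_PAGE_MAX_LOCKS, the page (and its chunk mapping) is deliberately left in a
       permanently locked state rather than risk the count wrapping. */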
1359 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1360 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1361 {
1362 if (cLocks == 0)
1363 pVM->pgm.s.cWriteLockedPages++;
1364 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1365 }
1366 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1367 {
1368 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1369 AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
1370 if (pMap)
1371 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1372 }
1373
1374 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1375 pLock->pvMap = pMap;
1376}
1377
1378/**
1379 * Locks a page mapping for reading.
1380 *
1381 * @param pVM The VM handle.
1382 * @param pPage The page.
1383 * @param pTlbe The mapping TLB entry for the page.
1384 * @param pLock The lock structure (output).
1385 */
1386DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1387{
1388 PPGMPAGEMAP pMap = pTlbe->pMap;
1389 if (pMap)
1390 pMap->cRefs++;
1391
1392 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1393 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1394 {
1395 if (cLocks == 0)
1396 pVM->pgm.s.cReadLockedPages++;
1397 PGM_PAGE_INC_READ_LOCKS(pPage);
1398 }
1399 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1400 {
1401 PGM_PAGE_INC_READ_LOCKS(pPage);
1402 AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
1403 if (pMap)
1404 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1405 }
1406
1407 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1408 pLock->pvMap = pMap;
1409}
1410
1411#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1412
1413
1414/**
1415 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1416 * own the PGM lock and have access to the page structure.
1417 *
1418 * @returns VBox status code.
1419 * @retval VINF_SUCCESS on success.
1420 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1421 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1422 *
1423 * @param pVM The VM handle.
1424 * @param GCPhys The guest physical address of the page that should be mapped.
1425 * @param pPage Pointer to the PGMPAGE structure for the page.
1426 * @param ppv Where to store the address corresponding to GCPhys.
1427 * @param pLock Where to store the lock information that
1428 * pgmPhysReleaseInternalPageMappingLock needs.
1429 *
1430 * @internal
1431 */
1432int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1433{
1434 int rc;
1435 AssertReturn(pPage, VERR_INTERNAL_ERROR);
1436 PGM_LOCK_ASSERT_OWNER(pVM);
1437
1438 /*
1439 * Make sure the page is writable.
1440 */
1441 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1442 {
1443 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1444 if (RT_FAILURE(rc))
1445 return rc;
1446 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1447 }
1448 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1449
1450 /*
1451 * Do the job.
1452 */
1453#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1454 void *pv;
1455 PVMCPU pVCpu = VMMGetCpu(pVM);
1456 rc = pgmRZDynMapHCPageInlined(pVCpu,
1457 PGM_PAGE_GET_HCPHYS(pPage),
1458 &pv
1459 RTLOG_COMMA_SRC_POS);
1460 if (RT_FAILURE(rc))
1461 return rc;
1462 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1463 pLock->pvPage = pv;
1464 pLock->pVCpu = pVCpu;
1465
1466#else
1467 PPGMPAGEMAPTLBE pTlbe;
1468 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1469 if (RT_FAILURE(rc))
1470 return rc;
1471 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1472 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1473#endif
1474 return VINF_SUCCESS;
1475}
1476
1477
1478/**
1479 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1480 * own the PGM lock and have access to the page structure.
1481 *
1482 * @returns VBox status code.
1483 * @retval VINF_SUCCESS on success.
1484 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1485 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1486 *
1487 * @param pVM The VM handle.
1488 * @param GCPhys The guest physical address of the page that should be mapped.
1489 * @param pPage Pointer to the PGMPAGE structure for the page.
1490 * @param ppv Where to store the address corresponding to GCPhys.
1491 * @param pLock Where to store the lock information that
1492 * pgmPhysReleaseInternalPageMappingLock needs.
1493 *
1494 * @internal
1495 */
1496int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
1497{
1498 AssertReturn(pPage, VERR_INTERNAL_ERROR);
1499 PGM_LOCK_ASSERT_OWNER(pVM);
1500 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1501
1502 /*
1503 * Do the job.
1504 */
1505#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1506 void *pv;
1507 PVMCPU pVCpu = VMMGetCpu(pVM);
1508 int rc = pgmRZDynMapHCPageInlined(pVCpu,
1509 PGM_PAGE_GET_HCPHYS(pPage),
1510 &pv
1511 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1512 if (RT_FAILURE(rc))
1513 return rc;
1514 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1515 pLock->pvPage = pv;
1516 pLock->pVCpu = pVCpu;
1517
1518#else
1519 PPGMPAGEMAPTLBE pTlbe;
1520 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1521 if (RT_FAILURE(rc))
1522 return rc;
1523 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1524 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1525#endif
1526 return VINF_SUCCESS;
1527}
1528
1529
1530/**
1531 * Requests the mapping of a guest page into the current context.
1532 *
1533 * This API should only be used for a very short term, as it will consume scarce
1534 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1535 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1536 *
1537 * This API will assume your intention is to write to the page, and will
1538 * therefore replace shared and zero pages. If you do not intend to modify
1539 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1540 *
1541 * @returns VBox status code.
1542 * @retval VINF_SUCCESS on success.
1543 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1544 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1545 *
1546 * @param pVM The VM handle.
1547 * @param GCPhys The guest physical address of the page that should be
1548 * mapped.
1549 * @param ppv Where to store the address corresponding to GCPhys.
1550 * @param pLock Where to store the lock information that
1551 * PGMPhysReleasePageMappingLock needs.
1552 *
1553 * @remarks The caller is responsible for dealing with access handlers.
1554 * @todo Add an informational return code for pages with access handlers?
1555 *
1556 * @remark Avoid calling this API from within critical sections (other than
1557 * the PGM one) because of the deadlock risk. External threads may
1558 * need to delegate jobs to the EMTs.
1559 * @remarks Only one page is mapped! Make no assumption about what's after or
1560 * before the returned page!
1561 * @thread Any thread.
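 *
 * A minimal usage sketch (illustrative only; assumes a valid pVM and GCPhys):
 * @code
 *      PGMPAGEMAPLOCK Lock;
 *      void          *pv;
 *      int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          memset(pv, 0, PAGE_SIZE);   // example access; exactly one page is mapped
 *          PGMPhysReleasePageMappingLock(pVM, &Lock);
 *      }
 * @endcode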
1562 */
1563VMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1564{
1565 int rc = pgmLock(pVM);
1566 AssertRCReturn(rc, rc);
1567
1568#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1569 /*
1570 * Find the page and make sure it's writable.
1571 */
1572 PPGMPAGE pPage;
1573 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1574 if (RT_SUCCESS(rc))
1575 {
1576 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1577 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1578 if (RT_SUCCESS(rc))
1579 {
1580 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1581
1582 PVMCPU pVCpu = VMMGetCpu(pVM);
1583 void *pv;
1584 rc = pgmRZDynMapHCPageInlined(pVCpu,
1585 PGM_PAGE_GET_HCPHYS(pPage),
1586 &pv
1587 RTLOG_COMMA_SRC_POS);
1588 if (RT_SUCCESS(rc))
1589 {
1590 AssertRCSuccess(rc);
1591
1592 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1593 *ppv = pv;
1594 pLock->pvPage = pv;
1595 pLock->pVCpu = pVCpu;
1596 }
1597 }
1598 }
1599
1600#else /* IN_RING3 || IN_RING0 */
1601 /*
1602 * Query the Physical TLB entry for the page (may fail).
1603 */
1604 PPGMPAGEMAPTLBE pTlbe;
1605 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1606 if (RT_SUCCESS(rc))
1607 {
1608 /*
1609 * If the page is shared, the zero page, or being write monitored
1610 * it must be converted to a page that's writable if possible.
1611 */
1612 PPGMPAGE pPage = pTlbe->pPage;
1613 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1614 {
1615 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1616 if (RT_SUCCESS(rc))
1617 {
1618 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1619 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1620 }
1621 }
1622 if (RT_SUCCESS(rc))
1623 {
1624 /*
1625 * Now, just perform the locking and calculate the return address.
1626 */
1627 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1628 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1629 }
1630 }
1631
1632#endif /* IN_RING3 || IN_RING0 */
1633 pgmUnlock(pVM);
1634 return rc;
1635}
1636
1637
1638/**
1639 * Requests the mapping of a guest page into the current context.
1640 *
1641 * This API should only be used for a very short term, as it will consume scarce
1642 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1643 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1644 *
1645 * @returns VBox status code.
1646 * @retval VINF_SUCCESS on success.
1647 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1648 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1649 *
1650 * @param pVM The VM handle.
1651 * @param GCPhys The guest physical address of the page that should be
1652 * mapped.
1653 * @param ppv Where to store the address corresponding to GCPhys.
1654 * @param pLock Where to store the lock information that
1655 * PGMPhysReleasePageMappingLock needs.
1656 *
1657 * @remarks The caller is responsible for dealing with access handlers.
1658 * @todo Add an informational return code for pages with access handlers?
1659 *
1660 * @remarks Avoid calling this API from within critical sections (other than
1661 * the PGM one) because of the deadlock risk.
1662 * @remarks Only one page is mapped! Make no assumption about what's after or
1663 * before the returned page!
1664 * @thread Any thread.
1665 */
1666VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1667{
1668 int rc = pgmLock(pVM);
1669 AssertRCReturn(rc, rc);
1670
1671#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1672 /*
1673 * Find the page and make sure it's readable.
1674 */
1675 PPGMPAGE pPage;
1676 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1677 if (RT_SUCCESS(rc))
1678 {
1679 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1680 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1681 else
1682 {
1683 PVMCPU pVCpu = VMMGetCpu(pVM);
1684 void *pv;
1685 rc = pgmRZDynMapHCPageInlined(pVCpu,
1686 PGM_PAGE_GET_HCPHYS(pPage),
1687 &pv
1688 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1689 if (RT_SUCCESS(rc))
1690 {
1691 AssertRCSuccess(rc);
1692
1693 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1694 *ppv = pv;
1695 pLock->pvPage = pv;
1696 pLock->pVCpu = pVCpu;
1697 }
1698 }
1699 }
1700
1701#else /* IN_RING3 || IN_RING0 */
1702 /*
1703 * Query the Physical TLB entry for the page (may fail).
1704 */
1705 PPGMPAGEMAPTLBE pTlbe;
1706 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1707 if (RT_SUCCESS(rc))
1708 {
1709 /* MMIO pages don't have any readable backing. */
1710 PPGMPAGE pPage = pTlbe->pPage;
1711 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1712 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1713 else
1714 {
1715 /*
1716 * Now, just perform the locking and calculate the return address.
1717 */
1718 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1719 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1720 }
1721 }
1722
1723#endif /* IN_RING3 || IN_RING0 */
1724 pgmUnlock(pVM);
1725 return rc;
1726}
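
/*
 * Illustrative usage sketch (kept out of the build with #if 0): the intended
 * pairing of PGMPhysGCPhys2CCPtrReadOnly with PGMPhysReleasePageMappingLock.
 * The helper examplePeekGCPhys and its parameters are hypothetical; only the
 * PGMPhys* calls are real.
 */
#if 0 /* illustrative example only */
static int examplePeekGCPhys(PVM pVM, RTGCPHYS GCPhys, uint8_t *pbDst, size_t cb)
{
    void const    *pv;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        /* Only one page is mapped, so clamp the copy to the page boundary. */
        size_t cbPage = PAGE_SIZE - (size_t)(GCPhys & PAGE_OFFSET_MASK);
        memcpy(pbDst, pv, RT_MIN(cb, cbPage));
        PGMPhysReleasePageMappingLock(pVM, &Lock); /* release ASAP */
    }
    return rc;
}
#endif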
1727
1728
1729/**
1730 * Requests the mapping of a guest page given by virtual address into the current context.
1731 *
1732 * This API should only be used for very short periods, as it will consume
1733 * scarce resources (R0 and GC) in the mapping cache. When you're done
1734 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1735 *
1736 * This API will assume your intention is to write to the page, and will
1737 * therefore replace shared and zero pages. If you do not intend to modify
1738 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1739 *
1740 * @returns VBox status code.
1741 * @retval VINF_SUCCESS on success.
1742 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1743 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1744 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1745 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1746 *
1747 * @param pVCpu VMCPU handle.
1748 * @param GCPtr The guest virtual address of the page that should be mapped.
1749 * @param ppv Where to store the address corresponding to GCPtr.
1750 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1751 *
1752 * @remark Avoid calling this API from within critical sections (other than
1753 * the PGM one) because of the deadlock risk.
1754 * @thread EMT
1755 */
1756VMMDECL(int) PGMPhysGCPtr2CCPtr(PVMCPU pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
1757{
1758 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1759 RTGCPHYS GCPhys;
1760 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1761 if (RT_SUCCESS(rc))
1762 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1763 return rc;
1764}
1765
1766
1767/**
1768 * Requests the mapping of a guest page given by virtual address into the current context.
1769 *
1770 * This API should only be used for very short periods, as it will consume
1771 * scarce resources (R0 and GC) in the mapping cache. When you're done
1772 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1773 *
1774 * @returns VBox status code.
1775 * @retval VINF_SUCCESS on success.
1776 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1777 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1778 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1779 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1780 *
1781 * @param pVCpu VMCPU handle.
1782 * @param GCPtr The guest virtual address of the page that should be mapped.
1783 * @param ppv Where to store the address corresponding to GCPtr.
1784 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1785 *
1786 * @remark Avoid calling this API from within critical sections (other than
1787 * the PGM one) because of the deadlock risk.
1788 * @thread EMT
1789 */
1790VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPU pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
1791{
1792 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1793 RTGCPHYS GCPhys;
1794 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1795 if (RT_SUCCESS(rc))
1796 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1797 return rc;
1798}
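
/*
 * Illustrative sketch (guarded out of the build): reading guest memory through
 * a guest virtual address on the EMT. The wrapper examplePeekGCPtr is made up;
 * the point is the PGMPhysGCPtr2CCPtrReadOnly / PGMPhysReleasePageMappingLock
 * pairing.
 */
#if 0 /* illustrative example only */
static int examplePeekGCPtr(PVMCPU pVCpu, RTGCPTR GCPtr, uint8_t *pbDst, size_t cb)
{
    void const    *pv;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtr, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        /* The mapping covers a single page; don't read past it. */
        size_t cbPage = PAGE_SIZE - (size_t)((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK);
        memcpy(pbDst, pv, RT_MIN(cb, cbPage));
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &Lock);
    }
    return rc;
}
#endif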
1799
1800
1801/**
1802 * Release the mapping of a guest page.
1803 *
1804 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
1805 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
1806 *
1807 * @param pVM The VM handle.
1808 * @param pLock The lock structure initialized by the mapping function.
1809 */
1810VMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1811{
1812#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1813 Assert(pLock->pvPage != NULL);
1814 Assert(pLock->pVCpu == VMMGetCpu(pVM));
1815 PGM_DYNMAP_UNUSED_HINT(pLock->pVCpu, pLock->pvPage);
1816 pLock->pVCpu = NULL;
1817 pLock->pvPage = NULL;
1818
1819#else
1820 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
1821 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
1822 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
1823
1824 pLock->uPageAndType = 0;
1825 pLock->pvMap = NULL;
1826
1827 pgmLock(pVM);
1828 if (fWriteLock)
1829 {
1830 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1831 Assert(cLocks > 0);
1832 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1833 {
1834 if (cLocks == 1)
1835 {
1836 Assert(pVM->pgm.s.cWriteLockedPages > 0);
1837 pVM->pgm.s.cWriteLockedPages--;
1838 }
1839 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
1840 }
1841
1842 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1843 {
1844 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
1845 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1846 Assert(pVM->pgm.s.cMonitoredPages > 0);
1847 pVM->pgm.s.cMonitoredPages--;
1848 pVM->pgm.s.cWrittenToPages++;
1849 }
1850 }
1851 else
1852 {
1853 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1854 Assert(cLocks > 0);
1855 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1856 {
1857 if (cLocks == 1)
1858 {
1859 Assert(pVM->pgm.s.cReadLockedPages > 0);
1860 pVM->pgm.s.cReadLockedPages--;
1861 }
1862 PGM_PAGE_DEC_READ_LOCKS(pPage);
1863 }
1864 }
1865
1866 if (pMap)
1867 {
1868 Assert(pMap->cRefs >= 1);
1869 pMap->cRefs--;
1870 }
1871 pgmUnlock(pVM);
1872#endif /* IN_RING3 || IN_RING0 */
1873}
1874
1875
1876/**
1877 * Release the internal mapping of a guest page.
1878 *
1879 * This is the counterpart of pgmPhysGCPhys2CCPtrInternalEx and
1880 * pgmPhysGCPhys2CCPtrInternalReadOnly.
1881 *
1882 * @param pVM The VM handle.
1883 * @param pLock The lock structure initialized by the mapping function.
1884 *
1885 * @remarks Caller must hold the PGM lock.
1886 */
1887void pgmPhysReleaseInternalPageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1888{
1889 PGM_LOCK_ASSERT_OWNER(pVM);
1890 PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
1891}
1892
1893
1894/**
1895 * Converts a GC physical address to a HC ring-3 pointer.
1896 *
1897 * @returns VINF_SUCCESS on success.
1898 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1899 * page but has no physical backing.
1900 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1901 * GC physical address.
1902 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1903 * a dynamic ram chunk boundary
1904 *
1905 * @param pVM The VM handle.
1906 * @param GCPhys The GC physical address to convert.
1907 * @param pR3Ptr Where to store the R3 pointer on success.
1908 *
1909 * @deprecated Avoid when possible!
1910 */
1911int pgmPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
1912{
1913/** @todo this is kind of hacky and needs some more work. */
1914#ifndef DEBUG_sandervl
1915 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1916#endif
1917
1918 Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): don't use this API!\n", GCPhys)); /** @todo eliminate this API! */
1919#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1920 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
1921#else
1922 pgmLock(pVM);
1923
1924 PPGMRAMRANGE pRam;
1925 PPGMPAGE pPage;
1926 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
1927 if (RT_SUCCESS(rc))
1928 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
1929
1930 pgmUnlock(pVM);
1931 Assert(rc <= VINF_SUCCESS);
1932 return rc;
1933#endif
1934}
1935
1936#if 0 /*defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)*/
1937
1938/**
1939 * Maps and locks a guest CR3 or PD (PAE) page.
1940 *
1941 * @returns VINF_SUCCESS on success.
1942 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1943 * page but has no physical backing.
1944 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1945 * GC physical address.
1946 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1947 * a dynamic ram chunk boundary
1948 *
1949 * @param pVM The VM handle.
1950 * @param GCPhys The GC physical address to convert.
1951 * @param pR3Ptr Where to store the R3 pointer on success. This may or
1952 * may not be valid in ring-0 depending on the
1953 * VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 build option.
1954 *
1955 * @remarks The caller must own the PGM lock.
1956 */
1957int pgmPhysCr3ToHCPtr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
1958{
1959
1960 PPGMRAMRANGE pRam;
1961 PPGMPAGE pPage;
1962 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
1963 if (RT_SUCCESS(rc))
1964 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
1965 Assert(rc <= VINF_SUCCESS);
1966 return rc;
1967}
1968
1969
1975#endif
1976
1977/**
1978 * Converts a guest pointer to a GC physical address.
1979 *
1980 * This uses the current CR3/CR0/CR4 of the guest.
1981 *
1982 * @returns VBox status code.
1983 * @param pVCpu The VMCPU Handle
1984 * @param GCPtr The guest pointer to convert.
1985 * @param pGCPhys Where to store the GC physical address.
1986 */
1987VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
1988{
1989 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
1990 if (pGCPhys && RT_SUCCESS(rc))
1991 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
1992 return rc;
1993}
1994
1995
1996/**
1997 * Converts a guest pointer to a HC physical address.
1998 *
1999 * This uses the current CR3/CR0/CR4 of the guest.
2000 *
2001 * @returns VBox status code.
2002 * @param pVCpu The VMCPU Handle
2003 * @param GCPtr The guest pointer to convert.
2004 * @param pHCPhys Where to store the HC physical address.
2005 */
2006VMMDECL(int) PGMPhysGCPtr2HCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
2007{
2008 PVM pVM = pVCpu->CTX_SUFF(pVM);
2009 RTGCPHYS GCPhys;
2010 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
2011 if (RT_SUCCESS(rc))
2012 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
2013 return rc;
2014}
2015
2016
2017
2018#undef LOG_GROUP
2019#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
2020
2021
2022#ifdef IN_RING3
2023/**
2024 * Caches a PGMPhys memory access.
2025 *
2026 * @param pVM VM Handle.
2027 * @param pCache Cache structure pointer
2028 * @param GCPhys GC physical address
2029 * @param pbR3 R3 pointer corresponding to the physical page
2030 *
2031 * @thread EMT.
2032 */
2033static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
2034{
2035 uint32_t iCacheIndex;
2036
2037 Assert(VM_IS_EMT(pVM));
2038
2039 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
2040 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
2041
2042 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
2043
2044 ASMBitSet(&pCache->aEntries, iCacheIndex);
2045
2046 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
2047 pCache->Entry[iCacheIndex].pbR3 = pbR3;
2048}
2049#endif /* IN_RING3 */
2050
2051
2052/**
2053 * Deals with reading from a page with one or more ALL access handlers.
2054 *
2055 * @returns VBox status code. Can be ignored in ring-3.
2056 * @retval VINF_SUCCESS.
2057 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2058 *
2059 * @param pVM The VM handle.
2060 * @param pPage The page descriptor.
2061 * @param GCPhys The physical address to start reading at.
2062 * @param pvBuf Where to put the bits we read.
2063 * @param cb How much to read - less than or equal to a page.
2064 */
2065static int pgmPhysReadHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb)
2066{
2067 /*
2068 * The most frequent accesses here are MMIO and shadowed ROM.
2069 * The current code ASSUMES that all these access handlers cover full pages!
2070 */
2071
2072 /*
2073 * Whatever we do we need the source page, map it first.
2074 */
2075 PGMPAGEMAPLOCK PgMpLck;
2076 const void *pvSrc = NULL;
2077 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
2078 if (RT_FAILURE(rc))
2079 {
2080 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2081 GCPhys, pPage, rc));
2082 memset(pvBuf, 0xff, cb);
2083 return VINF_SUCCESS;
2084 }
2085 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2086
2087 /*
2088 * Deal with any physical handlers.
2089 */
2090#ifdef IN_RING3
2091 PPGMPHYSHANDLER pPhys = NULL;
2092#endif
2093 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL)
2094 {
2095#ifdef IN_RING3
2096 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2097 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2098 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
2099 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
2100 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2101 Assert(pPhys->CTX_SUFF(pfnHandler));
2102
2103 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2104 void *pvUser = pPhys->CTX_SUFF(pvUser);
2105
2106 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
2107 STAM_PROFILE_START(&pPhys->Stat, h);
2108 PGM_LOCK_ASSERT_OWNER(pVM);
2109 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2110 pgmUnlock(pVM);
2111 rc = pfnHandler(pVM, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pvUser);
2112 pgmLock(pVM);
2113# ifdef VBOX_WITH_STATISTICS
2114 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2115 if (pPhys)
2116 STAM_PROFILE_STOP(&pPhys->Stat, h);
2117# else
2118 pPhys = NULL; /* might not be valid anymore. */
2119# endif
2120 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys));
2121#else
2122 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2123 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2124 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2125 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2126#endif
2127 }
2128
2129 /*
2130 * Deal with any virtual handlers.
2131 */
2132 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL)
2133 {
2134 unsigned iPage;
2135 PPGMVIRTHANDLER pVirt;
2136
2137 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iPage);
2138 AssertReleaseMsg(RT_SUCCESS(rc2), ("GCPhys=%RGp cb=%#x rc2=%Rrc\n", GCPhys, cb, rc2));
2139 Assert((pVirt->Core.Key & PAGE_OFFSET_MASK) == 0);
2140 Assert((pVirt->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2141 Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
2142
2143#ifdef IN_RING3
2144 if (pVirt->pfnHandlerR3)
2145 {
2146 if (!pPhys)
2147 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
2148 else
2149 Log(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
2150 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2151 + (iPage << PAGE_SHIFT)
2152 + (GCPhys & PAGE_OFFSET_MASK);
2153
2154 STAM_PROFILE_START(&pVirt->Stat, h);
2155 rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
2156 STAM_PROFILE_STOP(&pVirt->Stat, h);
2157 if (rc2 == VINF_SUCCESS)
2158 rc = VINF_SUCCESS;
2159 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
2160 }
2161 else
2162 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
2163#else
2164 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2165 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2166 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2167 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2168#endif
2169 }
2170
2171 /*
2172 * Take the default action.
2173 */
2174 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2175 memcpy(pvBuf, pvSrc, cb);
2176 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2177 return rc;
2178}
2179
2180
2181/**
2182 * Read physical memory.
2183 *
2184 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
2185 * want to ignore those.
2186 *
2187 * @returns VBox status code. Can be ignored in ring-3.
2188 * @retval VINF_SUCCESS.
2189 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2190 *
2191 * @param pVM VM Handle.
2192 * @param GCPhys Physical address to start reading from.
2193 * @param pvBuf Where to put the read bits.
2194 * @param cbRead How many bytes to read.
2195 */
2196VMMDECL(int) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
2197{
2198 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
2199 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
2200
2201 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysRead));
2202 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysReadBytes), cbRead);
2203
2204 pgmLock(pVM);
2205
2206 /*
2207 * Copy loop on ram ranges.
2208 */
2209 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2210 for (;;)
2211 {
2212 /* Inside range or not? */
2213 if (pRam && GCPhys >= pRam->GCPhys)
2214 {
2215 /*
2216 * Must work our way thru this page by page.
2217 */
2218 RTGCPHYS off = GCPhys - pRam->GCPhys;
2219 while (off < pRam->cb)
2220 {
2221 unsigned iPage = off >> PAGE_SHIFT;
2222 PPGMPAGE pPage = &pRam->aPages[iPage];
2223 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2224 if (cb > cbRead)
2225 cb = cbRead;
2226
2227 /*
2228 * Any ALL access handlers?
2229 */
2230 if (RT_UNLIKELY(PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)))
2231 {
2232 int rc = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
2233 if (RT_FAILURE(rc))
2234 {
2235 pgmUnlock(pVM);
2236 return rc;
2237 }
2238 }
2239 else
2240 {
2241 /*
2242 * Get the pointer to the page.
2243 */
2244 PGMPAGEMAPLOCK PgMpLck;
2245 const void *pvSrc;
2246 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
2247 if (RT_SUCCESS(rc))
2248 {
2249 memcpy(pvBuf, pvSrc, cb);
2250 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2251 }
2252 else
2253 {
2254 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2255 pRam->GCPhys + off, pPage, rc));
2256 memset(pvBuf, 0xff, cb);
2257 }
2258 }
2259
2260 /* next page */
2261 if (cb >= cbRead)
2262 {
2263 pgmUnlock(pVM);
2264 return VINF_SUCCESS;
2265 }
2266 cbRead -= cb;
2267 off += cb;
2268 pvBuf = (char *)pvBuf + cb;
2269 } /* walk pages in ram range. */
2270
2271 GCPhys = pRam->GCPhysLast + 1;
2272 }
2273 else
2274 {
2275 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
2276
2277 /*
2278 * Unassigned address space.
2279 */
2280 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
2281 if (cb >= cbRead)
2282 {
2283 memset(pvBuf, 0xff, cbRead);
2284 break;
2285 }
2286 memset(pvBuf, 0xff, cb);
2287
2288 cbRead -= cb;
2289 pvBuf = (char *)pvBuf + cb;
2290 GCPhys += cb;
2291 }
2292
2293 /* Advance range if necessary. */
2294 while (pRam && GCPhys > pRam->GCPhysLast)
2295 pRam = pRam->CTX_SUFF(pNext);
2296 } /* Ram range walk */
2297
2298 pgmUnlock(pVM);
2299 return VINF_SUCCESS;
2300}
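
/*
 * Illustrative sketch (excluded from the build) of a handler-respecting read.
 * exampleReadGuestBlob and its buffer size are made up; the status handling
 * follows the contract documented above.
 */
#if 0 /* illustrative example only */
static int exampleReadGuestBlob(PVM pVM, RTGCPHYS GCPhys)
{
    uint8_t abBlob[64];
    int rc = PGMPhysRead(pVM, GCPhys, abBlob, sizeof(abBlob));
    /* In R0/RC this may be VERR_PGM_PHYS_WR_HIT_HANDLER, meaning the access
       must be redone in ring-3; in ring-3 it is always VINF_SUCCESS and
       unassigned space reads back as 0xff. */
    return rc;
}
#endif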
2301
2302
2303/**
2304 * Deals with writing to a page with one or more WRITE or ALL access handlers.
2305 *
2306 * @returns VBox status code. Can be ignored in ring-3.
2307 * @retval VINF_SUCCESS.
2308 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2309 *
2310 * @param pVM The VM handle.
2311 * @param pPage The page descriptor.
2312 * @param GCPhys The physical address to start writing at.
2313 * @param pvBuf What to write.
2314 * @param cbWrite How much to write - less than or equal to a page.
2315 */
2316static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite)
2317{
2318 PGMPAGEMAPLOCK PgMpLck;
2319 void *pvDst = NULL;
2320 int rc;
2321
2322 /*
2323 * Give priority to physical handlers (like #PF does).
2324 *
2325 * Hope for a lonely physical handler first that covers the whole
2326 * write area. This should be a pretty frequent case with MMIO and
2327 * the heavy usage of full page handlers in the page pool.
2328 */
2329 if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
2330 || PGM_PAGE_IS_MMIO(pPage) /* screw virtual handlers on MMIO pages */)
2331 {
2332 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2333 if (pCur)
2334 {
2335 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
2336 Assert(pCur->CTX_SUFF(pfnHandler));
2337
2338 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
2339 if (cbRange > cbWrite)
2340 cbRange = cbWrite;
2341
2342#ifndef IN_RING3
2343 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2344 NOREF(cbRange);
2345 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2346 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2347
2348#else /* IN_RING3 */
2349 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2350 if (!PGM_PAGE_IS_MMIO(pPage))
2351 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2352 else
2353 rc = VINF_SUCCESS;
2354 if (RT_SUCCESS(rc))
2355 {
2356 PFNPGMR3PHYSHANDLER pfnHandler = pCur->CTX_SUFF(pfnHandler);
2357 void *pvUser = pCur->CTX_SUFF(pvUser);
2358
2359 STAM_PROFILE_START(&pCur->Stat, h);
2360 PGM_LOCK_ASSERT_OWNER(pVM);
2361 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2362 pgmUnlock(pVM);
2363 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2364 pgmLock(pVM);
2365# ifdef VBOX_WITH_STATISTICS
2366 pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2367 if (pCur)
2368 STAM_PROFILE_STOP(&pCur->Stat, h);
2369# else
2370 pCur = NULL; /* might not be valid anymore. */
2371# endif
2372 if (rc == VINF_PGM_HANDLER_DO_DEFAULT && pvDst)
2373 memcpy(pvDst, pvBuf, cbRange);
2377 else
2378 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pCur) ? pCur->pszDesc : ""));
2379 }
2380 else
2381 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2382 GCPhys, pPage, rc), rc);
2383 if (RT_LIKELY(cbRange == cbWrite))
2384 {
2385 if (pvDst)
2386 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2387 return VINF_SUCCESS;
2388 }
2389
2390 /* more fun to be had below */
2391 cbWrite -= cbRange;
2392 GCPhys += cbRange;
2393 pvBuf = (uint8_t *)pvBuf + cbRange;
2394 pvDst = (uint8_t *)pvDst + cbRange;
2395#endif /* IN_RING3 */
2396 }
2397 /* else: the handler is somewhere else in the page, deal with it below. */
2398 Assert(!PGM_PAGE_IS_MMIO(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
2399 }
2400 /*
2401 * A virtual handler without any interfering physical handlers.
2402 * Hopefully it'll cover the whole write.
2403 */
2404 else if (!PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
2405 {
2406 unsigned iPage;
2407 PPGMVIRTHANDLER pCur;
2408 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pCur, &iPage);
2409 if (RT_SUCCESS(rc))
2410 {
2411 size_t cbRange = (PAGE_OFFSET_MASK & pCur->Core.KeyLast) - (PAGE_OFFSET_MASK & GCPhys) + 1;
2412 if (cbRange > cbWrite)
2413 cbRange = cbWrite;
2414
2415#ifndef IN_RING3
2416 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2417 NOREF(cbRange);
2418 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2419 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2420
2421#else /* IN_RING3 */
2422
2423 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2424 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2425 if (RT_SUCCESS(rc))
2426 {
2427 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2428 if (pCur->pfnHandlerR3)
2429 {
2430 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pCur->Core.Key & PAGE_BASE_GC_MASK)
2431 + (iPage << PAGE_SHIFT)
2432 + (GCPhys & PAGE_OFFSET_MASK);
2433
2434 STAM_PROFILE_START(&pCur->Stat, h);
2435 rc = pCur->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2436 STAM_PROFILE_STOP(&pCur->Stat, h);
2437 }
2438 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2439 memcpy(pvDst, pvBuf, cbRange);
2440 else
2441 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
2442 }
2443 else
2444 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2445 GCPhys, pPage, rc), rc);
2446 if (RT_LIKELY(cbRange == cbWrite))
2447 {
2448 if (pvDst)
2449 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2450 return VINF_SUCCESS;
2451 }
2452
2453 /* more fun to be had below */
2454 cbWrite -= cbRange;
2455 GCPhys += cbRange;
2456 pvBuf = (uint8_t *)pvBuf + cbRange;
2457 pvDst = (uint8_t *)pvDst + cbRange;
2458#endif
2459 }
2460 /* else: the handler is somewhere else in the page, deal with it below. */
2461 }
2462
2463 /*
2464 * Deal with all the odd ends.
2465 */
2466
2467 /* We need a writable destination page. */
2468 if (!pvDst)
2469 {
2470 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2471 AssertLogRelMsgReturn(RT_SUCCESS(rc),
2472 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2473 GCPhys, pPage, rc), rc);
2474 }
2475
2476 /* The loop state (big + ugly). */
2477 unsigned iVirtPage = 0;
2478 PPGMVIRTHANDLER pVirt = NULL;
2479 uint32_t offVirt = PAGE_SIZE;
2480 uint32_t offVirtLast = PAGE_SIZE;
2481 bool fMoreVirt = PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage);
2482
2483 PPGMPHYSHANDLER pPhys = NULL;
2484 uint32_t offPhys = PAGE_SIZE;
2485 uint32_t offPhysLast = PAGE_SIZE;
2486 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2487
2488 /* The loop. */
2489 for (;;)
2490 {
2491 /*
2492 * Find the closest handler at or above GCPhys.
2493 */
2494 if (fMoreVirt && !pVirt)
2495 {
2496 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iVirtPage);
2497 if (RT_SUCCESS(rc))
2498 {
2499 offVirt = 0;
2500 offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2501 }
2502 else
2503 {
2504 PPGMPHYS2VIRTHANDLER pVirtPhys;
2505 pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
2506 GCPhys, true /* fAbove */);
2507 if ( pVirtPhys
2508 && (pVirtPhys->Core.Key >> PAGE_SHIFT) == (GCPhys >> PAGE_SHIFT))
2509 {
2510 /* ASSUME that pVirtPhys only covers one page. */
2511 Assert((pVirtPhys->Core.Key >> PAGE_SHIFT) == (pVirtPhys->Core.KeyLast >> PAGE_SHIFT));
2512 Assert(pVirtPhys->Core.Key > GCPhys);
2513
2514 pVirt = (PPGMVIRTHANDLER)((uintptr_t)pVirtPhys + pVirtPhys->offVirtHandler);
2515 iVirtPage = pVirtPhys - &pVirt->aPhysToVirt[0]; Assert(iVirtPage == 0);
2516 offVirt = (pVirtPhys->Core.Key & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2517 offVirtLast = (pVirtPhys->Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2518 }
2519 else
2520 {
2521 pVirt = NULL;
2522 fMoreVirt = false;
2523 offVirt = offVirtLast = PAGE_SIZE;
2524 }
2525 }
2526 }
2527
2528 if (fMorePhys && !pPhys)
2529 {
2530 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2531 if (pPhys)
2532 {
2533 offPhys = 0;
2534 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2535 }
2536 else
2537 {
2538 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
2539 GCPhys, true /* fAbove */);
2540 if ( pPhys
2541 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
2542 {
2543 offPhys = pPhys->Core.Key - GCPhys;
2544 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2545 }
2546 else
2547 {
2548 pPhys = NULL;
2549 fMorePhys = false;
2550 offPhys = offPhysLast = PAGE_SIZE;
2551 }
2552 }
2553 }
2554
2555 /*
2556 * Handle access to space without handlers (that's easy).
2557 */
2558 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2559 uint32_t cbRange = (uint32_t)cbWrite;
2560 if (offPhys && offVirt)
2561 {
2562 if (cbRange > offPhys)
2563 cbRange = offPhys;
2564 if (cbRange > offVirt)
2565 cbRange = offVirt;
2566 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
2567 }
2568 /*
2569 * Physical handler.
2570 */
2571 else if (!offPhys && offVirt)
2572 {
2573 if (cbRange > offPhysLast + 1)
2574 cbRange = offPhysLast + 1;
2575 if (cbRange > offVirt)
2576 cbRange = offVirt;
2577#ifdef IN_RING3
2578 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2579 void *pvUser = pPhys->CTX_SUFF(pvUser);
2580
2581 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2582 STAM_PROFILE_START(&pPhys->Stat, h);
2583 PGM_LOCK_ASSERT_OWNER(pVM);
2584 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2585 pgmUnlock(pVM);
2586 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2587 pgmLock(pVM);
2588# ifdef VBOX_WITH_STATISTICS
2589 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2590 if (pPhys)
2591 STAM_PROFILE_STOP(&pPhys->Stat, h);
2592# else
2593 pPhys = NULL; /* might not be valid anymore. */
2594# endif
2595 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2596#else
2597 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2598 NOREF(cbRange);
2599 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2600 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2601 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2602#endif
2603 }
2604 /*
2605 * Virtual handler.
2606 */
2607 else if (offPhys && !offVirt)
2608 {
2609 if (cbRange > offVirtLast + 1)
2610 cbRange = offVirtLast + 1;
2611 if (cbRange > offPhys)
2612 cbRange = offPhys;
2613#ifdef IN_RING3
2614 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
2615 if (pVirt->pfnHandlerR3)
2616 {
2617 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2618 + (iVirtPage << PAGE_SHIFT)
2619 + (GCPhys & PAGE_OFFSET_MASK);
2620 STAM_PROFILE_START(&pVirt->Stat, h);
2621 rc = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2622 STAM_PROFILE_STOP(&pVirt->Stat, h);
2623 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2624 }
2625 pVirt = NULL;
2626#else
2627 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2628 NOREF(cbRange);
2629 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2630 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2631 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2632#endif
2633 }
2634 /*
2635 * Both... give the physical one priority.
2636 */
2637 else
2638 {
2639 Assert(!offPhys && !offVirt);
2640 if (cbRange > offVirtLast + 1)
2641 cbRange = offVirtLast + 1;
2642 if (cbRange > offPhysLast + 1)
2643 cbRange = offPhysLast + 1;
2644
2645#ifdef IN_RING3
2646 if (pVirt->pfnHandlerR3)
2647 Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
2648 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
2649
2650 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2651 void *pvUser = pPhys->CTX_SUFF(pvUser);
2652
2653 STAM_PROFILE_START(&pPhys->Stat, h);
2654 PGM_LOCK_ASSERT_OWNER(pVM);
2655 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2656 pgmUnlock(pVM);
2657 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2658 pgmLock(pVM);
2659# ifdef VBOX_WITH_STATISTICS
2660 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2661 if (pPhys)
2662 STAM_PROFILE_STOP(&pPhys->Stat, h);
2663# else
2664 pPhys = NULL; /* might not be valid anymore. */
2665# endif
2666 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2667 if (pVirt->pfnHandlerR3)
2668 {
2669
2670 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2671 + (iVirtPage << PAGE_SHIFT)
2672 + (GCPhys & PAGE_OFFSET_MASK);
2673 STAM_PROFILE_START(&pVirt->Stat, h2);
2674 int rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2675 STAM_PROFILE_STOP(&pVirt->Stat, h2);
2676 if (rc2 == VINF_SUCCESS && rc == VINF_PGM_HANDLER_DO_DEFAULT)
2677 rc = VINF_SUCCESS;
2678 else
2679 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2680 }
2681 pPhys = NULL;
2682 pVirt = NULL;
2683#else
2684 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2685 NOREF(cbRange);
2686 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2687 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2688 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2689#endif
2690 }
2691 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2692 memcpy(pvDst, pvBuf, cbRange);
2693
2694 /*
2695 * Advance if we've got more stuff to do.
2696 */
2697 if (cbRange >= cbWrite)
2698 {
2699 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2700 return VINF_SUCCESS;
2701 }
2702
2703 cbWrite -= cbRange;
2704 GCPhys += cbRange;
2705 pvBuf = (uint8_t *)pvBuf + cbRange;
2706 pvDst = (uint8_t *)pvDst + cbRange;
2707
2708 offPhys -= cbRange;
2709 offPhysLast -= cbRange;
2710 offVirt -= cbRange;
2711 offVirtLast -= cbRange;
2712 }
2713}
2714
2715
2716/**
2717 * Write to physical memory.
2718 *
2719 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2720 * want to ignore those.
2721 *
2722 * @returns VBox status code. Can be ignored in ring-3.
2723 * @retval VINF_SUCCESS.
2724 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2725 *
2726 * @param pVM VM Handle.
2727 * @param GCPhys Physical address to write to.
2728 * @param pvBuf What to write.
2729 * @param cbWrite How many bytes to write.
2730 */
2731VMMDECL(int) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
2732{
2733 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
2734 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2735 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2736
2737 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWrite));
2738 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2739
2740 pgmLock(pVM);
2741
2742 /*
2743 * Copy loop on ram ranges.
2744 */
2745 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2746 for (;;)
2747 {
2748 /* Inside range or not? */
2749 if (pRam && GCPhys >= pRam->GCPhys)
2750 {
2751 /*
2752 * Must work our way thru this page by page.
2753 */
2754 RTGCPHYS off = GCPhys - pRam->GCPhys;
2755 while (off < pRam->cb)
2756 {
2757 unsigned iPage = off >> PAGE_SHIFT;
2758 PPGMPAGE pPage = &pRam->aPages[iPage];
2759 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2760 if (cb > cbWrite)
2761 cb = cbWrite;
2762
2763 /*
2764 * Any active WRITE or ALL access handlers?
2765 */
2766 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2767 {
2768 int rc = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
2769 if (RT_FAILURE(rc))
2770 {
2771 pgmUnlock(pVM);
2772 return rc;
2773 }
2774 }
2775 else
2776 {
2777 /*
2778 * Get the pointer to the page.
2779 */
2780 PGMPAGEMAPLOCK PgMpLck;
2781 void *pvDst;
2782 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
2783 if (RT_SUCCESS(rc))
2784 {
2785 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
2786 memcpy(pvDst, pvBuf, cb);
2787 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2788 }
2789 /* Ignore writes to ballooned pages. */
2790 else if (!PGM_PAGE_IS_BALLOONED(pPage))
2791 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2792 pRam->GCPhys + off, pPage, rc));
2793 }
2794
2795 /* next page */
2796 if (cb >= cbWrite)
2797 {
2798 pgmUnlock(pVM);
2799 return VINF_SUCCESS;
2800 }
2801
2802 cbWrite -= cb;
2803 off += cb;
2804 pvBuf = (const char *)pvBuf + cb;
2805 } /* walk pages in ram range */
2806
2807 GCPhys = pRam->GCPhysLast + 1;
2808 }
2809 else
2810 {
2811 /*
2812 * Unassigned address space, skip it.
2813 */
2814 if (!pRam)
2815 break;
2816 size_t cb = pRam->GCPhys - GCPhys;
2817 if (cb >= cbWrite)
2818 break;
2819 cbWrite -= cb;
2820 pvBuf = (const char *)pvBuf + cb;
2821 GCPhys += cb;
2822 }
2823
2824 /* Advance range if necessary. */
2825 while (pRam && GCPhys > pRam->GCPhysLast)
2826 pRam = pRam->CTX_SUFF(pNext);
2827 } /* Ram range walk */
2828
2829 pgmUnlock(pVM);
2830 return VINF_SUCCESS;
2831}
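
/*
 * Illustrative write-side sketch (excluded from the build).
 * examplePokeGuestDword is hypothetical; the important bit is that R0/RC
 * callers must be prepared to get VERR_PGM_PHYS_WR_HIT_HANDLER back and let
 * ring-3 redo the write.
 */
#if 0 /* illustrative example only */
static int examplePokeGuestDword(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value)
{
    int rc = PGMPhysWrite(pVM, GCPhys, &u32Value, sizeof(u32Value));
# ifndef IN_RING3
    if (rc == VERR_PGM_PHYS_WR_HIT_HANDLER)
        return rc; /* the handler couldn't be invoked in this context; retry in ring-3 */
# endif
    return rc;
}
#endif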
2832
2833
2834/**
2835 * Read from guest physical memory by GC physical address, bypassing
2836 * MMIO and access handlers.
2837 *
2838 * @returns VBox status.
2839 * @param pVM VM handle.
2840 * @param pvDst The destination address.
2841 * @param GCPhysSrc The source address (GC physical address).
2842 * @param cb The number of bytes to read.
2843 */
2844VMMDECL(int) PGMPhysSimpleReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
2845{
2846 /*
2847 * Treat the first page as a special case.
2848 */
2849 if (!cb)
2850 return VINF_SUCCESS;
2851
2852 /* map the 1st page */
2853 void const *pvSrc;
2854 PGMPAGEMAPLOCK Lock;
2855 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2856 if (RT_FAILURE(rc))
2857 return rc;
2858
2859 /* optimize for the case where access is completely within the first page. */
2860 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
2861 if (RT_LIKELY(cb <= cbPage))
2862 {
2863 memcpy(pvDst, pvSrc, cb);
2864 PGMPhysReleasePageMappingLock(pVM, &Lock);
2865 return VINF_SUCCESS;
2866 }
2867
2868 /* copy to the end of the page. */
2869 memcpy(pvDst, pvSrc, cbPage);
2870 PGMPhysReleasePageMappingLock(pVM, &Lock);
2871 GCPhysSrc += cbPage;
2872 pvDst = (uint8_t *)pvDst + cbPage;
2873 cb -= cbPage;
2874
2875 /*
2876 * Page by page.
2877 */
2878 for (;;)
2879 {
2880 /* map the page */
2881 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2882 if (RT_FAILURE(rc))
2883 return rc;
2884
2885 /* last page? */
2886 if (cb <= PAGE_SIZE)
2887 {
2888 memcpy(pvDst, pvSrc, cb);
2889 PGMPhysReleasePageMappingLock(pVM, &Lock);
2890 return VINF_SUCCESS;
2891 }
2892
2893 /* copy the entire page and advance */
2894 memcpy(pvDst, pvSrc, PAGE_SIZE);
2895 PGMPhysReleasePageMappingLock(pVM, &Lock);
2896 GCPhysSrc += PAGE_SIZE;
2897 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2898 cb -= PAGE_SIZE;
2899 }
2900 /* won't ever get here. */
2901}
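
/*
 * Illustrative sketch (excluded from the build) of when the "simple" variant
 * is appropriate: raw peeking at RAM where MMIO and access handlers must be
 * bypassed rather than triggered. The EXAMPLEGUESTHDR structure and helper
 * are made up.
 */
#if 0 /* illustrative example only */
typedef struct EXAMPLEGUESTHDR
{
    uint32_t u32Magic;
    uint32_t cbPayload;
} EXAMPLEGUESTHDR;

static int exampleReadHeaderRaw(PVM pVM, RTGCPHYS GCPhys, EXAMPLEGUESTHDR *pHdr)
{
    /* Fails with VERR_PGM_PHYS_PAGE_RESERVED or
       VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the range isn't backed by RAM. */
    return PGMPhysSimpleReadGCPhys(pVM, pHdr, GCPhys, sizeof(*pHdr));
}
#endif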
2902
2903
2904/**
2905 * Write to guest physical memory referenced by GC physical address.
2907 *
2908 * This will bypass MMIO and access handlers.
2909 *
2910 * @returns VBox status.
2911 * @param pVM VM handle.
2912 * @param GCPhysDst The GC physical address of the destination.
2913 * @param pvSrc The source buffer.
2914 * @param cb The number of bytes to write.
2915 */
2916VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
2917{
2918 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
2919
2920 /*
2921 * Treat the first page as a special case.
2922 */
2923 if (!cb)
2924 return VINF_SUCCESS;
2925
2926 /* map the 1st page */
2927 void *pvDst;
2928 PGMPAGEMAPLOCK Lock;
2929 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2930 if (RT_FAILURE(rc))
2931 return rc;
2932
2933 /* optimize for the case where access is completely within the first page. */
2934 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
2935 if (RT_LIKELY(cb <= cbPage))
2936 {
2937 memcpy(pvDst, pvSrc, cb);
2938 PGMPhysReleasePageMappingLock(pVM, &Lock);
2939 return VINF_SUCCESS;
2940 }
2941
2942 /* copy to the end of the page. */
2943 memcpy(pvDst, pvSrc, cbPage);
2944 PGMPhysReleasePageMappingLock(pVM, &Lock);
2945 GCPhysDst += cbPage;
2946 pvSrc = (const uint8_t *)pvSrc + cbPage;
2947 cb -= cbPage;
2948
2949 /*
2950 * Page by page.
2951 */
2952 for (;;)
2953 {
2954 /* map the page */
2955 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2956 if (RT_FAILURE(rc))
2957 return rc;
2958
2959 /* last page? */
2960 if (cb <= PAGE_SIZE)
2961 {
2962 memcpy(pvDst, pvSrc, cb);
2963 PGMPhysReleasePageMappingLock(pVM, &Lock);
2964 return VINF_SUCCESS;
2965 }
2966
2967 /* copy the entire page and advance */
2968 memcpy(pvDst, pvSrc, PAGE_SIZE);
2969 PGMPhysReleasePageMappingLock(pVM, &Lock);
2970 GCPhysDst += PAGE_SIZE;
2971 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2972 cb -= PAGE_SIZE;
2973 }
2974 /* won't ever get here. */
2975}
2976
2977
2978/**
2979 * Read from guest physical memory referenced by GC pointer.
2980 *
2981 * This function uses the current CR3/CR0/CR4 of the guest and will
2982 * bypass access handlers and not set any accessed bits.
2983 *
2984 * @returns VBox status.
2985 * @param pVCpu Handle to the current virtual CPU.
2986 * @param pvDst The destination address.
2987 * @param GCPtrSrc The source address (GC pointer).
2988 * @param cb The number of bytes to read.
2989 */
2990VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2991{
2992 PVM pVM = pVCpu->CTX_SUFF(pVM);
2993/** @todo fix the macro / state handling: VMCPU_ASSERT_EMT_OR_GURU(pVCpu); */
2994
2995 /*
2996 * Treat the first page as a special case.
2997 */
2998 if (!cb)
2999 return VINF_SUCCESS;
3000
3001 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleRead));
3002 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
3003
3004 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
3005 * when many VCPUs are fighting for the lock.
3006 */
3007 pgmLock(pVM);
3008
3009 /* map the 1st page */
3010 void const *pvSrc;
3011 PGMPAGEMAPLOCK Lock;
3012 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3013 if (RT_FAILURE(rc))
3014 {
3015 pgmUnlock(pVM);
3016 return rc;
3017 }
3018
3019 /* optimize for the case where access is completely within the first page. */
3020 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3021 if (RT_LIKELY(cb <= cbPage))
3022 {
3023 memcpy(pvDst, pvSrc, cb);
3024 PGMPhysReleasePageMappingLock(pVM, &Lock);
3025 pgmUnlock(pVM);
3026 return VINF_SUCCESS;
3027 }
3028
3029 /* copy to the end of the page. */
3030 memcpy(pvDst, pvSrc, cbPage);
3031 PGMPhysReleasePageMappingLock(pVM, &Lock);
3032 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
3033 pvDst = (uint8_t *)pvDst + cbPage;
3034 cb -= cbPage;
3035
3036 /*
3037 * Page by page.
3038 */
3039 for (;;)
3040 {
3041 /* map the page */
3042 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3043 if (RT_FAILURE(rc))
3044 {
3045 pgmUnlock(pVM);
3046 return rc;
3047 }
3048
3049 /* last page? */
3050 if (cb <= PAGE_SIZE)
3051 {
3052 memcpy(pvDst, pvSrc, cb);
3053 PGMPhysReleasePageMappingLock(pVM, &Lock);
3054 pgmUnlock(pVM);
3055 return VINF_SUCCESS;
3056 }
3057
3058 /* copy the entire page and advance */
3059 memcpy(pvDst, pvSrc, PAGE_SIZE);
3060 PGMPhysReleasePageMappingLock(pVM, &Lock);
3061 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
3062 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
3063 cb -= PAGE_SIZE;
3064 }
3065 /* won't ever get here. */
3066}
3067
3068
3069/**
3070 * Write to guest physical memory referenced by GC pointer.
3071 *
3072 * This function uses the current CR3/CR0/CR4 of the guest and will
3073 * bypass access handlers and not set dirty or accessed bits.
3074 *
3075 * @returns VBox status.
3076 * @param pVCpu Handle to the current virtual CPU.
3077 * @param GCPtrDst The destination address (GC pointer).
3078 * @param pvSrc The source address.
3079 * @param cb The number of bytes to write.
3080 */
3081VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3082{
3083 PVM pVM = pVCpu->CTX_SUFF(pVM);
3084 VMCPU_ASSERT_EMT(pVCpu);
3085
3086 /*
3087 * Treat the first page as a special case.
3088 */
3089 if (!cb)
3090 return VINF_SUCCESS;
3091
3092 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWrite));
3093 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
3094
3095 /* map the 1st page */
3096 void *pvDst;
3097 PGMPAGEMAPLOCK Lock;
3098 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3099 if (RT_FAILURE(rc))
3100 return rc;
3101
3102 /* optimize for the case where access is completely within the first page. */
3103 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3104 if (RT_LIKELY(cb <= cbPage))
3105 {
3106 memcpy(pvDst, pvSrc, cb);
3107 PGMPhysReleasePageMappingLock(pVM, &Lock);
3108 return VINF_SUCCESS;
3109 }
3110
3111 /* copy to the end of the page. */
3112 memcpy(pvDst, pvSrc, cbPage);
3113 PGMPhysReleasePageMappingLock(pVM, &Lock);
3114 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3115 pvSrc = (const uint8_t *)pvSrc + cbPage;
3116 cb -= cbPage;
3117
3118 /*
3119 * Page by page.
3120 */
3121 for (;;)
3122 {
3123 /* map the page */
3124 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3125 if (RT_FAILURE(rc))
3126 return rc;
3127
3128 /* last page? */
3129 if (cb <= PAGE_SIZE)
3130 {
3131 memcpy(pvDst, pvSrc, cb);
3132 PGMPhysReleasePageMappingLock(pVM, &Lock);
3133 return VINF_SUCCESS;
3134 }
3135
3136 /* copy the entire page and advance */
3137 memcpy(pvDst, pvSrc, PAGE_SIZE);
3138 PGMPhysReleasePageMappingLock(pVM, &Lock);
3139 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3140 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3141 cb -= PAGE_SIZE;
3142 }
3143 /* won't ever get here. */
3144}
3145
3146
3147/**
3148 * Write to guest physical memory referenced by GC pointer and update the PTE.
3149 *
3150 * This function uses the current CR3/CR0/CR4 of the guest and will
3151 * bypass access handlers but will set any dirty and accessed bits in the PTE.
3152 *
3153 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
3154 *
3155 * @returns VBox status.
3156 * @param pVCpu Handle to the current virtual CPU.
3157 * @param GCPtrDst The destination address (GC pointer).
3158 * @param pvSrc The source address.
3159 * @param cb The number of bytes to write.
3160 */
3161VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3162{
3163 PVM pVM = pVCpu->CTX_SUFF(pVM);
3164 VMCPU_ASSERT_EMT(pVCpu);
3165
3166 /*
3167 * Treat the first page as a special case.
3168 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
3169 */
3170 if (!cb)
3171 return VINF_SUCCESS;
3172
3173 /* map the 1st page */
3174 void *pvDst;
3175 PGMPAGEMAPLOCK Lock;
3176 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3177 if (RT_FAILURE(rc))
3178 return rc;
3179
3180 /* optimize for the case where access is completely within the first page. */
3181 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3182 if (RT_LIKELY(cb <= cbPage))
3183 {
3184 memcpy(pvDst, pvSrc, cb);
3185 PGMPhysReleasePageMappingLock(pVM, &Lock);
3186 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3187 return VINF_SUCCESS;
3188 }
3189
3190 /* copy to the end of the page. */
3191 memcpy(pvDst, pvSrc, cbPage);
3192 PGMPhysReleasePageMappingLock(pVM, &Lock);
3193 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3194 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3195 pvSrc = (const uint8_t *)pvSrc + cbPage;
3196 cb -= cbPage;
3197
3198 /*
3199 * Page by page.
3200 */
3201 for (;;)
3202 {
3203 /* map the page */
3204 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3205 if (RT_FAILURE(rc))
3206 return rc;
3207
3208 /* last page? */
3209 if (cb <= PAGE_SIZE)
3210 {
3211 memcpy(pvDst, pvSrc, cb);
3212 PGMPhysReleasePageMappingLock(pVM, &Lock);
3213 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3214 return VINF_SUCCESS;
3215 }
3216
3217 /* copy the entire page and advance */
3218 memcpy(pvDst, pvSrc, PAGE_SIZE);
3219 PGMPhysReleasePageMappingLock(pVM, &Lock);
3220 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3221 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3222 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3223 cb -= PAGE_SIZE;
3224 }
3225 /* won't ever get here. */
3226}
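
/*
 * Illustrative sketch (excluded from the build) of the dirty-bit variant. It
 * is the one to use when the store is architecturally performed by the guest
 * itself (e.g. pushing data onto the guest stack), so the accessed and dirty
 * bits must end up set. examplePushGuestU32 and its arguments are made up.
 */
#if 0 /* illustrative example only */
static int examplePushGuestU32(PVMCPU pVCpu, RTGCPTR GCPtrStackTop, uint32_t u32Value)
{
    /* The caller is assumed to have decremented the guest stack pointer already. */
    return PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrStackTop, &u32Value, sizeof(u32Value));
}
#endif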
3227
3228
3229/**
3230 * Read from guest physical memory referenced by GC pointer.
3231 *
3232 * This function uses the current CR3/CR0/CR4 of the guest and will
3233 * respect access handlers and set accessed bits.
3234 *
3235 * @returns VBox status.
3236 * @param pVCpu Handle to the current virtual CPU.
3237 * @param pvDst The destination address.
3238 * @param GCPtrSrc The source address (GC pointer).
3239 * @param cb The number of bytes to read.
3240 * @thread The vCPU EMT.
3241 */
3242VMMDECL(int) PGMPhysReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3243{
3244 RTGCPHYS GCPhys;
3245 uint64_t fFlags;
3246 int rc;
3247 PVM pVM = pVCpu->CTX_SUFF(pVM);
3248 VMCPU_ASSERT_EMT(pVCpu);
3249
3250 /*
3251 * Anything to do?
3252 */
3253 if (!cb)
3254 return VINF_SUCCESS;
3255
3256 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
3257
3258 /*
3259 * Optimize reads within a single page.
3260 */
3261 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3262 {
3263 /* Convert virtual to physical address + flags */
3264 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3265 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3266 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3267
3268 /* mark the guest page as accessed. */
3269 if (!(fFlags & X86_PTE_A))
3270 {
3271 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3272 AssertRC(rc);
3273 }
3274
3275 return PGMPhysRead(pVM, GCPhys, pvDst, cb);
3276 }
3277
3278 /*
3279 * Page by page.
3280 */
3281 for (;;)
3282 {
3283 /* Convert virtual to physical address + flags */
3284 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3285 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3286 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3287
3288 /* mark the guest page as accessed. */
3289 if (!(fFlags & X86_PTE_A))
3290 {
3291 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3292 AssertRC(rc);
3293 }
3294
3295 /* copy */
3296 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3297 rc = PGMPhysRead(pVM, GCPhys, pvDst, cbRead);
3298 if (cbRead >= cb || RT_FAILURE(rc))
3299 return rc;
3300
3301 /* next */
3302 cb -= cbRead;
3303 pvDst = (uint8_t *)pvDst + cbRead;
3304 GCPtrSrc += cbRead;
3305 }
3306}
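
/*
 * Illustrative sketch (excluded from the build) contrasting this
 * handler-respecting, accessed-bit-setting read with the Simple variants
 * above. exampleFetchOperand is hypothetical; an emulator running on the EMT
 * would use a call like this to read a guest operand.
 */
#if 0 /* illustrative example only */
static int exampleFetchOperand(PVMCPU pVCpu, RTGCPTR GCPtrSrc, void *pvDst, size_t cb)
{
    /* Translation uses the current CR3/CR0/CR4; page-crossing reads and the
       accessed-bit update are handled inside PGMPhysReadGCPtr. */
    return PGMPhysReadGCPtr(pVCpu, pvDst, GCPtrSrc, cb);
}
#endif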
3307
3308
3309/**
3310 * Write to guest physical memory referenced by GC pointer.
3311 *
3312 * This function uses the current CR3/CR0/CR4 of the guest and will
3313 * respect access handlers and set dirty and accessed bits.
3314 *
3315 * @returns VBox status.
3316 * @retval VINF_SUCCESS.
3317 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
3318 *
3319 * @param pVCpu Handle to the current virtual CPU.
3320 * @param GCPtrDst The destination address (GC pointer).
3321 * @param pvSrc The source address.
3322 * @param cb The number of bytes to write.
3323 */
3324VMMDECL(int) PGMPhysWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3325{
3326 RTGCPHYS GCPhys;
3327 uint64_t fFlags;
3328 int rc;
3329 PVM pVM = pVCpu->CTX_SUFF(pVM);
3330 VMCPU_ASSERT_EMT(pVCpu);
3331
3332 /*
3333 * Anything to do?
3334 */
3335 if (!cb)
3336 return VINF_SUCCESS;
3337
3338 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
3339
3340 /*
3341 * Optimize writes within a single page.
3342 */
3343 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3344 {
3345 /* Convert virtual to physical address + flags */
3346 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3347 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3348 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3349
3350 /* Mention when we ignore X86_PTE_RW... */
3351 if (!(fFlags & X86_PTE_RW))
3352 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3353
3354 /* Mark the guest page as accessed and dirty if necessary. */
3355 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3356 {
3357 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3358 AssertRC(rc);
3359 }
3360
3361 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
3362 }
3363
3364 /*
3365 * Page by page.
3366 */
3367 for (;;)
3368 {
3369 /* Convert virtual to physical address + flags */
3370 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3371 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3372 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3373
3374 /* Mention when we ignore X86_PTE_RW... */
3375 if (!(fFlags & X86_PTE_RW))
3376 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3377
3378 /* Mark the guest page as accessed and dirty if necessary. */
3379 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3380 {
3381 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3382 AssertRC(rc);
3383 }
3384
3385 /* copy */
3386 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3387 rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite);
3388 if (cbWrite >= cb || RT_FAILURE(rc))
3389 return rc;
3390
3391 /* next */
3392 cb -= cbWrite;
3393 pvSrc = (uint8_t *)pvSrc + cbWrite;
3394 GCPtrDst += cbWrite;
3395 }
3396}
3397
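/*
 * A minimal usage sketch for PGMPhysWriteGCPtr.  The helper name, guest
 * address and value are hypothetical; the handling of
 * VERR_PGM_PHYS_WR_HIT_HANDLER follows the @retval note above (R0/GC only,
 * never in R3).
 */
#if 0 /* illustrative sketch, compiled out */
static int pgmPhysExamplePokeDword(PVMCPU pVCpu, RTGCPTR GCPtrDst, uint32_t u32Value)
{
    int rc = PGMPhysWriteGCPtr(pVCpu, GCPtrDst, &u32Value, sizeof(u32Value));
    if (rc == VERR_PGM_PHYS_WR_HIT_HANDLER)
        /* A physical access handler has to run in ring-3; the caller is
           expected to bail out and redo the operation there. */
        return rc;
    AssertRCReturn(rc, rc);
    return VINF_SUCCESS;
}
#endif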
3398
3399/**
3400 * Performs a read of guest virtual memory for instruction emulation.
3401 *
3402 * This will check permissions, raise exceptions and update the access bits.
3403 *
3404 * The current implementation will bypass all access handlers. It may later be
3405 * changed to at least respect MMIO.
3406 *
3407 *
3408 * @returns VBox status code suitable for scheduling.
3409 * @retval VINF_SUCCESS if the read was performed successfully.
3410 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3411 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3412 *
3413 * @param pVCpu Handle to the current virtual CPU.
3414 * @param pCtxCore The context core.
3415 * @param pvDst Where to put the bytes we've read.
3416 * @param GCPtrSrc The source address.
3417 * @param cb The number of bytes to read. Not more than a page.
3418 *
3419 * @remark This function will dynamically map physical pages in GC. This may unmap
3420 * mappings done by the caller. Be careful!
3421 */
3422VMMDECL(int) PGMPhysInterpretedRead(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
3423{
3424 PVM pVM = pVCpu->CTX_SUFF(pVM);
3425 Assert(cb <= PAGE_SIZE);
3426 VMCPU_ASSERT_EMT(pVCpu);
3427
3428/** @todo r=bird: This isn't perfect!
3429 * -# It's not checking for reserved bits being 1.
3430 * -# It's not correctly dealing with the access bit.
3431 * -# It's not respecting MMIO memory or any other access handlers.
3432 */
3433 /*
3434 * 1. Translate virtual to physical. This may fault.
3435 * 2. Map the physical address.
3436 * 3. Do the read operation.
3437 * 4. Set access bits if required.
3438 */
3439 int rc;
3440 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3441 if (cb <= cb1)
3442 {
3443 /*
3444 * Not crossing pages.
3445 */
3446 RTGCPHYS GCPhys;
3447 uint64_t fFlags;
3448 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3449 if (RT_SUCCESS(rc))
3450 {
3451 /** @todo we should check reserved bits ... */
3452 PGMPAGEMAPLOCK PgMpLck;
3453 void const *pvSrc;
3454 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &PgMpLck);
3455 switch (rc)
3456 {
3457 case VINF_SUCCESS:
3458 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
3459 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3460 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3461 break;
3462 case VERR_PGM_PHYS_PAGE_RESERVED:
3463 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3464 memset(pvDst, 0xff, cb);
3465 break;
3466 default:
3467 Assert(RT_FAILURE_NP(rc));
3468 return rc;
3469 }
3470
3471 /** @todo access bit emulation isn't 100% correct. */
3472 if (!(fFlags & X86_PTE_A))
3473 {
3474 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3475 AssertRC(rc);
3476 }
3477 return VINF_SUCCESS;
3478 }
3479 }
3480 else
3481 {
3482 /*
3483 * Crosses pages.
3484 */
3485 size_t cb2 = cb - cb1;
3486 uint64_t fFlags1;
3487 RTGCPHYS GCPhys1;
3488 uint64_t fFlags2;
3489 RTGCPHYS GCPhys2;
3490 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3491 if (RT_SUCCESS(rc))
3492 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3493 if (RT_SUCCESS(rc))
3494 {
3495 /** @todo we should check reserved bits ... */
3496 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
3497 PGMPAGEMAPLOCK PgMpLck;
3498 void const *pvSrc1;
3499 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc1, &PgMpLck);
3500 switch (rc)
3501 {
3502 case VINF_SUCCESS:
3503 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3504 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3505 break;
3506 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3507 memset(pvDst, 0xff, cb1);
3508 break;
3509 default:
3510 Assert(RT_FAILURE_NP(rc));
3511 return rc;
3512 }
3513
3514 void const *pvSrc2;
3515 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc2, &PgMpLck);
3516 switch (rc)
3517 {
3518 case VINF_SUCCESS:
3519 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
3520 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3521 break;
3522 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3523 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3524 break;
3525 default:
3526 Assert(RT_FAILURE_NP(rc));
3527 return rc;
3528 }
3529
3530 if (!(fFlags1 & X86_PTE_A))
3531 {
3532 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3533 AssertRC(rc);
3534 }
3535 if (!(fFlags2 & X86_PTE_A))
3536 {
3537 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3538 AssertRC(rc);
3539 }
3540 return VINF_SUCCESS;
3541 }
3542 }
3543
3544 /*
3545 * Raise a #PF.
3546 */
3547 uint32_t uErr;
3548
3549 /* Get the current privilege level. */
3550 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3551 switch (rc)
3552 {
3553 case VINF_SUCCESS:
3554 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3555 break;
3556
3557 case VERR_PAGE_NOT_PRESENT:
3558 case VERR_PAGE_TABLE_NOT_PRESENT:
3559 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3560 break;
3561
3562 default:
3563 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3564 return rc;
3565 }
3566 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
3567 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3568}
3569
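/*
 * A minimal sketch of how an instruction emulator might fetch an operand with
 * PGMPhysInterpretedRead.  The helper and variable names are hypothetical;
 * the status handling mirrors the @retval list above, where
 * VINF_EM_RAW_GUEST_TRAP / VINF_TRPM_XCPT_DISPATCHED are simply passed back
 * to the caller for rescheduling.
 */
#if 0 /* illustrative sketch, compiled out */
static int pgmPhysExampleFetchOperand(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCUINTPTR GCPtrOperand, uint16_t *pu16Operand)
{
    int rc = PGMPhysInterpretedRead(pVCpu, pRegFrame, pu16Operand, GCPtrOperand, sizeof(*pu16Operand));
    if (rc == VINF_SUCCESS)
        return VINF_SUCCESS;
    /* A #PF was raised (pending or dispatched) or a hard error occurred;
       either way the emulation of this instruction stops here. */
    return rc;
}
#endif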
3570
3571/**
3572 * Performs a read of guest virtual memory for instruction emulation.
3573 *
3574 * This will check permissions, raise exceptions and update the access bits.
3575 *
3576 * The current implementation will bypass all access handlers. It may later be
3577 * changed to at least respect MMIO.
3578 *
3579 *
3580 * @returns VBox status code suitable for scheduling.
3581 * @retval VINF_SUCCESS if the read was performed successfully.
3582 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3583 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3584 *
3585 * @param pVCpu Handle to the current virtual CPU.
3586 * @param pCtxCore The context core.
3587 * @param pvDst Where to put the bytes we've read.
3588 * @param GCPtrSrc The source address.
3589 * @param cb The number of bytes to read. Not more than a page.
3590 * @param   fRaiseTrap  If set, the trap will be raised as per spec; if clear,
3591 *                      an appropriate error status will be returned (never an
3592 *                      informational status).
3593 *
3594 *
3595 * @remarks Takes the PGM lock.
3596 * @remarks A page fault on the 2nd page of the access will be raised without
3597 * writing the bits on the first page since we're ASSUMING that the
3598 * caller is emulating an instruction access.
3599 * @remarks This function will dynamically map physical pages in GC. This may
3600 * unmap mappings done by the caller. Be careful!
3601 */
3602VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb,
3603 bool fRaiseTrap)
3604{
3605 PVM pVM = pVCpu->CTX_SUFF(pVM);
3606 Assert(cb <= PAGE_SIZE);
3607 VMCPU_ASSERT_EMT(pVCpu);
3608
3609 /*
3610 * 1. Translate virtual to physical. This may fault.
3611 * 2. Map the physical address.
3612 * 3. Do the read operation.
3613 * 4. Set access bits if required.
3614 */
3615 int rc;
3616 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3617 if (cb <= cb1)
3618 {
3619 /*
3620 * Not crossing pages.
3621 */
3622 RTGCPHYS GCPhys;
3623 uint64_t fFlags;
3624 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3625 if (RT_SUCCESS(rc))
3626 {
3627 if (1) /** @todo we should check reserved bits ... */
3628 {
3629 const void *pvSrc;
3630 PGMPAGEMAPLOCK Lock;
3631 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3632 switch (rc)
3633 {
3634 case VINF_SUCCESS:
3635 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3636 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3637 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3638 PGMPhysReleasePageMappingLock(pVM, &Lock);
3639 break;
3640 case VERR_PGM_PHYS_PAGE_RESERVED:
3641 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3642 memset(pvDst, 0xff, cb);
3643 break;
3644 default:
3645 AssertMsgFailed(("%Rrc\n", rc));
3646 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3647 return rc;
3648 }
3649
3650 if (!(fFlags & X86_PTE_A))
3651 {
3652 /** @todo access bit emulation isn't 100% correct. */
3653 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3654 AssertRC(rc);
3655 }
3656 return VINF_SUCCESS;
3657 }
3658 }
3659 }
3660 else
3661 {
3662 /*
3663 * Crosses pages.
3664 */
3665 size_t cb2 = cb - cb1;
3666 uint64_t fFlags1;
3667 RTGCPHYS GCPhys1;
3668 uint64_t fFlags2;
3669 RTGCPHYS GCPhys2;
3670 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3671 if (RT_SUCCESS(rc))
3672 {
3673 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3674 if (RT_SUCCESS(rc))
3675 {
3676 if (1) /** @todo we should check reserved bits ... */
3677 {
3678 const void *pvSrc;
3679 PGMPAGEMAPLOCK Lock;
3680 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3681 switch (rc)
3682 {
3683 case VINF_SUCCESS:
3684 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3685 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3686 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3687 PGMPhysReleasePageMappingLock(pVM, &Lock);
3688 break;
3689 case VERR_PGM_PHYS_PAGE_RESERVED:
3690 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3691 memset(pvDst, 0xff, cb1);
3692 break;
3693 default:
3694 AssertMsgFailed(("%Rrc\n", rc));
3695 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3696 return rc;
3697 }
3698
3699 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3700 switch (rc)
3701 {
3702 case VINF_SUCCESS:
3703 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3704 PGMPhysReleasePageMappingLock(pVM, &Lock);
3705 break;
3706 case VERR_PGM_PHYS_PAGE_RESERVED:
3707 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3708 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3709 break;
3710 default:
3711 AssertMsgFailed(("%Rrc\n", rc));
3712 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3713 return rc;
3714 }
3715
3716 if (!(fFlags1 & X86_PTE_A))
3717 {
3718 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3719 AssertRC(rc);
3720 }
3721 if (!(fFlags2 & X86_PTE_A))
3722 {
3723 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3724 AssertRC(rc);
3725 }
3726 return VINF_SUCCESS;
3727 }
3728 /* sort out which page */
3729 }
3730 else
3731 GCPtrSrc += cb1; /* fault on 2nd page */
3732 }
3733 }
3734
3735 /*
3736 * Raise a #PF if we're allowed to do that.
3737 */
3738 /* Calc the error bits. */
3739 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3740 uint32_t uErr;
3741 switch (rc)
3742 {
3743 case VINF_SUCCESS:
3744 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3745 rc = VERR_ACCESS_DENIED;
3746 break;
3747
3748 case VERR_PAGE_NOT_PRESENT:
3749 case VERR_PAGE_TABLE_NOT_PRESENT:
3750 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3751 break;
3752
3753 default:
3754 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3755 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3756 return rc;
3757 }
3758 if (fRaiseTrap)
3759 {
3760 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
3761 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3762 }
3763 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
3764 return rc;
3765}
3766
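/*
 * A minimal sketch for PGMPhysInterpretedReadNoHandlers with fRaiseTrap
 * clear: probing guest memory without injecting a #PF.  The helper and
 * variable names are hypothetical; on failure an error status (e.g.
 * VERR_PAGE_NOT_PRESENT or VERR_ACCESS_DENIED) is returned instead of raising
 * the trap, as documented above.
 */
#if 0 /* illustrative sketch, compiled out */
static bool pgmPhysExampleTryPeek(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCUINTPTR GCPtrSrc, uint8_t *pbDst, size_t cb)
{
    int rc = PGMPhysInterpretedReadNoHandlers(pVCpu, pRegFrame, pbDst, GCPtrSrc, cb, false /*fRaiseTrap*/);
    if (RT_SUCCESS(rc))
        return true;
    Log(("pgmPhysExampleTryPeek: %zu bytes at %RGv -> %Rrc\n", cb, GCPtrSrc, rc));
    return false;
}
#endif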
3767
3768/**
3769 * Performs a write to guest virtual memory for instruction emulation.
3770 *
3771 * This will check permissions, raise exceptions and update the dirty and access
3772 * bits.
3773 *
3774 * @returns VBox status code suitable for scheduling.
3775 * @retval  VINF_SUCCESS if the write was performed successfully.
3776 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3777 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3778 *
3779 * @param pVCpu Handle to the current virtual CPU.
3780 * @param pCtxCore The context core.
3781 * @param GCPtrDst The destination address.
3782 * @param pvSrc What to write.
3783 * @param cb The number of bytes to write. Not more than a page.
3784 * @param   fRaiseTrap  If set, the trap will be raised as per spec; if clear,
3785 *                      an appropriate error status will be returned (never an
3786 *                      informational status).
3787 *
3788 * @remarks Takes the PGM lock.
3789 * @remarks A page fault on the 2nd page of the access will be raised without
3790 * writing the bits on the first page since we're ASSUMING that the
3791 * caller is emulating an instruction access.
3792 * @remarks This function will dynamically map physical pages in GC. This may
3793 * unmap mappings done by the caller. Be careful!
3794 */
3795VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc,
3796 size_t cb, bool fRaiseTrap)
3797{
3798 Assert(cb <= PAGE_SIZE);
3799 PVM pVM = pVCpu->CTX_SUFF(pVM);
3800 VMCPU_ASSERT_EMT(pVCpu);
3801
3802 /*
3803 * 1. Translate virtual to physical. This may fault.
3804 * 2. Map the physical address.
3805 * 3. Do the write operation.
3806 * 4. Set access bits if required.
3807 */
3808 /** @todo Since this method is frequently used by EMInterpret or IOM
3809 *       upon a write fault to a write access monitored page, we can
3810 * reuse the guest page table walking from the \#PF code. */
3811 int rc;
3812 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
3813 if (cb <= cb1)
3814 {
3815 /*
3816 * Not crossing pages.
3817 */
3818 RTGCPHYS GCPhys;
3819 uint64_t fFlags;
3820 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags, &GCPhys);
3821 if (RT_SUCCESS(rc))
3822 {
3823 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
3824 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3825 && CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
3826 {
3827 void *pvDst;
3828 PGMPAGEMAPLOCK Lock;
3829 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
3830 switch (rc)
3831 {
3832 case VINF_SUCCESS:
3833 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3834 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
3835 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
3836 PGMPhysReleasePageMappingLock(pVM, &Lock);
3837 break;
3838 case VERR_PGM_PHYS_PAGE_RESERVED:
3839 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3840 /* bit bucket */
3841 break;
3842 default:
3843 AssertMsgFailed(("%Rrc\n", rc));
3844 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3845 return rc;
3846 }
3847
3848 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
3849 {
3850 /** @todo dirty & access bit emulation isn't 100% correct. */
3851 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3852 AssertRC(rc);
3853 }
3854 return VINF_SUCCESS;
3855 }
3856 rc = VERR_ACCESS_DENIED;
3857 }
3858 }
3859 else
3860 {
3861 /*
3862 * Crosses pages.
3863 */
3864 size_t cb2 = cb - cb1;
3865 uint64_t fFlags1;
3866 RTGCPHYS GCPhys1;
3867 uint64_t fFlags2;
3868 RTGCPHYS GCPhys2;
3869 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags1, &GCPhys1);
3870 if (RT_SUCCESS(rc))
3871 {
3872 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst + cb1, &fFlags2, &GCPhys2);
3873 if (RT_SUCCESS(rc))
3874 {
3875 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
3876 && (fFlags2 & X86_PTE_RW))
3877 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3878 && CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) )
3879 {
3880 void *pvDst;
3881 PGMPAGEMAPLOCK Lock;
3882 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
3883 switch (rc)
3884 {
3885 case VINF_SUCCESS:
3886 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3887 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
3888 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
3889 PGMPhysReleasePageMappingLock(pVM, &Lock);
3890 break;
3891 case VERR_PGM_PHYS_PAGE_RESERVED:
3892 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3893 /* bit bucket */
3894 break;
3895 default:
3896 AssertMsgFailed(("%Rrc\n", rc));
3897 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3898 return rc;
3899 }
3900
3901 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
3902 switch (rc)
3903 {
3904 case VINF_SUCCESS:
3905 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
3906 PGMPhysReleasePageMappingLock(pVM, &Lock);
3907 break;
3908 case VERR_PGM_PHYS_PAGE_RESERVED:
3909 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3910 /* bit bucket */
3911 break;
3912 default:
3913 AssertMsgFailed(("%Rrc\n", rc));
3914 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3915 return rc;
3916 }
3917
3918 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
3919 {
3920 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3921 AssertRC(rc);
3922 }
3923 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
3924 {
3925 rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3926 AssertRC(rc);
3927 }
3928 return VINF_SUCCESS;
3929 }
3930 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
3931 GCPtrDst += cb1; /* fault on the 2nd page. */
3932 rc = VERR_ACCESS_DENIED;
3933 }
3934 else
3935 GCPtrDst += cb1; /* fault on the 2nd page. */
3936 }
3937 }
3938
3939 /*
3940 * Raise a #PF if we're allowed to do that.
3941 */
3942 /* Calc the error bits. */
3943 uint32_t uErr;
3944 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3945 switch (rc)
3946 {
3947 case VINF_SUCCESS:
3948 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3949 rc = VERR_ACCESS_DENIED;
3950 break;
3951
3952 case VERR_ACCESS_DENIED:
3953 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
3954 break;
3955
3956 case VERR_PAGE_NOT_PRESENT:
3957 case VERR_PAGE_TABLE_NOT_PRESENT:
3958 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3959 break;
3960
3961 default:
3962 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
3963 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3964 return rc;
3965 }
3966 if (fRaiseTrap)
3967 {
3968 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
3969 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrDst);
3970 }
3971 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
3972 return rc;
3973}
3974
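/*
 * A minimal sketch for PGMPhysInterpretedWriteNoHandlers as it might be used
 * when emulating a store: fRaiseTrap is set, so on a permission or
 * translation failure the #PF is injected and the informational TRPM status
 * is returned to the scheduler.  The helper and variable names are
 * hypothetical.
 */
#if 0 /* illustrative sketch, compiled out */
static int pgmPhysExampleEmulateStore(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR GCPtrDst, uint64_t u64Value, size_t cb)
{
    Assert(cb <= sizeof(u64Value));
    int rc = PGMPhysInterpretedWriteNoHandlers(pVCpu, pRegFrame, GCPtrDst, &u64Value, cb, true /*fRaiseTrap*/);
    /* VINF_SUCCESS, VINF_EM_RAW_GUEST_TRAP and VINF_TRPM_XCPT_DISPATCHED are
       all handed straight back to the caller. */
    return rc;
}
#endif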
3975
3976/**
3977 * Return the page type of the specified physical address.
3978 *
3979 * @returns The page type.
3980 * @param pVM VM Handle.
3981 * @param   GCPhys      Guest physical address.
3982 */
3983VMMDECL(PGMPAGETYPE) PGMPhysGetPageType(PVM pVM, RTGCPHYS GCPhys)
3984{
3985 pgmLock(pVM);
3986 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
3987 PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
3988 pgmUnlock(pVM);
3989
3990 return enmPgType;
3991}
3992
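/*
 * A minimal sketch for PGMPhysGetPageType: deciding whether a physical
 * address is backed by MMIO before touching it.  The helper name is
 * hypothetical; PGMPAGETYPE_MMIO and PGMPAGETYPE_INVALID are regular
 * PGMPAGETYPE values.
 */
#if 0 /* illustrative sketch, compiled out */
static bool pgmPhysExampleIsMmio(PVM pVM, RTGCPHYS GCPhys)
{
    PGMPAGETYPE enmType = PGMPhysGetPageType(pVM, GCPhys);
    /* Unmapped addresses come back as PGMPAGETYPE_INVALID and are treated
       like MMIO holes here. */
    return enmType == PGMPAGETYPE_MMIO
        || enmType == PGMPAGETYPE_INVALID;
}
#endif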