VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMMap.cpp@ 16300

Last change on this file since 16300 was 16300, checked in by vboxsync, 16 years ago

More paging updates

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 56.3 KB
Line 
1/* $Id: PGMMap.cpp 16300 2009-01-28 12:06:35Z vboxsync $ */
2/** @file
3 * PGM - Page Manager, Guest Context Mappings.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_PGM
27#include <VBox/dbgf.h>
28#include <VBox/pgm.h>
29#include "PGMInternal.h"
30#include <VBox/vm.h>
31
32#include <VBox/log.h>
33#include <VBox/err.h>
34#include <iprt/asm.h>
35#include <iprt/assert.h>
36#include <iprt/string.h>
37
38
39/*******************************************************************************
40* Internal Functions *
41*******************************************************************************/
42static void pgmR3MapClearPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE);
43static void pgmR3MapSetPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE);
44static int pgmR3MapIntermediateCheckOne(PVM pVM, uintptr_t uAddress, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault);
45static void pgmR3MapIntermediateDoOne(PVM pVM, uintptr_t uAddress, RTHCPHYS HCPhys, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault);
46#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
47static void pgmR3MapClearShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE);
48static void pgmR3MapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE);
49#endif
50
51
52/**
53 * Creates a page table based mapping in GC.
54 *
55 * @returns VBox status code.
56 * @param pVM VM Handle.
57 * @param GCPtr Virtual Address. (Page table aligned!)
58 * @param cb Size of the range. Must be a 4MB aligned!
59 * @param pfnRelocate Relocation callback function.
60 * @param pvUser User argument to the callback.
61 * @param pszDesc Pointer to description string. This must not be freed.
62 */
63VMMR3DECL(int) PGMR3MapPT(PVM pVM, RTGCPTR GCPtr, uint32_t cb, PFNPGMRELOCATE pfnRelocate, void *pvUser, const char *pszDesc)
64{
65 LogFlow(("PGMR3MapPT: GCPtr=%#x cb=%d pfnRelocate=%p pvUser=%p pszDesc=%s\n", GCPtr, cb, pfnRelocate, pvUser, pszDesc));
66 AssertMsg(pVM->pgm.s.pInterPD && pVM->pgm.s.pShwNestedRootR3, ("Paging isn't initialized, init order problems!\n"));
67
68 /*
69 * Validate input.
70 */
71 if (cb < _2M || cb > 64 * _1M)
72 {
73 AssertMsgFailed(("Serious? cb=%d\n", cb));
74 return VERR_INVALID_PARAMETER;
75 }
76 cb = RT_ALIGN_32(cb, _4M);
77 RTGCPTR GCPtrLast = GCPtr + cb - 1;
78 if (GCPtrLast < GCPtr)
79 {
80 AssertMsgFailed(("Range wraps! GCPtr=%x GCPtrLast=%x\n", GCPtr, GCPtrLast));
81 return VERR_INVALID_PARAMETER;
82 }
83 if (pVM->pgm.s.fMappingsFixed)
84 {
85 AssertMsgFailed(("Mappings are fixed! It's not possible to add new mappings at this time!\n"));
86 return VERR_PGM_MAPPINGS_FIXED;
87 }
88 if (!pfnRelocate)
89 {
90 AssertMsgFailed(("Callback is required\n"));
91 return VERR_INVALID_PARAMETER;
92 }
93
94 /*
95 * Find list location.
96 */
97 PPGMMAPPING pPrev = NULL;
98 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
99 while (pCur)
100 {
101 if (pCur->GCPtrLast >= GCPtr && pCur->GCPtr <= GCPtrLast)
102 {
103 AssertMsgFailed(("Address is already in use by %s. req %#x-%#x take %#x-%#x\n",
104 pCur->pszDesc, GCPtr, GCPtrLast, pCur->GCPtr, pCur->GCPtrLast));
105 LogRel(("VERR_PGM_MAPPING_CONFLICT: Address is already in use by %s. req %#x-%#x take %#x-%#x\n",
106 pCur->pszDesc, GCPtr, GCPtrLast, pCur->GCPtr, pCur->GCPtrLast));
107 return VERR_PGM_MAPPING_CONFLICT;
108 }
109 if (pCur->GCPtr > GCPtr)
110 break;
111 pPrev = pCur;
112 pCur = pCur->pNextR3;
113 }
114
115 /*
116 * Check for conflicts with intermediate mappings.
117 */
118 const unsigned iPageDir = GCPtr >> X86_PD_SHIFT;
119 const unsigned cPTs = cb >> X86_PD_SHIFT;
120 if (pVM->pgm.s.fFinalizedMappings)
121 {
122 for (unsigned i = 0; i < cPTs; i++)
123 if (pVM->pgm.s.pInterPD->a[iPageDir + i].n.u1Present)
124 {
125 AssertMsgFailed(("Address %#x is already in use by an intermediate mapping.\n", GCPtr + (i << PAGE_SHIFT)));
126 LogRel(("VERR_PGM_MAPPING_CONFLICT: Address %#x is already in use by an intermediate mapping.\n", GCPtr + (i << PAGE_SHIFT)));
127 return VERR_PGM_MAPPING_CONFLICT;
128 }
129 /** @todo AMD64: add check in PAE structures too, so we can remove all the 32-Bit paging stuff there. */
130 }
131
132 /*
133 * Allocate and initialize the new list node.
134 */
135 PPGMMAPPING pNew;
136 int rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMMAPPING, aPTs[cPTs]), 0, MM_TAG_PGM, (void **)&pNew);
137 if (RT_FAILURE(rc))
138 return rc;
139 pNew->GCPtr = GCPtr;
140 pNew->GCPtrLast = GCPtrLast;
141 pNew->cb = cb;
142 pNew->pszDesc = pszDesc;
143 pNew->pfnRelocate = pfnRelocate;
144 pNew->pvUser = pvUser;
145 pNew->cPTs = cPTs;
146
147 /*
148 * Allocate page tables and insert them into the page directories.
149 * (One 32-bit PT and two PAE PTs.)
150 */
151 uint8_t *pbPTs;
152 rc = MMHyperAlloc(pVM, PAGE_SIZE * 3 * cPTs, PAGE_SIZE, MM_TAG_PGM, (void **)&pbPTs);
153 if (RT_FAILURE(rc))
154 {
155 MMHyperFree(pVM, pNew);
156 return VERR_NO_MEMORY;
157 }
158
159 /*
160 * Init the page tables and insert them into the page directories.
161 */
162 Log4(("PGMR3MapPT: GCPtr=%RGv cPTs=%u pbPTs=%p\n", GCPtr, cPTs, pbPTs));
163 for (unsigned i = 0; i < cPTs; i++)
164 {
165 /*
166 * 32-bit.
167 */
168 pNew->aPTs[i].pPTR3 = (PX86PT)pbPTs;
169 pNew->aPTs[i].pPTRC = MMHyperR3ToRC(pVM, pNew->aPTs[i].pPTR3);
170 pNew->aPTs[i].pPTR0 = MMHyperR3ToR0(pVM, pNew->aPTs[i].pPTR3);
171 pNew->aPTs[i].HCPhysPT = MMR3HyperHCVirt2HCPhys(pVM, pNew->aPTs[i].pPTR3);
172 pbPTs += PAGE_SIZE;
173 Log4(("PGMR3MapPT: i=%d: pPTR3=%RHv pPTRC=%RRv pPRTR0=%RHv HCPhysPT=%RHp\n",
174 i, pNew->aPTs[i].pPTR3, pNew->aPTs[i].pPTRC, pNew->aPTs[i].pPTR0, pNew->aPTs[i].HCPhysPT));
175
176 /*
177 * PAE.
178 */
179 pNew->aPTs[i].HCPhysPaePT0 = MMR3HyperHCVirt2HCPhys(pVM, pbPTs);
180 pNew->aPTs[i].HCPhysPaePT1 = MMR3HyperHCVirt2HCPhys(pVM, pbPTs + PAGE_SIZE);
181 pNew->aPTs[i].paPaePTsR3 = (PX86PTPAE)pbPTs;
182 pNew->aPTs[i].paPaePTsRC = MMHyperR3ToRC(pVM, pbPTs);
183 pNew->aPTs[i].paPaePTsR0 = MMHyperR3ToR0(pVM, pbPTs);
184 pbPTs += PAGE_SIZE * 2;
185 Log4(("PGMR3MapPT: i=%d: paPaePTsR#=%RHv paPaePTsRC=%RRv paPaePTsR#=%RHv HCPhysPaePT0=%RHp HCPhysPaePT1=%RHp\n",
186 i, pNew->aPTs[i].paPaePTsR3, pNew->aPTs[i].paPaePTsRC, pNew->aPTs[i].paPaePTsR0, pNew->aPTs[i].HCPhysPaePT0, pNew->aPTs[i].HCPhysPaePT1));
187 }
188 if (pVM->pgm.s.fFinalizedMappings)
189 pgmR3MapSetPDEs(pVM, pNew, iPageDir);
190 /* else PGMR3FinalizeMappings() */
191
192 /*
193 * Insert the new mapping.
194 */
195 pNew->pNextR3 = pCur;
196 pNew->pNextRC = pCur ? MMHyperR3ToRC(pVM, pCur) : NIL_RTRCPTR;
197 pNew->pNextR0 = pCur ? MMHyperR3ToR0(pVM, pCur) : NIL_RTR0PTR;
198 if (pPrev)
199 {
200 pPrev->pNextR3 = pNew;
201 pPrev->pNextRC = MMHyperR3ToRC(pVM, pNew);
202 pPrev->pNextR0 = MMHyperR3ToR0(pVM, pNew);
203 }
204 else
205 {
206 pVM->pgm.s.pMappingsR3 = pNew;
207 pVM->pgm.s.pMappingsRC = MMHyperR3ToRC(pVM, pNew);
208 pVM->pgm.s.pMappingsR0 = MMHyperR3ToR0(pVM, pNew);
209 }
210
211 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
212 return VINF_SUCCESS;
213}
214
215
216/**
217 * Removes a page table based mapping.
218 *
219 * @returns VBox status code.
220 * @param pVM VM Handle.
221 * @param GCPtr Virtual Address. (Page table aligned!)
222 */
223VMMR3DECL(int) PGMR3UnmapPT(PVM pVM, RTGCPTR GCPtr)
224{
225 LogFlow(("PGMR3UnmapPT: GCPtr=%#x\n", GCPtr));
226 AssertReturn(pVM->pgm.s.fFinalizedMappings, VERR_WRONG_ORDER);
227
228 /*
229 * Find it.
230 */
231 PPGMMAPPING pPrev = NULL;
232 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
233 while (pCur)
234 {
235 if (pCur->GCPtr == GCPtr)
236 {
237 /*
238 * Unlink it.
239 */
240 if (pPrev)
241 {
242 pPrev->pNextR3 = pCur->pNextR3;
243 pPrev->pNextRC = pCur->pNextRC;
244 pPrev->pNextR0 = pCur->pNextR0;
245 }
246 else
247 {
248 pVM->pgm.s.pMappingsR3 = pCur->pNextR3;
249 pVM->pgm.s.pMappingsRC = pCur->pNextRC;
250 pVM->pgm.s.pMappingsR0 = pCur->pNextR0;
251 }
252
253 /*
254 * Free the page table memory, clear page directory entries
255 * and free the page tables and node memory.
256 */
257 MMHyperFree(pVM, pCur->aPTs[0].pPTR3);
258 pgmR3MapClearPDEs(pVM, pCur, pCur->GCPtr >> X86_PD_SHIFT);
259 MMHyperFree(pVM, pCur);
260
261 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
262 return VINF_SUCCESS;
263 }
264
265 /* done? */
266 if (pCur->GCPtr > GCPtr)
267 break;
268
269 /* next */
270 pPrev = pCur;
271 pCur = pCur->pNextR3;
272 }
273
274 AssertMsgFailed(("No mapping for %#x found!\n", GCPtr));
275 return VERR_INVALID_PARAMETER;
276}
277
278
279/**
280 * Checks whether a range of PDEs in the intermediate
281 * memory context are unused.
282 *
283 * We're talking 32-bit PDEs here.
284 *
285 * @returns true/false.
286 * @param pVM Pointer to the shared VM structure.
287 * @param iPD The first PDE in the range.
288 * @param cPTs The number of PDEs in the range.
289 */
290DECLINLINE(bool) pgmR3AreIntermediatePDEsUnused(PVM pVM, unsigned iPD, unsigned cPTs)
291{
292 if (pVM->pgm.s.pInterPD->a[iPD].n.u1Present)
293 return false;
294 while (cPTs > 1)
295 {
296 iPD++;
297 if (pVM->pgm.s.pInterPD->a[iPD].n.u1Present)
298 return false;
299 cPTs--;
300 }
301 return true;
302}
303
304
305/**
306 * Unlinks the mapping.
307 *
308 * The mapping *must* be in the list.
309 *
310 * @param pVM Pointer to the shared VM structure.
311 * @param pMapping The mapping to unlink.
312 */
313static void pgmR3MapUnlink(PVM pVM, PPGMMAPPING pMapping)
314{
315 PPGMMAPPING pAfterThis = pVM->pgm.s.pMappingsR3;
316 if (pAfterThis == pMapping)
317 {
318 /* head */
319 pVM->pgm.s.pMappingsR3 = pMapping->pNextR3;
320 pVM->pgm.s.pMappingsRC = pMapping->pNextRC;
321 pVM->pgm.s.pMappingsR0 = pMapping->pNextR0;
322 }
323 else
324 {
325 /* in the list */
326 while (pAfterThis->pNextR3 != pMapping)
327 {
328 pAfterThis = pAfterThis->pNextR3;
329 AssertReleaseReturnVoid(pAfterThis);
330 }
331
332 pAfterThis->pNextR3 = pMapping->pNextR3;
333 pAfterThis->pNextRC = pMapping->pNextRC;
334 pAfterThis->pNextR0 = pMapping->pNextR0;
335 }
336}
337
338
339/**
340 * Links the mapping.
341 *
342 * @param pVM Pointer to the shared VM structure.
343 * @param pMapping The mapping to linked.
344 */
345static void pgmR3MapLink(PVM pVM, PPGMMAPPING pMapping)
346{
347 /*
348 * Find the list location (it's sorted by GCPhys) and link it in.
349 */
350 if ( !pVM->pgm.s.pMappingsR3
351 || pVM->pgm.s.pMappingsR3->GCPtr > pMapping->GCPtr)
352 {
353 /* head */
354 pMapping->pNextR3 = pVM->pgm.s.pMappingsR3;
355 pMapping->pNextRC = pVM->pgm.s.pMappingsRC;
356 pMapping->pNextR0 = pVM->pgm.s.pMappingsR0;
357 pVM->pgm.s.pMappingsR3 = pMapping;
358 pVM->pgm.s.pMappingsRC = MMHyperR3ToRC(pVM, pMapping);
359 pVM->pgm.s.pMappingsR0 = MMHyperR3ToR0(pVM, pMapping);
360 }
361 else
362 {
363 /* in the list */
364 PPGMMAPPING pAfterThis = pVM->pgm.s.pMappingsR3;
365 PPGMMAPPING pBeforeThis = pAfterThis->pNextR3;
366 while (pBeforeThis && pBeforeThis->GCPtr <= pMapping->GCPtr)
367 {
368 pAfterThis = pBeforeThis;
369 pBeforeThis = pBeforeThis->pNextR3;
370 }
371
372 pMapping->pNextR3 = pAfterThis->pNextR3;
373 pMapping->pNextRC = pAfterThis->pNextRC;
374 pMapping->pNextR0 = pAfterThis->pNextR0;
375 pAfterThis->pNextR3 = pMapping;
376 pAfterThis->pNextRC = MMHyperR3ToRC(pVM, pMapping);
377 pAfterThis->pNextR0 = MMHyperR3ToR0(pVM, pMapping);
378 }
379}
380
381
382/**
383 * Finalizes the intermediate context.
384 *
385 * This is called at the end of the ring-3 init and will construct the
386 * intermediate paging structures, relocating all the mappings in the process.
387 *
388 * @returns VBox status code.
389 * @param pVM Pointer to the shared VM structure.
390 * @thread EMT(0)
391 */
392VMMR3DECL(int) PGMR3FinalizeMappings(PVM pVM)
393{
394 AssertReturn(!pVM->pgm.s.fFinalizedMappings, VERR_WRONG_ORDER);
395 pVM->pgm.s.fFinalizedMappings = true;
396
397 /*
398 * Loop until all mappings have been finalized.
399 */
400 /*unsigned iPDNext = UINT32_C(0xc0000000) >> X86_PD_SHIFT;*/ /* makes CSAM/PATM freak out booting linux. :-/ */
401#if 0
402 unsigned iPDNext = MM_HYPER_AREA_ADDRESS >> X86_PD_SHIFT;
403#else
404 unsigned iPDNext = 1 << X86_PD_SHIFT; /* no hint, map them from the top. */
405#endif
406 PPGMMAPPING pCur;
407 do
408 {
409 pCur = pVM->pgm.s.pMappingsR3;
410 while (pCur)
411 {
412 if (!pCur->fFinalized)
413 {
414 /*
415 * Find a suitable location.
416 */
417 RTGCPTR const GCPtrOld = pCur->GCPtr;
418 const unsigned cPTs = pCur->cPTs;
419 unsigned iPDNew = iPDNext;
420 if ( iPDNew + cPTs >= X86_PG_ENTRIES /* exclude the last PD */
421 || !pgmR3AreIntermediatePDEsUnused(pVM, iPDNew, cPTs)
422 || !pCur->pfnRelocate(pVM, GCPtrOld, (RTGCPTR)iPDNew << X86_PD_SHIFT, PGMRELOCATECALL_SUGGEST, pCur->pvUser))
423 {
424 /* No luck, just scan down from 4GB-4MB, giving up at 4MB. */
425 iPDNew = X86_PG_ENTRIES - cPTs - 1;
426 while ( iPDNew > 0
427 && ( !pgmR3AreIntermediatePDEsUnused(pVM, iPDNew, cPTs)
428 || !pCur->pfnRelocate(pVM, GCPtrOld, (RTGCPTR)iPDNew << X86_PD_SHIFT, PGMRELOCATECALL_SUGGEST, pCur->pvUser))
429 )
430 iPDNew--;
431 AssertLogRelReturn(iPDNew != 0, VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
432 }
433
434 /*
435 * Relocate it (something akin to pgmR3MapRelocate).
436 */
437 pgmR3MapSetPDEs(pVM, pCur, iPDNew);
438
439 /* unlink the mapping, update the entry and relink it. */
440 pgmR3MapUnlink(pVM, pCur);
441
442 RTGCPTR const GCPtrNew = (RTGCPTR)iPDNew << X86_PD_SHIFT;
443 pCur->GCPtr = GCPtrNew;
444 pCur->GCPtrLast = GCPtrNew + pCur->cb - 1;
445 pCur->fFinalized = true;
446
447 pgmR3MapLink(pVM, pCur);
448
449 /* Finally work the callback. */
450 pCur->pfnRelocate(pVM, GCPtrOld, GCPtrNew, PGMRELOCATECALL_RELOCATE, pCur->pvUser);
451
452 /*
453 * The list order might have changed, start from the beginning again.
454 */
455 iPDNext = iPDNew + cPTs;
456 break;
457 }
458
459 /* next */
460 pCur = pCur->pNextR3;
461 }
462 } while (pCur);
463
464 return VINF_SUCCESS;
465}
466
467
468/**
469 * Gets the size of the current guest mappings if they were to be
470 * put next to oneanother.
471 *
472 * @returns VBox status code.
473 * @param pVM The VM.
474 * @param pcb Where to store the size.
475 */
476VMMR3DECL(int) PGMR3MappingsSize(PVM pVM, uint32_t *pcb)
477{
478 RTGCPTR cb = 0;
479 for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
480 cb += pCur->cb;
481
482 *pcb = cb;
483 AssertReturn(*pcb == cb, VERR_NUMBER_TOO_BIG);
484 Log(("PGMR3MappingsSize: return %d (%#x) bytes\n", cb, cb));
485 return VINF_SUCCESS;
486}
487
488
489/**
490 * Fixes the guest context mappings in a range reserved from the Guest OS.
491 *
492 * @returns VBox status code.
493 * @param pVM The VM.
494 * @param GCPtrBase The address of the reserved range of guest memory.
495 * @param cb The size of the range starting at GCPtrBase.
496 */
497VMMR3DECL(int) PGMR3MappingsFix(PVM pVM, RTGCPTR GCPtrBase, uint32_t cb)
498{
499 Log(("PGMR3MappingsFix: GCPtrBase=%#x cb=%#x\n", GCPtrBase, cb));
500
501 /* Ignore the additions mapping fix call in VT-x/AMD-V. */
502 if ( pVM->pgm.s.fMappingsFixed
503 && HWACCMR3IsActive(pVM))
504 return VINF_SUCCESS;
505
506 /*
507 * This is all or nothing at all. So, a tiny bit of paranoia first.
508 */
509 if (GCPtrBase & X86_PAGE_4M_OFFSET_MASK)
510 {
511 AssertMsgFailed(("GCPtrBase (%#x) has to be aligned on a 4MB address!\n", GCPtrBase));
512 return VERR_INVALID_PARAMETER;
513 }
514 if (!cb || (cb & X86_PAGE_4M_OFFSET_MASK))
515 {
516 AssertMsgFailed(("cb (%#x) is 0 or not aligned on a 4MB address!\n", cb));
517 return VERR_INVALID_PARAMETER;
518 }
519
520 /*
521 * Before we do anything we'll do a forced PD sync to try make sure any
522 * pending relocations because of these mappings have been resolved.
523 */
524 PGMSyncCR3(pVM, CPUMGetGuestCR0(pVM), CPUMGetGuestCR3(pVM), CPUMGetGuestCR4(pVM), true);
525
526 /*
527 * Check that it's not conflicting with a core code mapping in the intermediate page table.
528 */
529 unsigned iPDNew = GCPtrBase >> X86_PD_SHIFT;
530 unsigned i = cb >> X86_PD_SHIFT;
531 while (i-- > 0)
532 {
533 if (pVM->pgm.s.pInterPD->a[iPDNew + i].n.u1Present)
534 {
535 /* Check that it's not one or our mappings. */
536 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
537 while (pCur)
538 {
539 if (iPDNew + i - (pCur->GCPtr >> X86_PD_SHIFT) < (pCur->cb >> X86_PD_SHIFT))
540 break;
541 pCur = pCur->pNextR3;
542 }
543 if (!pCur)
544 {
545 LogRel(("PGMR3MappingsFix: Conflicts with intermediate PDE %#x (GCPtrBase=%RGv cb=%#zx). The guest should retry.\n",
546 iPDNew + i, GCPtrBase, cb));
547 return VERR_PGM_MAPPINGS_FIX_CONFLICT;
548 }
549 }
550 }
551
552 /*
553 * In PAE / PAE mode, make sure we don't cross page directories.
554 */
555 if ( ( pVM->pgm.s.enmGuestMode == PGMMODE_PAE
556 || pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX)
557 && ( pVM->pgm.s.enmShadowMode == PGMMODE_PAE
558 || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX))
559 {
560 unsigned iPdptBase = GCPtrBase >> X86_PDPT_SHIFT;
561 unsigned iPdptLast = (GCPtrBase + cb - 1) >> X86_PDPT_SHIFT;
562 if (iPdptBase != iPdptLast)
563 {
564 LogRel(("PGMR3MappingsFix: Crosses PD boundrary; iPdptBase=%#x iPdptLast=%#x (GCPtrBase=%RGv cb=%#zx). The guest should retry.\n",
565 iPdptBase, iPdptLast, GCPtrBase, cb));
566 return VERR_PGM_MAPPINGS_FIX_CONFLICT;
567 }
568 }
569
570 /*
571 * Loop the mappings and check that they all agree on their new locations.
572 */
573 RTGCPTR GCPtrCur = GCPtrBase;
574 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
575 while (pCur)
576 {
577 if (!pCur->pfnRelocate(pVM, pCur->GCPtr, GCPtrCur, PGMRELOCATECALL_SUGGEST, pCur->pvUser))
578 {
579 AssertMsgFailed(("The suggested fixed address %#x was rejected by '%s'!\n", GCPtrCur, pCur->pszDesc));
580 return VERR_PGM_MAPPINGS_FIX_REJECTED;
581 }
582 /* next */
583 GCPtrCur += pCur->cb;
584 pCur = pCur->pNextR3;
585 }
586 if (GCPtrCur > GCPtrBase + cb)
587 {
588 AssertMsgFailed(("cb (%#x) is less than the required range %#x!\n", cb, GCPtrCur - GCPtrBase));
589 return VERR_PGM_MAPPINGS_FIX_TOO_SMALL;
590 }
591
592 /*
593 * Loop the table assigning the mappings to the passed in memory
594 * and call their relocator callback.
595 */
596 GCPtrCur = GCPtrBase;
597 pCur = pVM->pgm.s.pMappingsR3;
598 while (pCur)
599 {
600 unsigned iPDOld = pCur->GCPtr >> X86_PD_SHIFT;
601 iPDNew = GCPtrCur >> X86_PD_SHIFT;
602
603 /*
604 * Relocate the page table(s).
605 */
606 pgmR3MapClearPDEs(pVM, pCur, iPDOld);
607 pgmR3MapSetPDEs(pVM, pCur, iPDNew);
608
609 /*
610 * Update the entry.
611 */
612 pCur->GCPtr = GCPtrCur;
613 pCur->GCPtrLast = GCPtrCur + pCur->cb - 1;
614
615 /*
616 * Callback to execute the relocation.
617 */
618 pCur->pfnRelocate(pVM, iPDOld << X86_PD_SHIFT, iPDNew << X86_PD_SHIFT, PGMRELOCATECALL_RELOCATE, pCur->pvUser);
619
620 /*
621 * Advance.
622 */
623 GCPtrCur += pCur->cb;
624 pCur = pCur->pNextR3;
625 }
626
627#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
628 /*
629 * Turn off CR3 updating monitoring.
630 */
631 int rc2 = PGM_GST_PFN(UnmonitorCR3, pVM)(pVM);
632 AssertRC(rc2);
633#endif
634
635 /*
636 * Mark the mappings as fixed and return.
637 */
638 pVM->pgm.s.fMappingsFixed = true;
639 pVM->pgm.s.GCPtrMappingFixed = GCPtrBase;
640 pVM->pgm.s.cbMappingFixed = cb;
641 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
642 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
643 return VINF_SUCCESS;
644}
645
646
647/**
648 * Unfixes the mappings.
649 * After calling this function mapping conflict detection will be enabled.
650 *
651 * @returns VBox status code.
652 * @param pVM The VM.
653 */
654VMMR3DECL(int) PGMR3MappingsUnfix(PVM pVM)
655{
656 Log(("PGMR3MappingsUnfix: fMappingsFixed=%d\n", pVM->pgm.s.fMappingsFixed));
657
658 /* Refuse in VT-x/AMD-V mode. */
659 if (HWACCMR3IsActive(pVM))
660 return VINF_SUCCESS;
661
662 pVM->pgm.s.fMappingsFixed = false;
663 pVM->pgm.s.GCPtrMappingFixed = 0;
664 pVM->pgm.s.cbMappingFixed = 0;
665 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
666
667 /*
668 * Re-enable the CR3 monitoring.
669 *
670 * Paranoia: We flush the page pool before doing that because Windows
671 * is using the CR3 page both as a PD and a PT, e.g. the pool may
672 * be monitoring it.
673 */
674#ifdef PGMPOOL_WITH_MONITORING
675 pgmPoolFlushAll(pVM);
676#endif
677 /* Remap CR3 as we have just flushed the CR3 shadow PML4 in case we're in long mode. */
678 int rc = PGM_GST_PFN(MapCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
679 AssertRCSuccess(rc);
680
681#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
682 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
683 AssertRCSuccess(rc);
684#endif
685 return VINF_SUCCESS;
686}
687
688
689/**
690 * Map pages into the intermediate context (switcher code).
691 * These pages are mapped at both the give virtual address and at
692 * the physical address (for identity mapping).
693 *
694 * @returns VBox status code.
695 * @param pVM The virtual machine.
696 * @param Addr Intermediate context address of the mapping.
697 * @param HCPhys Start of the range of physical pages. This must be entriely below 4GB!
698 * @param cbPages Number of bytes to map.
699 *
700 * @remark This API shall not be used to anything but mapping the switcher code.
701 */
702VMMR3DECL(int) PGMR3MapIntermediate(PVM pVM, RTUINTPTR Addr, RTHCPHYS HCPhys, unsigned cbPages)
703{
704 LogFlow(("PGMR3MapIntermediate: Addr=%RTptr HCPhys=%RHp cbPages=%#x\n", Addr, HCPhys, cbPages));
705
706 /*
707 * Adjust input.
708 */
709 cbPages += (uint32_t)HCPhys & PAGE_OFFSET_MASK;
710 cbPages = RT_ALIGN(cbPages, PAGE_SIZE);
711 HCPhys &= X86_PTE_PAE_PG_MASK;
712 Addr &= PAGE_BASE_MASK;
713 /* We only care about the first 4GB, because on AMD64 we'll be repeating them all over the address space. */
714 uint32_t uAddress = (uint32_t)Addr;
715
716 /*
717 * Assert input and state.
718 */
719 AssertMsg(pVM->pgm.s.offVM, ("Bad init order\n"));
720 AssertMsg(pVM->pgm.s.pInterPD, ("Bad init order, paging.\n"));
721 AssertMsg(cbPages <= (512 << PAGE_SHIFT), ("The mapping is too big %d bytes\n", cbPages));
722 AssertMsg(HCPhys < _4G && HCPhys + cbPages < _4G, ("Addr=%RTptr HCPhys=%RHp cbPages=%d\n", Addr, HCPhys, cbPages));
723 AssertReturn(!pVM->pgm.s.fFinalizedMappings, VERR_WRONG_ORDER);
724
725 /*
726 * Check for internal conflicts between the virtual address and the physical address.
727 * A 1:1 mapping is fine, but partial overlapping is a no-no.
728 */
729 if ( uAddress != HCPhys
730 && ( uAddress < HCPhys
731 ? HCPhys - uAddress < cbPages
732 : uAddress - HCPhys < cbPages
733 )
734 )
735 AssertLogRelMsgFailedReturn(("Addr=%RTptr HCPhys=%RHp cbPages=%d\n", Addr, HCPhys, cbPages),
736 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
737
738 const unsigned cPages = cbPages >> PAGE_SHIFT;
739 int rc = pgmR3MapIntermediateCheckOne(pVM, uAddress, cPages, pVM->pgm.s.apInterPTs[0], pVM->pgm.s.apInterPaePTs[0]);
740 if (RT_FAILURE(rc))
741 return rc;
742 rc = pgmR3MapIntermediateCheckOne(pVM, (uintptr_t)HCPhys, cPages, pVM->pgm.s.apInterPTs[1], pVM->pgm.s.apInterPaePTs[1]);
743 if (RT_FAILURE(rc))
744 return rc;
745
746 /*
747 * Everythings fine, do the mapping.
748 */
749 pgmR3MapIntermediateDoOne(pVM, uAddress, HCPhys, cPages, pVM->pgm.s.apInterPTs[0], pVM->pgm.s.apInterPaePTs[0]);
750 pgmR3MapIntermediateDoOne(pVM, (uintptr_t)HCPhys, HCPhys, cPages, pVM->pgm.s.apInterPTs[1], pVM->pgm.s.apInterPaePTs[1]);
751
752 return VINF_SUCCESS;
753}
754
755
756/**
757 * Validates that there are no conflicts for this mapping into the intermediate context.
758 *
759 * @returns VBox status code.
760 * @param pVM VM handle.
761 * @param uAddress Address of the mapping.
762 * @param cPages Number of pages.
763 * @param pPTDefault Pointer to the default page table for this mapping.
764 * @param pPTPaeDefault Pointer to the default page table for this mapping.
765 */
766static int pgmR3MapIntermediateCheckOne(PVM pVM, uintptr_t uAddress, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault)
767{
768 AssertMsg((uAddress >> X86_PD_SHIFT) + cPages <= 1024, ("64-bit fixme\n"));
769
770 /*
771 * Check that the ranges are available.
772 * (This code doesn't have to be fast.)
773 */
774 while (cPages > 0)
775 {
776 /*
777 * 32-Bit.
778 */
779 unsigned iPDE = (uAddress >> X86_PD_SHIFT) & X86_PD_MASK;
780 unsigned iPTE = (uAddress >> X86_PT_SHIFT) & X86_PT_MASK;
781 PX86PT pPT = pPTDefault;
782 if (pVM->pgm.s.pInterPD->a[iPDE].u)
783 {
784 RTHCPHYS HCPhysPT = pVM->pgm.s.pInterPD->a[iPDE].u & X86_PDE_PG_MASK;
785 if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPTs[0]))
786 pPT = pVM->pgm.s.apInterPTs[0];
787 else if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPTs[1]))
788 pPT = pVM->pgm.s.apInterPTs[1];
789 else
790 {
791 /** @todo this must be handled with a relocation of the conflicting mapping!
792 * Which of course cannot be done because we're in the middle of the initialization. bad design! */
793 AssertLogRelMsgFailedReturn(("Conflict between core code and PGMR3Mapping(). uAddress=%RHv\n", uAddress),
794 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
795 }
796 }
797 if (pPT->a[iPTE].u)
798 AssertLogRelMsgFailedReturn(("Conflict iPTE=%#x iPDE=%#x uAddress=%RHv pPT->a[iPTE].u=%RX32\n", iPTE, iPDE, uAddress, pPT->a[iPTE].u),
799 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
800
801 /*
802 * PAE.
803 */
804 const unsigned iPDPE= (uAddress >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
805 iPDE = (uAddress >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
806 iPTE = (uAddress >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK;
807 Assert(iPDPE < 4);
808 Assert(pVM->pgm.s.apInterPaePDs[iPDPE]);
809 PX86PTPAE pPTPae = pPTPaeDefault;
810 if (pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u)
811 {
812 RTHCPHYS HCPhysPT = pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u & X86_PDE_PAE_PG_MASK;
813 if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPaePTs[0]))
814 pPTPae = pVM->pgm.s.apInterPaePTs[0];
815 else if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPaePTs[0]))
816 pPTPae = pVM->pgm.s.apInterPaePTs[1];
817 else
818 {
819 /** @todo this must be handled with a relocation of the conflicting mapping!
820 * Which of course cannot be done because we're in the middle of the initialization. bad design! */
821 AssertLogRelMsgFailedReturn(("Conflict between core code and PGMR3Mapping(). uAddress=%RHv\n", uAddress),
822 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
823 }
824 }
825 if (pPTPae->a[iPTE].u)
826 AssertLogRelMsgFailedReturn(("Conflict iPTE=%#x iPDE=%#x uAddress=%RHv pPTPae->a[iPTE].u=%#RX64\n", iPTE, iPDE, uAddress, pPTPae->a[iPTE].u),
827 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
828
829 /* next */
830 uAddress += PAGE_SIZE;
831 cPages--;
832 }
833
834 return VINF_SUCCESS;
835}
836
837
838
839/**
840 * Sets up the intermediate page tables for a verified mapping.
841 *
842 * @param pVM VM handle.
843 * @param uAddress Address of the mapping.
844 * @param HCPhys The physical address of the page range.
845 * @param cPages Number of pages.
846 * @param pPTDefault Pointer to the default page table for this mapping.
847 * @param pPTPaeDefault Pointer to the default page table for this mapping.
848 */
849static void pgmR3MapIntermediateDoOne(PVM pVM, uintptr_t uAddress, RTHCPHYS HCPhys, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault)
850{
851 while (cPages > 0)
852 {
853 /*
854 * 32-Bit.
855 */
856 unsigned iPDE = (uAddress >> X86_PD_SHIFT) & X86_PD_MASK;
857 unsigned iPTE = (uAddress >> X86_PT_SHIFT) & X86_PT_MASK;
858 PX86PT pPT;
859 if (pVM->pgm.s.pInterPD->a[iPDE].u)
860 pPT = (PX86PT)MMPagePhys2Page(pVM, pVM->pgm.s.pInterPD->a[iPDE].u & X86_PDE_PG_MASK);
861 else
862 {
863 pVM->pgm.s.pInterPD->a[iPDE].u = X86_PDE_P | X86_PDE_A | X86_PDE_RW
864 | (uint32_t)MMPage2Phys(pVM, pPTDefault);
865 pPT = pPTDefault;
866 }
867 pPT->a[iPTE].u = X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D | (uint32_t)HCPhys;
868
869 /*
870 * PAE
871 */
872 const unsigned iPDPE= (uAddress >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
873 iPDE = (uAddress >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
874 iPTE = (uAddress >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK;
875 Assert(iPDPE < 4);
876 Assert(pVM->pgm.s.apInterPaePDs[iPDPE]);
877 PX86PTPAE pPTPae;
878 if (pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u)
879 pPTPae = (PX86PTPAE)MMPagePhys2Page(pVM, pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u & X86_PDE_PAE_PG_MASK);
880 else
881 {
882 pPTPae = pPTPaeDefault;
883 pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u = X86_PDE_P | X86_PDE_A | X86_PDE_RW
884 | MMPage2Phys(pVM, pPTPaeDefault);
885 }
886 pPTPae->a[iPTE].u = X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D | HCPhys;
887
888 /* next */
889 cPages--;
890 HCPhys += PAGE_SIZE;
891 uAddress += PAGE_SIZE;
892 }
893}
894
895
896/**
897 * Clears all PDEs involved with the mapping in the shadow and intermediate page tables.
898 *
899 * @param pVM The VM handle.
900 * @param pMap Pointer to the mapping in question.
901 * @param iOldPDE The index of the 32-bit PDE corresponding to the base of the mapping.
902 */
903static void pgmR3MapClearPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE)
904{
905 unsigned i = pMap->cPTs;
906
907#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
908 pgmR3MapClearShadowPDEs(pVM, pMap, iOldPDE);
909#endif
910
911 iOldPDE += i;
912 while (i-- > 0)
913 {
914 iOldPDE--;
915
916 /*
917 * 32-bit.
918 */
919 pVM->pgm.s.pInterPD->a[iOldPDE].u = 0;
920#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
921 pVM->pgm.s.pShw32BitPdR3->a[iOldPDE].u = 0;
922#endif
923 /*
924 * PAE.
925 */
926 const unsigned iPD = iOldPDE / 256; /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
927 unsigned iPDE = iOldPDE * 2 % 512;
928 pVM->pgm.s.apInterPaePDs[iPD]->a[iPDE].u = 0;
929#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
930 pVM->pgm.s.apShwPaePDsR3[iPD]->a[iPDE].u = 0;
931#endif
932 iPDE++;
933 AssertFatal(iPDE < 512);
934 pVM->pgm.s.apInterPaePDs[iPD]->a[iPDE].u = 0;
935#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
936 pVM->pgm.s.apShwPaePDsR3[iPD]->a[iPDE].u = 0;
937
938 /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode) */
939 pVM->pgm.s.pShwPaePdptR3->a[iPD].u &= ~PGM_PLXFLAGS_MAPPING;
940#endif
941 }
942}
943
944
945#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
946/**
947 * Clears all PDEs involved with the mapping in the shadow page table.
948 *
949 * @param pVM The VM handle.
950 * @param pMap Pointer to the mapping in question.
951 * @param iOldPDE The index of the 32-bit PDE corresponding to the base of the mapping.
952 */
953static void pgmR3MapClearShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE)
954{
955 unsigned i = pMap->cPTs;
956 PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
957
958 if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
959 return;
960
961 iOldPDE += i;
962 while (i-- > 0)
963 {
964 iOldPDE--;
965
966 switch(enmShadowMode)
967 {
968 case PGMMODE_32_BIT:
969 {
970 PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
971 AssertFatal(pShw32BitPd);
972
973 pShw32BitPd->a[iOldPDE].u = 0;
974 break;
975 }
976
977 case PGMMODE_PAE:
978 case PGMMODE_PAE_NX:
979 {
980 PX86PDPT pPdpt = NULL;
981 PX86PDPAE pShwPaePd = NULL;
982
983 const unsigned iPD = iOldPDE / 256; /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
984 unsigned iPDE = iOldPDE * 2 % 512;
985 pPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
986 pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPD << X86_PDPT_SHIFT));
987 AssertFatal(pShwPaePd);
988
989 pShwPaePd->a[iPDE].u = 0;
990
991 iPDE++;
992 AssertFatal(iPDE < 512);
993
994 pShwPaePd->a[iPDE].u = 0;
995 /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode) */
996 pPdpt->a[iPD].u &= ~PGM_PLXFLAGS_MAPPING;
997 break;
998 }
999 }
1000 }
1001}
1002#endif
1003
1004/**
1005 * Sets all PDEs involved with the mapping in the shadow and intermediate page tables.
1006 *
1007 * @param pVM The VM handle.
1008 * @param pMap Pointer to the mapping in question.
1009 * @param iNewPDE The index of the 32-bit PDE corresponding to the base of the mapping.
1010 */
1011static void pgmR3MapSetPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
1012{
1013 PPGM pPGM = &pVM->pgm.s;
1014
1015 Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s) || PGMGetGuestMode(pVM) <= PGMMODE_PAE_NX);
1016
1017#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1018 pgmR3MapSetShadowPDEs(pVM, pMap, iNewPDE);
1019#endif
1020
1021 /*
1022 * Init the page tables and insert them into the page directories.
1023 */
1024 unsigned i = pMap->cPTs;
1025 iNewPDE += i;
1026 while (i-- > 0)
1027 {
1028 iNewPDE--;
1029
1030 /*
1031 * 32-bit.
1032 */
1033#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
1034 if ( pgmMapAreMappingsEnabled(&pVM->pgm.s)
1035 && pPGM->pShw32BitPdR3->a[iNewPDE].n.u1Present)
1036 {
1037 Assert(!(pPGM->pShw32BitPdR3->a[iNewPDE].u & PGM_PDFLAGS_MAPPING));
1038 pgmPoolFree(pVM, pPGM->pShw32BitPdR3->a[iNewPDE].u & X86_PDE_PG_MASK, PGMPOOL_IDX_PD, iNewPDE);
1039 }
1040#endif
1041 X86PDE Pde;
1042 /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags */
1043 Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;
1044 pPGM->pInterPD->a[iNewPDE] = Pde;
1045#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
1046 if (pgmMapAreMappingsEnabled(&pVM->pgm.s))
1047 pPGM->pShw32BitPdR3->a[iNewPDE] = Pde;
1048#endif
1049 /*
1050 * PAE.
1051 */
1052 const unsigned iPD = iNewPDE / 256;
1053 unsigned iPDE = iNewPDE * 2 % 512;
1054#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
1055 if ( pgmMapAreMappingsEnabled(&pVM->pgm.s)
1056 && pPGM->apShwPaePDsR3[iPD]->a[iPDE].n.u1Present)
1057 {
1058 Assert(!(pPGM->apShwPaePDsR3[iPD]->a[iPDE].u & PGM_PDFLAGS_MAPPING));
1059 pgmPoolFree(pVM, pPGM->apShwPaePDsR3[iPD]->a[iPDE].u & X86_PDE_PAE_PG_MASK, PGMPOOL_IDX_PAE_PD, iNewPDE * 2);
1060 }
1061#endif
1062 X86PDEPAE PdePae0;
1063 PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;
1064 pPGM->apInterPaePDs[iPD]->a[iPDE] = PdePae0;
1065#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
1066 if (pgmMapAreMappingsEnabled(&pVM->pgm.s))
1067 pPGM->apShwPaePDsR3[iPD]->a[iPDE] = PdePae0;
1068#endif
1069 iPDE++;
1070 AssertFatal(iPDE < 512);
1071#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
1072 if ( pgmMapAreMappingsEnabled(&pVM->pgm.s)
1073 && pPGM->apShwPaePDsR3[iPD]->a[iPDE].n.u1Present)
1074 {
1075 Assert(!(pPGM->apShwPaePDsR3[iPD]->a[iPDE].u & PGM_PDFLAGS_MAPPING));
1076 pgmPoolFree(pVM, pPGM->apShwPaePDsR3[iPD]->a[iPDE].u & X86_PDE_PAE_PG_MASK, PGMPOOL_IDX_PAE_PD, iNewPDE * 2 + 1);
1077 }
1078#endif
1079 X86PDEPAE PdePae1;
1080 PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;
1081 pPGM->apInterPaePDs[iPD]->a[iPDE] = PdePae1;
1082#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
1083 if (pgmMapAreMappingsEnabled(&pVM->pgm.s))
1084 {
1085 pPGM->apShwPaePDsR3[iPD]->a[iPDE] = PdePae1;
1086
1087 /* Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode) */
1088 pPGM->pShwPaePdptR3->a[iPD].u |= PGM_PLXFLAGS_MAPPING;
1089 }
1090#endif
1091 }
1092}
1093
1094#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1095/**
1096 * Sets all PDEs involved with the mapping in the shadow page table.
1097 *
1098 * @param pVM The VM handle.
1099 * @param pMap Pointer to the mapping in question.
1100 * @param iNewPDE The index of the 32-bit PDE corresponding to the base of the mapping.
1101 */
1102static void pgmR3MapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
1103{
1104 PPGM pPGM = &pVM->pgm.s;
1105 PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
1106
1107 if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
1108 return;
1109
1110 Assert(enmShadowMode <= PGMMODE_PAE_NX);
1111
1112 /*
1113 * Init the page tables and insert them into the page directories.
1114 */
1115 unsigned i = pMap->cPTs;
1116 iNewPDE += i;
1117 while (i-- > 0)
1118 {
1119 iNewPDE--;
1120
1121 switch(enmShadowMode)
1122 {
1123 case PGMMODE_32_BIT:
1124 {
1125 PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
1126 AssertFatal(pShw32BitPd);
1127
1128 if (pShw32BitPd->a[iNewPDE].n.u1Present)
1129 {
1130 Assert(!(pShw32BitPd->a[iNewPDE].u & PGM_PDFLAGS_MAPPING));
1131 pgmPoolFree(pVM, pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK, pVM->pgm.s.pShwPageCR3R3->idx, iNewPDE);
1132 }
1133
1134 X86PDE Pde;
1135 /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags */
1136 Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;
1137 pShw32BitPd->a[iNewPDE] = Pde;
1138 break;
1139 }
1140
1141 case PGMMODE_PAE:
1142 case PGMMODE_PAE_NX:
1143 {
1144 PX86PDPT pShwPdpt;
1145 PX86PDPAE pShwPaePd;
1146 const unsigned iPdPt = iNewPDE / 256;
1147 unsigned iPDE = iNewPDE * 2 % 512;
1148
1149 pShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
1150 Assert(pShwPdpt);
1151 pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
1152 AssertFatal(pShwPaePd);
1153
1154 PPGMPOOLPAGE pPoolPagePde = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1155 AssertFatal(pPoolPagePde);
1156
1157 if (pShwPaePd->a[iPDE].n.u1Present)
1158 {
1159 Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
1160 pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePde->idx, iNewPDE);
1161 }
1162
1163 X86PDEPAE PdePae0;
1164 PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;
1165 pShwPaePd->a[iPDE] = PdePae0;
1166
1167 /* 2nd 2 MB PDE of the 4 MB region */
1168 iPDE++;
1169 AssertFatal(iPDE < 512);
1170
1171 if (pShwPaePd->a[iPDE].n.u1Present)
1172 {
1173 Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
1174 pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePde->idx, iNewPDE);
1175 }
1176
1177 X86PDEPAE PdePae1;
1178 PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;
1179 pShwPaePd->a[iPDE] = PdePae1;
1180
1181 /* Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode) */
1182 pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING;
1183 }
1184 }
1185 }
1186}
1187#endif
1188
1189/**
1190 * Relocates a mapping to a new address.
1191 *
1192 * @param pVM VM handle.
1193 * @param pMapping The mapping to relocate.
1194 * @param GCPtrOldMapping The address of the start of the old mapping.
1195 * @param GCPtrNewMapping The address of the start of the new mapping.
1196 */
1197void pgmR3MapRelocate(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping, RTGCPTR GCPtrNewMapping)
1198{
1199 unsigned iPDOld = GCPtrOldMapping >> X86_PD_SHIFT;
1200 unsigned iPDNew = GCPtrNewMapping >> X86_PD_SHIFT;
1201
1202 Log(("PGM: Relocating %s from %RGv to %RGv\n", pMapping->pszDesc, GCPtrOldMapping, GCPtrNewMapping));
1203 Assert(((unsigned)iPDOld << X86_PD_SHIFT) == pMapping->GCPtr);
1204
1205 /*
1206 * Relocate the page table(s).
1207 */
1208 pgmR3MapClearPDEs(pVM, pMapping, iPDOld);
1209 pgmR3MapSetPDEs(pVM, pMapping, iPDNew);
1210
1211 /*
1212 * Update and resort the mapping list.
1213 */
1214
1215 /* Find previous mapping for pMapping, put result into pPrevMap. */
1216 PPGMMAPPING pPrevMap = NULL;
1217 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
1218 while (pCur && pCur != pMapping)
1219 {
1220 /* next */
1221 pPrevMap = pCur;
1222 pCur = pCur->pNextR3;
1223 }
1224 Assert(pCur);
1225
1226 /* Find mapping which >= than pMapping. */
1227 RTGCPTR GCPtrNew = iPDNew << X86_PD_SHIFT;
1228 PPGMMAPPING pPrev = NULL;
1229 pCur = pVM->pgm.s.pMappingsR3;
1230 while (pCur && pCur->GCPtr < GCPtrNew)
1231 {
1232 /* next */
1233 pPrev = pCur;
1234 pCur = pCur->pNextR3;
1235 }
1236
1237 if (pCur != pMapping && pPrev != pMapping)
1238 {
1239 /*
1240 * Unlink.
1241 */
1242 if (pPrevMap)
1243 {
1244 pPrevMap->pNextR3 = pMapping->pNextR3;
1245 pPrevMap->pNextRC = pMapping->pNextRC;
1246 pPrevMap->pNextR0 = pMapping->pNextR0;
1247 }
1248 else
1249 {
1250 pVM->pgm.s.pMappingsR3 = pMapping->pNextR3;
1251 pVM->pgm.s.pMappingsRC = pMapping->pNextRC;
1252 pVM->pgm.s.pMappingsR0 = pMapping->pNextR0;
1253 }
1254
1255 /*
1256 * Link
1257 */
1258 pMapping->pNextR3 = pCur;
1259 if (pPrev)
1260 {
1261 pMapping->pNextRC = pPrev->pNextRC;
1262 pMapping->pNextR0 = pPrev->pNextR0;
1263 pPrev->pNextR3 = pMapping;
1264 pPrev->pNextRC = MMHyperR3ToRC(pVM, pMapping);
1265 pPrev->pNextR0 = MMHyperR3ToR0(pVM, pMapping);
1266 }
1267 else
1268 {
1269 pMapping->pNextRC = pVM->pgm.s.pMappingsRC;
1270 pMapping->pNextR0 = pVM->pgm.s.pMappingsR0;
1271 pVM->pgm.s.pMappingsR3 = pMapping;
1272 pVM->pgm.s.pMappingsRC = MMHyperR3ToRC(pVM, pMapping);
1273 pVM->pgm.s.pMappingsR0 = MMHyperR3ToR0(pVM, pMapping);
1274 }
1275 }
1276
1277 /*
1278 * Update the entry.
1279 */
1280 pMapping->GCPtr = GCPtrNew;
1281 pMapping->GCPtrLast = GCPtrNew + pMapping->cb - 1;
1282
1283 /*
1284 * Callback to execute the relocation.
1285 */
1286 pMapping->pfnRelocate(pVM, iPDOld << X86_PD_SHIFT, iPDNew << X86_PD_SHIFT, PGMRELOCATECALL_RELOCATE, pMapping->pvUser);
1287}
1288
1289
1290/**
1291 * Resolves a conflict between a page table based GC mapping and
1292 * the Guest OS page tables. (32 bits version)
1293 *
1294 * @returns VBox status code.
1295 * @param pVM VM Handle.
1296 * @param pMapping The mapping which conflicts.
1297 * @param pPDSrc The page directory of the guest OS.
1298 * @param GCPtrOldMapping The address of the start of the current mapping.
1299 */
1300int pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PX86PD pPDSrc, RTGCPTR GCPtrOldMapping)
1301{
1302 STAM_PROFILE_START(&pVM->pgm.s.StatR3ResolveConflict, a);
1303
1304 /*
1305 * Scan for free page directory entries.
1306 *
1307 * Note that we do not support mappings at the very end of the
1308 * address space since that will break our GCPtrEnd assumptions.
1309 */
1310 const unsigned cPTs = pMapping->cPTs;
1311 unsigned iPDNew = RT_ELEMENTS(pPDSrc->a) - cPTs; /* (+ 1 - 1) */
1312 while (iPDNew-- > 0)
1313 {
1314 if (pPDSrc->a[iPDNew].n.u1Present)
1315 continue;
1316 if (cPTs > 1)
1317 {
1318 bool fOk = true;
1319 for (unsigned i = 1; fOk && i < cPTs; i++)
1320 if (pPDSrc->a[iPDNew + i].n.u1Present)
1321 fOk = false;
1322 if (!fOk)
1323 continue;
1324 }
1325
1326 /*
1327 * Check that it's not conflicting with an intermediate page table mapping.
1328 */
1329 bool fOk = true;
1330 unsigned i = cPTs;
1331 while (fOk && i-- > 0)
1332 fOk = !pVM->pgm.s.pInterPD->a[iPDNew + i].n.u1Present;
1333 if (!fOk)
1334 continue;
1335 /** @todo AMD64 should check the PAE directories and skip the 32bit stuff. */
1336
1337 /*
1338 * Ask for the mapping.
1339 */
1340 RTGCPTR GCPtrNewMapping = iPDNew << X86_PD_SHIFT;
1341
1342 if (pMapping->pfnRelocate(pVM, GCPtrOldMapping, GCPtrNewMapping, PGMRELOCATECALL_SUGGEST, pMapping->pvUser))
1343 {
1344 pgmR3MapRelocate(pVM, pMapping, GCPtrOldMapping, GCPtrNewMapping);
1345 STAM_PROFILE_STOP(&pVM->pgm.s.StatR3ResolveConflict, a);
1346 return VINF_SUCCESS;
1347 }
1348 }
1349
1350 STAM_PROFILE_STOP(&pVM->pgm.s.StatR3ResolveConflict, a);
1351 AssertMsgFailed(("Failed to relocate page table mapping '%s' from %#x! (cPTs=%d)\n", pMapping->pszDesc, GCPtrOldMapping, cPTs));
1352 return VERR_PGM_NO_HYPERVISOR_ADDRESS;
1353}
1354
1355
1356/**
1357 * Resolves a conflict between a page table based GC mapping and
1358 * the Guest OS page tables. (PAE bits version)
1359 *
1360 * @returns VBox status code.
1361 * @param pVM VM Handle.
1362 * @param pMapping The mapping which conflicts.
1363 * @param GCPtrOldMapping The address of the start of the current mapping.
1364 */
1365int pgmR3SyncPTResolveConflictPAE(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping)
1366{
1367 STAM_PROFILE_START(&pVM->pgm.s.StatR3ResolveConflict, a);
1368
1369 for (int iPDPTE = X86_PG_PAE_PDPE_ENTRIES - 1; iPDPTE >= 0; iPDPTE--)
1370 {
1371 unsigned iPDSrc;
1372 PX86PDPAE pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, (RTGCPTR32)iPDPTE << X86_PDPT_SHIFT, &iPDSrc, NULL);
1373
1374#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1375 /* It would be annoying to have to deal with a PD that isn't (yet) present in the guest PDPT. */
1376 if (!pPDSrc)
1377 continue;
1378#endif
1379
1380 /*
1381 * Scan for free page directory entries.
1382 *
1383 * Note that we do not support mappings at the very end of the
1384 * address space since that will break our GCPtrEnd assumptions.
1385 * Nor do we support mappings crossing page directories.
1386 */
1387 const unsigned cPTs = pMapping->cb >> X86_PD_PAE_SHIFT;
1388 unsigned iPDNew = RT_ELEMENTS(pPDSrc->a) - cPTs; /* (+ 1 - 1) */
1389
1390 while (iPDNew-- > 0)
1391 {
1392 /* Ugly assumption that mappings start on a 4 MB boundary. */
1393 if (iPDNew & 1)
1394 continue;
1395
1396 if (pPDSrc)
1397 {
1398 if (pPDSrc->a[iPDNew].n.u1Present)
1399 continue;
1400 if (cPTs > 1)
1401 {
1402 bool fOk = true;
1403 for (unsigned i = 1; fOk && i < cPTs; i++)
1404 if (pPDSrc->a[iPDNew + i].n.u1Present)
1405 fOk = false;
1406 if (!fOk)
1407 continue;
1408 }
1409 }
1410 /*
1411 * Check that it's not conflicting with an intermediate page table mapping.
1412 */
1413 bool fOk = true;
1414 unsigned i = cPTs;
1415 while (fOk && i-- > 0)
1416 fOk = !pVM->pgm.s.apInterPaePDs[iPDPTE]->a[iPDNew + i].n.u1Present;
1417 if (!fOk)
1418 continue;
1419
1420 /*
1421 * Ask for the mapping.
1422 */
1423 RTGCPTR GCPtrNewMapping = ((RTGCPTR32)iPDPTE << X86_PDPT_SHIFT) + (iPDNew << X86_PD_PAE_SHIFT);
1424
1425 if (pMapping->pfnRelocate(pVM, GCPtrOldMapping, GCPtrNewMapping, PGMRELOCATECALL_SUGGEST, pMapping->pvUser))
1426 {
1427 pgmR3MapRelocate(pVM, pMapping, GCPtrOldMapping, GCPtrNewMapping);
1428 STAM_PROFILE_STOP(&pVM->pgm.s.StatR3ResolveConflict, a);
1429 return VINF_SUCCESS;
1430 }
1431 }
1432 }
1433 STAM_PROFILE_STOP(&pVM->pgm.s.StatR3ResolveConflict, a);
1434 AssertMsgFailed(("Failed to relocate page table mapping '%s' from %#x! (cPTs=%d)\n", pMapping->pszDesc, GCPtrOldMapping, pMapping->cb >> X86_PD_PAE_SHIFT));
1435 return VERR_PGM_NO_HYPERVISOR_ADDRESS;
1436}
1437
1438
1439/**
1440 * Checks guest PD for conflicts with VMM GC mappings.
1441 *
1442 * @returns true if conflict detected.
1443 * @returns false if not.
1444 * @param pVM The virtual machine.
1445 * @param cr3 Guest context CR3 register.
1446 * @param fRawR0 Whether RawR0 is enabled or not.
1447 */
1448VMMR3DECL(bool) PGMR3MapHasConflicts(PVM pVM, uint64_t cr3, bool fRawR0) /** @todo how many HasConflict constructs do we really need? */
1449{
1450 /*
1451 * Can skip this if mappings are safely fixed.
1452 */
1453 if (pVM->pgm.s.fMappingsFixed)
1454 return false;
1455
1456 PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
1457 Assert(enmGuestMode <= PGMMODE_PAE_NX);
1458
1459 /*
1460 * Iterate mappings.
1461 */
1462 if (enmGuestMode == PGMMODE_32_BIT)
1463 {
1464 /*
1465 * Resolve the page directory.
1466 */
1467 PX86PD pPD = pVM->pgm.s.pGst32BitPdR3;
1468 Assert(pPD);
1469 Assert(pPD == (PX86PD)PGMPhysGCPhys2R3PtrAssert(pVM, cr3 & X86_CR3_PAGE_MASK, sizeof(*pPD)));
1470
1471 for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
1472 {
1473 unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
1474 unsigned iPT = pCur->cPTs;
1475 while (iPT-- > 0)
1476 if ( pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
1477 && (fRawR0 || pPD->a[iPDE + iPT].n.u1User))
1478 {
1479 STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
1480 Log(("PGMR3HasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
1481 " iPDE=%#x iPT=%#x PDE=%RGp.\n",
1482 (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
1483 iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
1484 return true;
1485 }
1486 }
1487 }
1488 else if ( enmGuestMode == PGMMODE_PAE
1489 || enmGuestMode == PGMMODE_PAE_NX)
1490 {
1491 for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
1492 {
1493 RTGCPTR GCPtr = pCur->GCPtr;
1494
1495 unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
1496 while (iPT-- > 0)
1497 {
1498 X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
1499
1500 if ( Pde.n.u1Present
1501 && (fRawR0 || Pde.n.u1User))
1502 {
1503 STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
1504 Log(("PGMR3HasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
1505 " PDE=%016RX64.\n",
1506 GCPtr, pCur->pszDesc, Pde.u));
1507 return true;
1508 }
1509 GCPtr += (1 << X86_PD_PAE_SHIFT);
1510 }
1511 }
1512 }
1513 else
1514 AssertFailed();
1515
1516 return false;
1517}
1518
1519#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1520/**
1521 * Apply the hypervisor mappings to the active CR3.
1522 *
1523 * @returns VBox status.
1524 * @param pVM The virtual machine.
1525 */
1526VMMR3DECL(int) PGMR3MapActivate(PVM pVM)
1527{
1528 /*
1529 * Can skip this if mappings are safely fixed.
1530 */
1531 if (pVM->pgm.s.fMappingsFixed)
1532 return VINF_SUCCESS;
1533
1534 /*
1535 * Iterate mappings.
1536 */
1537 for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
1538 {
1539 unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
1540
1541 pgmR3MapSetShadowPDEs(pVM, pCur, iPDE);
1542 }
1543
1544 return VINF_SUCCESS;
1545}
1546
1547/**
1548 * Remove the hypervisor mappings from the active CR3
1549 *
1550 * @returns VBox status.
1551 * @param pVM The virtual machine.
1552 */
1553VMMR3DECL(int) PGMR3MapDeactivate(PVM pVM)
1554{
1555 /*
1556 * Can skip this if mappings are safely fixed.
1557 */
1558 if (pVM->pgm.s.fMappingsFixed)
1559 return VINF_SUCCESS;
1560
1561 /*
1562 * Iterate mappings.
1563 */
1564 for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
1565 {
1566 unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
1567
1568 pgmR3MapClearShadowPDEs(pVM, pCur, iPDE);
1569 }
1570 return VINF_SUCCESS;
1571}
1572#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
1573
1574/**
1575 * Read memory from the guest mappings.
1576 *
1577 * This will use the page tables associated with the mappings to
1578 * read the memory. This means that not all kind of memory is readable
1579 * since we don't necessarily know how to convert that physical address
1580 * to a HC virtual one.
1581 *
1582 * @returns VBox status.
1583 * @param pVM VM handle.
1584 * @param pvDst The destination address (HC of course).
1585 * @param GCPtrSrc The source address (GC virtual address).
1586 * @param cb Number of bytes to read.
1587 *
1588 * @remarks The is indirectly for DBGF only.
1589 * @todo Consider renaming it to indicate it's special usage, or just
1590 * reimplement it in MMR3HyperReadGCVirt.
1591 */
1592VMMR3DECL(int) PGMR3MapRead(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
1593{
1594 /*
1595 * Simplicity over speed... Chop the request up into chunks
1596 * which don't cross pages.
1597 */
1598 if (cb + (GCPtrSrc & PAGE_OFFSET_MASK) > PAGE_SIZE)
1599 {
1600 for (;;)
1601 {
1602 size_t cbRead = RT_MIN(cb, PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK));
1603 int rc = PGMR3MapRead(pVM, pvDst, GCPtrSrc, cbRead);
1604 if (RT_FAILURE(rc))
1605 return rc;
1606 cb -= cbRead;
1607 if (!cb)
1608 break;
1609 pvDst = (char *)pvDst + cbRead;
1610 GCPtrSrc += cbRead;
1611 }
1612 return VINF_SUCCESS;
1613 }
1614
1615 /*
1616 * Find the mapping.
1617 */
1618 PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
1619 while (pCur)
1620 {
1621 RTGCPTR off = GCPtrSrc - pCur->GCPtr;
1622 if (off < pCur->cb)
1623 {
1624 if (off + cb > pCur->cb)
1625 {
1626 AssertMsgFailed(("Invalid page range %RGv LB%#x. mapping '%s' %RGv to %RGv\n",
1627 GCPtrSrc, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast));
1628 return VERR_INVALID_PARAMETER;
1629 }
1630
1631 unsigned iPT = off >> X86_PD_SHIFT;
1632 unsigned iPTE = (off >> PAGE_SHIFT) & X86_PT_MASK;
1633 while (cb > 0 && iPTE < RT_ELEMENTS(CTXALLSUFF(pCur->aPTs[iPT].pPT)->a))
1634 {
1635 if (!CTXALLSUFF(pCur->aPTs[iPT].paPaePTs)[iPTE / 512].a[iPTE % 512].n.u1Present)
1636 return VERR_PAGE_NOT_PRESENT;
1637 RTHCPHYS HCPhys = CTXALLSUFF(pCur->aPTs[iPT].paPaePTs)[iPTE / 512].a[iPTE % 512].u & X86_PTE_PAE_PG_MASK;
1638
1639 /*
1640 * Get the virtual page from the physical one.
1641 */
1642 void *pvPage;
1643 int rc = MMR3HCPhys2HCVirt(pVM, HCPhys, &pvPage);
1644 if (RT_FAILURE(rc))
1645 return rc;
1646
1647 memcpy(pvDst, (char *)pvPage + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
1648 return VINF_SUCCESS;
1649 }
1650 }
1651
1652 /* next */
1653 pCur = CTXALLSUFF(pCur->pNext);
1654 }
1655
1656 return VERR_INVALID_POINTER;
1657}
1658
1659
1660/**
1661 * Info callback for 'pgmhandlers'.
1662 *
1663 * @param pHlp The output helpers.
1664 * @param pszArgs The arguments. phys or virt.
1665 */
1666DECLCALLBACK(void) pgmR3MapInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1667{
1668 pHlp->pfnPrintf(pHlp, pVM->pgm.s.fMappingsFixed
1669 ? "\nThe mappings are FIXED.\n"
1670 : "\nThe mappings are FLOATING.\n");
1671 PPGMMAPPING pCur;
1672 for (pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
1673 pHlp->pfnPrintf(pHlp, "%RGv - %RGv %s\n", pCur->GCPtr, pCur->GCPtrLast, pCur->pszDesc);
1674}
1675
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette