VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMInline.h@ 30824

Last change on this file since 30824 was 30824, checked in by vboxsync, 15 years ago

Must also deal with zero cr3 translation ptrs in ring 0.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 52.7 KB
/* $Id: PGMInline.h 30824 2010-07-14 12:25:12Z vboxsync $ */
/** @file
 * PGM - Inlined functions.
 */

/*
 * Copyright (C) 2006-2010 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef ___PGMInline_h
#define ___PGMInline_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/err.h>
#include <VBox/stam.h>
#include <VBox/param.h>
#include <VBox/vmm.h>
#include <VBox/mm.h>
#include <VBox/pdmcritsect.h>
#include <VBox/pdmapi.h>
#include <VBox/dis.h>
#include <VBox/dbgf.h>
#include <VBox/log.h>
#include <VBox/gmm.h>
#include <VBox/hwaccm.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/avl.h>
#include <iprt/critsect.h>
#include <iprt/sha.h>



/** @addtogroup grp_pgm_int   Internals
 * @internal
 * @{
 */

/** @todo Split out all the inline stuff into a separate file.  Then we can
 *        include it later when VM and VMCPU are defined and so avoid all that
 *        &pVM->pgm.s and &pVCpu->pgm.s stuff.  It also chops ~1600 lines off
 *        this file and will make it somewhat easier to navigate... */

/**
 * Gets the PGMRAMRANGE structure for a guest page.
 *
 * @returns Pointer to the RAM range on success.
 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
 *
 * @param   pPGM        PGM handle.
 * @param   GCPhys      The GC physical address.
 */
DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PPGM pPGM, RTGCPHYS GCPhys)
{
    /*
     * Optimize for the first range.
     */
    PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    if (RT_UNLIKELY(off >= pRam->cb))
    {
        do
        {
            pRam = pRam->CTX_SUFF(pNext);
            if (RT_UNLIKELY(!pRam))
                break;
            off = GCPhys - pRam->GCPhys;
        } while (off >= pRam->cb);
    }
    return pRam;
}


/**
 * Gets the PGMPAGE structure for a guest page.
 *
 * @returns Pointer to the page on success.
 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
 *
 * @param   pPGM        PGM handle.
 * @param   GCPhys      The GC physical address.
 */
DECLINLINE(PPGMPAGE) pgmPhysGetPage(PPGM pPGM, RTGCPHYS GCPhys)
{
    /*
     * Optimize for the first range.
     */
    PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    if (RT_UNLIKELY(off >= pRam->cb))
    {
        do
        {
            pRam = pRam->CTX_SUFF(pNext);
            if (RT_UNLIKELY(!pRam))
                return NULL;
            off = GCPhys - pRam->GCPhys;
        } while (off >= pRam->cb);
    }
    return &pRam->aPages[off >> PAGE_SHIFT];
}


/**
 * Gets the PGMPAGE structure for a guest page.
 *
 * Old Phys code: Will make sure the page is present.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS and a valid *ppPage on success.
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
 *
 * @param   pPGM        PGM handle.
 * @param   GCPhys      The GC physical address.
 * @param   ppPage      Where to store the page pointer on success.
 */
DECLINLINE(int) pgmPhysGetPageEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
{
    /*
     * Optimize for the first range.
     */
    PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    if (RT_UNLIKELY(off >= pRam->cb))
    {
        do
        {
            pRam = pRam->CTX_SUFF(pNext);
            if (RT_UNLIKELY(!pRam))
            {
                *ppPage = NULL; /* avoid incorrect and very annoying GCC warnings */
                return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
            }
            off = GCPhys - pRam->GCPhys;
        } while (off >= pRam->cb);
    }
    *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
    return VINF_SUCCESS;
}




/**
 * Gets the PGMPAGE structure for a guest page.
 *
 * Old Phys code: Will make sure the page is present.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS and a valid *ppPage on success.
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
 *
 * @param   pPGM        PGM handle.
 * @param   GCPhys      The GC physical address.
 * @param   ppPage      Where to store the page pointer on success.
 * @param   ppRamHint   Where to read and store the ram list hint.
 *                      The caller initializes this to NULL before the call.
 */
DECLINLINE(int) pgmPhysGetPageWithHintEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)
{
    RTGCPHYS off;
    PPGMRAMRANGE pRam = *ppRamHint;
    if (    !pRam
        ||  RT_UNLIKELY((off = GCPhys - pRam->GCPhys) >= pRam->cb))
    {
        pRam = pPGM->CTX_SUFF(pRamRanges);
        off = GCPhys - pRam->GCPhys;
        if (RT_UNLIKELY(off >= pRam->cb))
        {
            do
            {
                pRam = pRam->CTX_SUFF(pNext);
                if (RT_UNLIKELY(!pRam))
                {
                    *ppPage = NULL; /* Kill the incorrect and extremely annoying GCC warnings. */
                    return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
                }
                off = GCPhys - pRam->GCPhys;
            } while (off >= pRam->cb);
        }
        *ppRamHint = pRam;
    }
    *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
    return VINF_SUCCESS;
}
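/* Usage sketch for the ram list hint (illustrative only; cPages and
 * GCPhysFirst are assumed caller-side names, not PGM symbols): a caller
 * scanning consecutive guest pages keeps one hint alive across the loop,
 * so only the first lookup has to walk the whole RAM range list.
 *
 *     PPGMRAMRANGE pRamHint = NULL;
 *     for (uint32_t iPage = 0; iPage < cPages; iPage++)
 *     {
 *         PPGMPAGE pPage;
 *         int rc = pgmPhysGetPageWithHintEx(pPGM, GCPhysFirst + ((RTGCPHYS)iPage << PAGE_SHIFT),
 *                                           &pPage, &pRamHint);
 *         if (RT_FAILURE(rc))
 *             break;
 *         // ... inspect or update *pPage ...
 *     }
 */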


/**
 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
 *
 * @returns Pointer to the page on success.
 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
 *
 * @param   pPGM        PGM handle.
 * @param   GCPhys      The GC physical address.
 * @param   ppRam       Where to store the pointer to the PGMRAMRANGE.
 */
DECLINLINE(PPGMPAGE) pgmPhysGetPageAndRange(PPGM pPGM, RTGCPHYS GCPhys, PPGMRAMRANGE *ppRam)
{
    /*
     * Optimize for the first range.
     */
    PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    if (RT_UNLIKELY(off >= pRam->cb))
    {
        do
        {
            pRam = pRam->CTX_SUFF(pNext);
            if (RT_UNLIKELY(!pRam))
                return NULL;
            off = GCPhys - pRam->GCPhys;
        } while (off >= pRam->cb);
    }
    *ppRam = pRam;
    return &pRam->aPages[off >> PAGE_SHIFT];
}


/**
 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
 *
 * @returns Pointer to the page on success.
 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
 *
 * @param   pPGM        PGM handle.
 * @param   GCPhys      The GC physical address.
 * @param   ppPage      Where to store the pointer to the PGMPAGE structure.
 * @param   ppRam       Where to store the pointer to the PGMRAMRANGE structure.
 */
DECLINLINE(int) pgmPhysGetPageAndRangeEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
{
    /*
     * Optimize for the first range.
     */
    PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    if (RT_UNLIKELY(off >= pRam->cb))
    {
        do
        {
            pRam = pRam->CTX_SUFF(pNext);
            if (RT_UNLIKELY(!pRam))
            {
                *ppRam = NULL;  /* Shut up silly GCC warnings. */
                *ppPage = NULL; /* ditto */
                return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
            }
            off = GCPhys - pRam->GCPhys;
        } while (off >= pRam->cb);
    }
    *ppRam = pRam;
    *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
    return VINF_SUCCESS;
}


/**
 * Convert GC Phys to HC Phys.
 *
 * @returns VBox status.
 * @param   pPGM        PGM handle.
 * @param   GCPhys      The GC physical address.
 * @param   pHCPhys     Where to store the corresponding HC physical address.
 *
 * @deprecated  Doesn't deal with zero, shared or write monitored pages.
 *              Avoid when writing new code!
 */
DECLINLINE(int) pgmRamGCPhys2HCPhys(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
{
    PPGMPAGE pPage;
    int rc = pgmPhysGetPageEx(pPGM, GCPhys, &pPage);
    if (RT_FAILURE(rc))
        return rc;
    *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
    return VINF_SUCCESS;
}

#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0

/**
 * Inlined version of the ring-0 version of PGMDynMapHCPage that
 * optimizes access to pages already in the set.
 *
 * @returns VINF_SUCCESS. Will bail out to ring-3 on failure.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   HCPhys      The physical address of the page.
 * @param   ppv         Where to store the mapping address.
 */
DECLINLINE(int) pgmR0DynMapHCPageInlined(PPGM pPGM, RTHCPHYS HCPhys, void **ppv)
{
    PVM pVM = PGM2VM(pPGM);
    PPGMCPU pPGMCPU = (PPGMCPU)((uint8_t *)VMMGetCpu(pVM) + pPGM->offVCpuPGM); /* very pretty ;-) */
    PPGMMAPSET pSet = &pPGMCPU->AutoSet;

    STAM_PROFILE_START(&pPGMCPU->StatR0DynMapHCPageInl, a);
    Assert(!(HCPhys & PAGE_OFFSET_MASK));
    Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));

    unsigned iHash = PGMMAPSET_HASH(HCPhys);
    unsigned iEntry = pSet->aiHashTable[iHash];
    if (    iEntry < pSet->cEntries
        &&  pSet->aEntries[iEntry].HCPhys == HCPhys)
    {
        *ppv = pSet->aEntries[iEntry].pvPage;
        STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapHCPageInlHits);
    }
    else
    {
        STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapHCPageInlMisses);
        pgmR0DynMapHCPageCommon(pVM, pSet, HCPhys, ppv);
    }

    STAM_PROFILE_STOP(&pPGMCPU->StatR0DynMapHCPageInl, a);
    return VINF_SUCCESS;
}


/**
 * Inlined version of the ring-0 version of PGMDynMapGCPage that optimizes
 * access to pages already in the set.
 *
 * @returns See PGMDynMapGCPage.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPhys      The guest physical address of the page.
 * @param   ppv         Where to store the mapping address.
 */
DECLINLINE(int) pgmR0DynMapGCPageInlined(PPGM pPGM, RTGCPHYS GCPhys, void **ppv)
{
    PVM pVM = PGM2VM(pPGM);
    PPGMCPU pPGMCPU = (PPGMCPU)((uint8_t *)VMMGetCpu(pVM) + pPGM->offVCpuPGM); /* very pretty ;-) */

    STAM_PROFILE_START(&pPGMCPU->StatR0DynMapGCPageInl, a);
    AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys));

    /*
     * Get the ram range.
     */
    PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    if (RT_UNLIKELY(off >= pRam->cb
                    /** @todo || page state stuff */))
    {
        /* This case is not counted into StatR0DynMapGCPageInl. */
        STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlRamMisses);
        return PGMDynMapGCPage(pVM, GCPhys, ppv);
    }

    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
    STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlRamHits);

    /*
     * pgmR0DynMapHCPageInlined without stats.
     */
    PPGMMAPSET pSet = &pPGMCPU->AutoSet;
    Assert(!(HCPhys & PAGE_OFFSET_MASK));
    Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));

    unsigned iHash = PGMMAPSET_HASH(HCPhys);
    unsigned iEntry = pSet->aiHashTable[iHash];
    if (    iEntry < pSet->cEntries
        &&  pSet->aEntries[iEntry].HCPhys == HCPhys)
    {
        *ppv = pSet->aEntries[iEntry].pvPage;
        STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlHits);
    }
    else
    {
        STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlMisses);
        pgmR0DynMapHCPageCommon(pVM, pSet, HCPhys, ppv);
    }

    STAM_PROFILE_STOP(&pPGMCPU->StatR0DynMapGCPageInl, a);
    return VINF_SUCCESS;
}


/**
 * Inlined version of the ring-0 version of PGMDynMapGCPageOff that optimizes
 * access to pages already in the set.
 *
 * @returns See PGMDynMapGCPageOff.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPhys      The guest physical address of the page.
 * @param   ppv         Where to store the mapping address.
 */
DECLINLINE(int) pgmR0DynMapGCPageOffInlined(PPGM pPGM, RTGCPHYS GCPhys, void **ppv)
{
    PVM pVM = PGM2VM(pPGM);
    PPGMCPU pPGMCPU = (PPGMCPU)((uint8_t *)VMMGetCpu(pVM) + pPGM->offVCpuPGM); /* very pretty ;-) */

    STAM_PROFILE_START(&pPGMCPU->StatR0DynMapGCPageInl, a);

    /*
     * Get the ram range.
     */
    PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    if (RT_UNLIKELY(off >= pRam->cb
                    /** @todo || page state stuff */))
    {
        /* This case is not counted into StatR0DynMapGCPageInl. */
        STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlRamMisses);
        return PGMDynMapGCPageOff(pVM, GCPhys, ppv);
    }

    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
    STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlRamHits);

    /*
     * pgmR0DynMapHCPageInlined without stats.
     */
    PPGMMAPSET pSet = &pPGMCPU->AutoSet;
    Assert(!(HCPhys & PAGE_OFFSET_MASK));
    Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));

    unsigned iHash = PGMMAPSET_HASH(HCPhys);
    unsigned iEntry = pSet->aiHashTable[iHash];
    if (    iEntry < pSet->cEntries
        &&  pSet->aEntries[iEntry].HCPhys == HCPhys)
    {
        *ppv = (void *)((uintptr_t)pSet->aEntries[iEntry].pvPage | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
        STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlHits);
    }
    else
    {
        STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlMisses);
        pgmR0DynMapHCPageCommon(pVM, pSet, HCPhys, ppv);
        *ppv = (void *)((uintptr_t)*ppv | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
    }

    STAM_PROFILE_STOP(&pPGMCPU->StatR0DynMapGCPageInl, a);
    return VINF_SUCCESS;
}

#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)

/**
 * Maps the page into current context (RC and maybe R0).
 *
 * @returns pointer to the mapping.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   pPage       The page.
 */
DECLINLINE(void *) pgmPoolMapPageInlined(PPGM pPGM, PPGMPOOLPAGE pPage)
{
    if (pPage->idx >= PGMPOOL_IDX_FIRST)
    {
        Assert(pPage->idx < pPGM->CTX_SUFF(pPool)->cCurPages);
        void *pv;
# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
        pgmR0DynMapHCPageInlined(pPGM, pPage->Core.Key, &pv);
# else
        PGMDynMapHCPage(PGM2VM(pPGM), pPage->Core.Key, &pv);
# endif
        return pv;
    }
    AssertFatalMsgFailed(("pgmPoolMapPageInlined invalid page index %x\n", pPage->idx));
}

/**
 * Temporarily maps one host page specified by HC physical address, returning
 * pointer within the page.
 *
 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
 * reused after 8 mappings (or perhaps a few more if you score with the cache).
 *
 * @returns The address corresponding to HCPhys.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   HCPhys      HC Physical address of the page.
 */
DECLINLINE(void *) pgmDynMapHCPageOff(PPGM pPGM, RTHCPHYS HCPhys)
{
    void *pv;
# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    pgmR0DynMapHCPageInlined(pPGM, HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, &pv);
# else
    PGMDynMapHCPage(PGM2VM(pPGM), HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, &pv);
# endif
    pv = (void *)((uintptr_t)pv | ((uintptr_t)HCPhys & PAGE_OFFSET_MASK));
    return pv;
}

#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 || IN_RC */
#ifndef IN_RC

/**
 * Queries the Physical TLB entry for a physical guest page,
 * attempting to load the TLB entry if necessary.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
 *
 * @param   pPGM        The PGM instance handle.
 * @param   GCPhys      The address of the guest page.
 * @param   ppTlbe      Where to store the pointer to the TLB entry.
 */
DECLINLINE(int) pgmPhysPageQueryTlbe(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
{
    int rc;
    PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
    if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
    {
        STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbHits));
        rc = VINF_SUCCESS;
    }
    else
        rc = pgmPhysPageLoadIntoTlb(pPGM, GCPhys);
    *ppTlbe = pTlbe;
    return rc;
}
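/* Usage sketch, under the assumption that PGMPAGEMAPTLBE exposes the
 * current-context mapping of the page as its pv member (a sketch, not a
 * prescribed API):
 *
 *     PPGMPAGEMAPTLBE pTlbe;
 *     int rc = pgmPhysPageQueryTlbe(pPGM, GCPhys, &pTlbe);
 *     if (RT_SUCCESS(rc))
 *     {
 *         void *pvPage = pTlbe->pv;  // page-aligned mapping of GCPhys
 *         // ... read or write the page ...
 *     }
 */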


/**
 * Queries the Physical TLB entry for a physical guest page,
 * attempting to load the TLB entry if necessary.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
 *
 * @param   pPGM        The PGM instance handle.
 * @param   pPage       Pointer to the PGMPAGE structure corresponding to
 *                      GCPhys.
 * @param   GCPhys      The address of the guest page.
 * @param   ppTlbe      Where to store the pointer to the TLB entry.
 */
DECLINLINE(int) pgmPhysPageQueryTlbeWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
{
    int rc;
    PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
    if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
    {
        STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbHits));
        rc = VINF_SUCCESS;
    }
    else
        rc = pgmPhysPageLoadIntoTlbWithPage(pPGM, pPage, GCPhys);
    *ppTlbe = pTlbe;
    return rc;
}

#endif /* !IN_RC */

/**
 * Calculates the guest physical address of the large (4 MB) page in 32-bit
 * paging mode, taking PSE-36 into account.
 *
 * @returns guest physical address
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   Pde         Guest Pde
 */
DECLINLINE(RTGCPHYS) pgmGstGet4MBPhysPage(PPGM pPGM, X86PDE Pde)
{
    RTGCPHYS GCPhys = Pde.u & X86_PDE4M_PG_MASK;
    GCPhys |= (RTGCPHYS)Pde.b.u8PageNoHigh << 32;

    return GCPhys & pPGM->GCPhys4MBPSEMask;
}
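/* Worked example with illustrative values: a 4 MB PDE of 0x01002083 (P, RW
 * and PS set) has address bits 31:22 = 0x004 from the PG mask and
 * u8PageNoHigh = 0x01 from PDE bits 20:13, so the function returns
 * GCPhys = (0x01 << 32) | 0x01000000 = 0x101000000, i.e. 4 GB + 16 MB,
 * subject to the final GCPhys4MBPSEMask clamp. */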


/**
 * Gets the page directory entry for the specified address (32-bit paging).
 *
 * @returns The page directory entry in question.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(X86PDE) pgmGstGet32bitPDE(PPGMCPU pPGM, RTGCPTR GCPtr)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PCX86PD pGuestPD = NULL;
    int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPD);
    if (RT_FAILURE(rc))
    {
        X86PDE ZeroPde = {0};
        AssertMsgFailedReturn(("%Rrc\n", rc), ZeroPde);
    }
#else
    PX86PD pGuestPD = pPGM->CTX_SUFF(pGst32BitPd);
    if (RT_UNLIKELY(!pGuestPD))
        pGuestPD = pgmGstLazyMap32BitPD(pPGM);
#endif
    return pGuestPD->a[GCPtr >> X86_PD_SHIFT];
}


/**
 * Gets the address of a specific page directory entry (32-bit paging).
 *
 * @returns Pointer to the page directory entry in question.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDE) pgmGstGet32bitPDEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PD pGuestPD = NULL;
    int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPD);
    AssertRCReturn(rc, NULL);
#else
    PX86PD pGuestPD = pPGM->CTX_SUFF(pGst32BitPd);
    if (RT_UNLIKELY(!pGuestPD))
        pGuestPD = pgmGstLazyMap32BitPD(pPGM);
#endif
    return &pGuestPD->a[GCPtr >> X86_PD_SHIFT];
}


/**
 * Gets the address of the guest page directory (32-bit paging).
 *
 * @returns Pointer to the page directory in question.
 * @param   pPGM        Pointer to the PGM instance data.
 */
DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PPGMCPU pPGM)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PD pGuestPD = NULL;
    int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPD);
    AssertRCReturn(rc, NULL);
#else
    PX86PD pGuestPD = pPGM->CTX_SUFF(pGst32BitPd);
    if (RT_UNLIKELY(!pGuestPD))
        pGuestPD = pgmGstLazyMap32BitPD(pPGM);
#endif
    return pGuestPD;
}


/**
 * Gets the guest page directory pointer table.
 *
 * @returns Pointer to the page directory pointer table in question.
 * @returns NULL if the page directory pointer table is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 */
DECLINLINE(PX86PDPT) pgmGstGetPaePDPTPtr(PPGMCPU pPGM)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PDPT pGuestPDPT = NULL;
    int rc = pgmR0DynMapGCPageOffInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPDPT);
    AssertRCReturn(rc, NULL);
#else
    PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
    if (RT_UNLIKELY(!pGuestPDPT))
        pGuestPDPT = pgmGstLazyMapPaePDPT(pPGM);
#endif
    return pGuestPDPT;
}


/**
 * Gets the guest page directory pointer table entry for the specified address.
 *
 * @returns Pointer to the page directory pointer table entry in question.
 * @returns NULL if the page directory pointer table is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDPE) pgmGstGetPaePDPEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    AssertGCPtr32(GCPtr);

#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PDPT pGuestPDPT = 0;
    int rc = pgmR0DynMapGCPageOffInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPDPT);
    AssertRCReturn(rc, 0);
#else
    PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
    if (RT_UNLIKELY(!pGuestPDPT))
        pGuestPDPT = pgmGstLazyMapPaePDPT(pPGM);
#endif
    return &pGuestPDPT->a[(GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE];
}


/**
 * Gets the page directory for the specified address.
 *
 * @returns Pointer to the page directory in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDPAE) pgmGstGetPaePD(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    AssertGCPtr32(GCPtr);

    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
    AssertReturn(pGuestPDPT, NULL);
    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    if (pGuestPDPT->a[iPdpt].n.u1Present)
    {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
        PX86PDPAE pGuestPD = NULL;
        int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, (void **)&pGuestPD);
        AssertRCReturn(rc, NULL);
#else
        PX86PDPAE pGuestPD = pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
        if (    !pGuestPD
            ||  (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pPGM->aGCPhysGstPaePDs[iPdpt])
            pGuestPD = pgmGstLazyMapPaePD(pPGM, iPdpt);
#endif
        return pGuestPD;
        /* returning NULL is ok if we assume it's just an invalid page of some kind emulated as all 0s. (not quite true) */
    }
    return NULL;
}


/**
 * Gets the page directory entry for the specified address.
 *
 * @returns Pointer to the page directory entry in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDEPAE) pgmGstGetPaePDEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    AssertGCPtr32(GCPtr);

    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
    AssertReturn(pGuestPDPT, NULL);
    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    if (pGuestPDPT->a[iPdpt].n.u1Present)
    {
        const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
        PX86PDPAE pGuestPD = NULL;
        int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, (void **)&pGuestPD);
        AssertRCReturn(rc, NULL);
#else
        PX86PDPAE pGuestPD = pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
        if (    !pGuestPD
            ||  (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pPGM->aGCPhysGstPaePDs[iPdpt])
            pGuestPD = pgmGstLazyMapPaePD(pPGM, iPdpt);
#endif
        return &pGuestPD->a[iPD];
        /* returning NULL is ok if we assume it's just an invalid page or something which we'll emulate as all 0s. (not quite true) */
    }
    return NULL;
}


/**
 * Gets the page directory entry for the specified address.
 *
 * @returns The page directory entry in question.
 * @returns A non-present entry if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(X86PDEPAE) pgmGstGetPaePDE(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    AssertGCPtr32(GCPtr);
    X86PDEPAE ZeroPde = {0};
    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
    if (RT_LIKELY(pGuestPDPT))
    {
        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
        if (pGuestPDPT->a[iPdpt].n.u1Present)
        {
            const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
            PX86PDPAE pGuestPD = NULL;
            int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, (void **)&pGuestPD);
            AssertRCReturn(rc, ZeroPde);
#else
            PX86PDPAE pGuestPD = pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
            if (    !pGuestPD
                ||  (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pPGM->aGCPhysGstPaePDs[iPdpt])
                pGuestPD = pgmGstLazyMapPaePD(pPGM, iPdpt);
#endif
            return pGuestPD->a[iPD];
        }
    }
    return ZeroPde;
}


/**
 * Gets the page directory pointer table entry for the specified address
 * and returns the index into the page directory.
 *
 * @returns Pointer to the page directory in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 * @param   piPD        Receives the index into the returned page directory
 * @param   pPdpe       Receives the page directory pointer entry. Optional.
 */
DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PPGMCPU pPGM, RTGCPTR GCPtr, unsigned *piPD, PX86PDPE pPdpe)
{
    AssertGCPtr32(GCPtr);

    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
    AssertReturn(pGuestPDPT, NULL);
    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    if (pPdpe)
        *pPdpe = pGuestPDPT->a[iPdpt];
    if (pGuestPDPT->a[iPdpt].n.u1Present)
    {
        const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
        PX86PDPAE pGuestPD = NULL;
        int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, (void **)&pGuestPD);
        AssertRCReturn(rc, NULL);
#else
        PX86PDPAE pGuestPD = pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
        if (    !pGuestPD
            ||  (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pPGM->aGCPhysGstPaePDs[iPdpt])
            pGuestPD = pgmGstLazyMapPaePD(pPGM, iPdpt);
#endif
        *piPD = iPD;
        return pGuestPD;
        /* returning NULL is ok if we assume it's just an invalid page of some kind emulated as all 0s. */
    }
    return NULL;
}

#ifndef IN_RC

/**
 * Gets the page map level-4 pointer for the guest.
 *
 * @returns Pointer to the PML4 page.
 * @param   pPGM        Pointer to the PGM instance data.
 */
DECLINLINE(PX86PML4) pgmGstGetLongModePML4Ptr(PPGMCPU pPGM)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PML4 pGuestPml4;
    int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPml4);
    AssertRCReturn(rc, NULL);
#else
    PX86PML4 pGuestPml4 = pPGM->CTX_SUFF(pGstAmd64Pml4);
    if (RT_UNLIKELY(!pGuestPml4))
        pGuestPml4 = pgmGstLazyMapPml4(pPGM);
    Assert(pGuestPml4);
#endif
    return pGuestPml4;
}


/**
 * Gets the pointer to a page map level-4 entry.
 *
 * @returns Pointer to the PML4 entry.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   iPml4       The index.
 */
DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PPGMCPU pPGM, unsigned int iPml4)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PML4 pGuestPml4;
    int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPml4);
    AssertRCReturn(rc, NULL);
#else
    PX86PML4 pGuestPml4 = pPGM->CTX_SUFF(pGstAmd64Pml4);
    if (RT_UNLIKELY(!pGuestPml4))
        pGuestPml4 = pgmGstLazyMapPml4(pPGM);
    Assert(pGuestPml4);
#endif
    return &pGuestPml4->a[iPml4];
}


/**
 * Gets a page map level-4 entry.
 *
 * @returns The PML4 entry.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   iPml4       The index.
 */
DECLINLINE(X86PML4E) pgmGstGetLongModePML4E(PPGMCPU pPGM, unsigned int iPml4)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PML4 pGuestPml4;
    int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPml4);
    if (RT_FAILURE(rc))
    {
        X86PML4E ZeroPml4e = {0};
        AssertMsgFailedReturn(("%Rrc\n", rc), ZeroPml4e);
    }
#else
    PX86PML4 pGuestPml4 = pPGM->CTX_SUFF(pGstAmd64Pml4);
    if (!pGuestPml4)
        pGuestPml4 = pgmGstLazyMapPml4(pPGM);
    Assert(pGuestPml4);
#endif
    return pGuestPml4->a[iPml4];
}


/**
 * Gets the page directory pointer entry for the specified address.
 *
 * @returns Pointer to the page directory pointer entry in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 * @param   ppPml4e     Page Map Level-4 Entry (out)
 */
DECLINLINE(PX86PDPE) pgmGstGetLongModePDPTPtr(PPGMCPU pPGM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e)
{
    PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
    if (pPml4e->n.u1Present)
    {
        PX86PDPT pPdpt;
        int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPml4e->u & X86_PML4E_PG_MASK, &pPdpt);
        AssertRCReturn(rc, NULL);

        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
        return &pPdpt->a[iPdpt];
    }
    return NULL;
}


/**
 * Gets the page directory entry for the specified address.
 *
 * @returns The page directory entry in question.
 * @returns A non-present entry if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 * @param   ppPml4e     Page Map Level-4 Entry (out)
 * @param   pPdpe       Page directory pointer table entry (out)
 */
DECLINLINE(X86PDEPAE) pgmGstGetLongModePDEEx(PPGMCPU pPGM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe)
{
    X86PDEPAE ZeroPde = {0};
    PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
    if (pPml4e->n.u1Present)
    {
        PCX86PDPT pPdptTemp;
        int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp);
        AssertRCReturn(rc, ZeroPde);

        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
        *pPdpe = pPdptTemp->a[iPdpt];
        if (pPdptTemp->a[iPdpt].n.u1Present)
        {
            PCX86PDPAE pPD;
            rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
            AssertRCReturn(rc, ZeroPde);

            const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
            return pPD->a[iPD];
        }
    }

    return ZeroPde;
}


/**
 * Gets the page directory entry for the specified address.
 *
 * @returns The page directory entry in question.
 * @returns A non-present entry if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(X86PDEPAE) pgmGstGetLongModePDE(PPGMCPU pPGM, RTGCPTR64 GCPtr)
{
    X86PDEPAE ZeroPde = {0};
    PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    if (pGuestPml4->a[iPml4].n.u1Present)
    {
        PCX86PDPT pPdptTemp;
        int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
        AssertRCReturn(rc, ZeroPde);

        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
        if (pPdptTemp->a[iPdpt].n.u1Present)
        {
            PCX86PDPAE pPD;
            rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
            AssertRCReturn(rc, ZeroPde);

            const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
            return pPD->a[iPD];
        }
    }
    return ZeroPde;
}


/**
 * Gets the page directory entry for the specified address.
 *
 * @returns Pointer to the page directory entry in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDEPAE) pgmGstGetLongModePDEPtr(PPGMCPU pPGM, RTGCPTR64 GCPtr)
{
    PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    if (pGuestPml4->a[iPml4].n.u1Present)
    {
        PCX86PDPT pPdptTemp;
        int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
        AssertRCReturn(rc, NULL);

        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
        if (pPdptTemp->a[iPdpt].n.u1Present)
        {
            PX86PDPAE pPD;
            rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
            AssertRCReturn(rc, NULL);

            const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
            return &pPD->a[iPD];
        }
    }
    return NULL;
}


/**
 * Gets the GUEST page directory pointer for the specified address.
 *
 * @returns The page directory in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 * @param   ppPml4e     Page Map Level-4 Entry (out)
 * @param   pPdpe       Page directory pointer table entry (out)
 * @param   piPD        Receives the index into the returned page directory
 */
DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PPGMCPU pPGM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
{
    PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
    if (pPml4e->n.u1Present)
    {
        PCX86PDPT pPdptTemp;
        int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp);
        AssertRCReturn(rc, NULL);

        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
        *pPdpe = pPdptTemp->a[iPdpt];
        if (pPdptTemp->a[iPdpt].n.u1Present)
        {
            PX86PDPAE pPD;
            rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
            AssertRCReturn(rc, NULL);

            *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
            return pPD;
        }
    }
    return NULL;
}

#endif /* !IN_RC */

/**
 * Gets the shadow page directory, 32-bit.
 *
 * @returns Pointer to the shadow 32-bit PD.
 * @param   pPGM        Pointer to the PGM instance data.
 */
DECLINLINE(PX86PD) pgmShwGet32BitPDPtr(PPGMCPU pPGM)
{
    return (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
}


/**
 * Gets the shadow page directory entry for the specified address, 32-bit.
 *
 * @returns Shadow 32-bit PDE.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(X86PDE) pgmShwGet32BitPDE(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;

    PX86PD pShwPde = pgmShwGet32BitPDPtr(pPGM);
    if (!pShwPde)
    {
        X86PDE ZeroPde = {0};
        return ZeroPde;
    }
    return pShwPde->a[iPd];
}


/**
 * Gets the pointer to the shadow page directory entry for the specified
 * address, 32-bit.
 *
 * @returns Pointer to the shadow 32-bit PDE.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDE) pgmShwGet32BitPDEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;

    PX86PD pPde = pgmShwGet32BitPDPtr(pPGM);
    AssertReturn(pPde, NULL);
    return &pPde->a[iPd];
}


/**
 * Gets the shadow page directory pointer table, PAE.
 *
 * @returns Pointer to the shadow PAE PDPT.
 * @param   pPGM        Pointer to the PGM instance data.
 */
DECLINLINE(PX86PDPT) pgmShwGetPaePDPTPtr(PPGMCPU pPGM)
{
    return (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
}


/**
 * Gets the shadow page directory for the specified address, PAE.
 *
 * @returns Pointer to the shadow PD.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pPGM);

    if (!pPdpt->a[iPdpt].n.u1Present)
        return NULL;

    /* Fetch the pgm pool shadow descriptor. */
    PPGMPOOLPAGE pShwPde = pgmPoolGetPage(PGMCPU2PGM(pPGM)->CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
    AssertReturn(pShwPde, NULL);

    return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pShwPde);
}


/**
 * Gets the shadow page directory for the specified address, PAE.
 *
 * @returns Pointer to the shadow PD.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   pPdpt       Pointer to the shadow PAE PDPT.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PPGMCPU pPGM, PX86PDPT pPdpt, RTGCPTR GCPtr)
{
    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;

    if (!pPdpt->a[iPdpt].n.u1Present)
        return NULL;

    /* Fetch the pgm pool shadow descriptor. */
    PPGMPOOLPAGE pShwPde = pgmPoolGetPage(PGMCPU2PGM(pPGM)->CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
    AssertReturn(pShwPde, NULL);

    return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pShwPde);
}


/**
 * Gets the shadow page directory entry, PAE.
 *
 * @returns PDE.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(X86PDEPAE) pgmShwGetPaePDE(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;

    PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pPGM, GCPtr);
    if (!pShwPde)
    {
        X86PDEPAE ZeroPde = {0};
        return ZeroPde;
    }
    return pShwPde->a[iPd];
}


/**
 * Gets the pointer to the shadow page directory entry for an address, PAE.
 *
 * @returns Pointer to the PDE.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDEPAE) pgmShwGetPaePDEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;

    PX86PDPAE pPde = pgmShwGetPaePDPtr(pPGM, GCPtr);
    AssertReturn(pPde, NULL);
    return &pPde->a[iPd];
}

#ifndef IN_RC

/**
 * Gets the shadow page map level-4 pointer.
 *
 * @returns Pointer to the shadow PML4.
 * @param   pPGM        Pointer to the PGM instance data.
 */
DECLINLINE(PX86PML4) pgmShwGetLongModePML4Ptr(PPGMCPU pPGM)
{
    return (PX86PML4)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
}


/**
 * Gets the shadow page map level-4 entry for the specified address.
 *
 * @returns The entry.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(X86PML4E) pgmShwGetLongModePML4E(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pPGM);

    if (!pShwPml4)
    {
        X86PML4E ZeroPml4e = {0};
        return ZeroPml4e;
    }
    return pShwPml4->a[iPml4];
}


/**
 * Gets the pointer to the specified shadow page map level-4 entry.
 *
 * @returns The entry.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   iPml4       The PML4 index.
 */
DECLINLINE(PX86PML4E) pgmShwGetLongModePML4EPtr(PPGMCPU pPGM, unsigned int iPml4)
{
    PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pPGM);
    if (!pShwPml4)
        return NULL;
    return &pShwPml4->a[iPml4];
}


/**
 * Gets the GUEST page directory pointer for the specified address.
 *
 * @returns The page directory in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 * @param   piPD        Receives the index into the returned page directory
 */
DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PPGMCPU pPGM, RTGCPTR64 GCPtr, unsigned *piPD)
{
    PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    if (pGuestPml4->a[iPml4].n.u1Present)
    {
        PCX86PDPT pPdptTemp;
        int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
        AssertRCReturn(rc, NULL);

        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
        if (pPdptTemp->a[iPdpt].n.u1Present)
        {
            PX86PDPAE pPD;
            rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
            AssertRCReturn(rc, NULL);

            *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
            return pPD;
        }
    }
    return NULL;
}

#endif /* !IN_RC */

/**
 * Gets the page state for a physical handler.
 *
 * @returns The physical handler page state.
 * @param   pCur    The physical handler in question.
 */
DECLINLINE(unsigned) pgmHandlerPhysicalCalcState(PPGMPHYSHANDLER pCur)
{
    switch (pCur->enmType)
    {
        case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
            return PGM_PAGE_HNDL_PHYS_STATE_WRITE;

        case PGMPHYSHANDLERTYPE_MMIO:
        case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
            return PGM_PAGE_HNDL_PHYS_STATE_ALL;

        default:
            AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
    }
}


/**
 * Gets the page state for a virtual handler.
 *
 * @returns The virtual handler page state.
 * @param   pCur    The virtual handler in question.
 * @remarks This should never be used on a hypervisor access handler.
 */
DECLINLINE(unsigned) pgmHandlerVirtualCalcState(PPGMVIRTHANDLER pCur)
{
    switch (pCur->enmType)
    {
        case PGMVIRTHANDLERTYPE_WRITE:
            return PGM_PAGE_HNDL_VIRT_STATE_WRITE;
        case PGMVIRTHANDLERTYPE_ALL:
            return PGM_PAGE_HNDL_VIRT_STATE_ALL;
        default:
            AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
    }
}


/**
 * Clears one physical page of a virtual handler
 *
 * @param   pPGM    Pointer to the PGM instance.
 * @param   pCur    Virtual handler structure
 * @param   iPage   Physical page index
 *
 * @remark  Only used when PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL is being set, so no
 *          need to care about other handlers in the same page.
 */
DECLINLINE(void) pgmHandlerVirtualClearPage(PPGM pPGM, PPGMVIRTHANDLER pCur, unsigned iPage)
{
    const PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];

    /*
     * Remove the node from the tree (it's supposed to be in the tree if we get here!).
     */
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
    AssertReleaseMsg(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
                     ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                      pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
#endif
    if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD)
    {
        /* We're the head of the alias chain. */
        PPGMPHYS2VIRTHANDLER pRemove = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRemove(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key); NOREF(pRemove);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
        AssertReleaseMsg(pRemove != NULL,
                         ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                          pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
        AssertReleaseMsg(pRemove == pPhys2Virt,
                         ("wanted: pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
                          "   got:    pRemove=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                          pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias,
                          pRemove, pRemove->Core.Key, pRemove->Core.KeyLast, pRemove->offVirtHandler, pRemove->offNextAlias));
#endif
        if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
        {
            /* Insert the next list in the alias chain into the tree. */
            PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
            AssertReleaseMsg(pNext->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
                             ("pNext=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                              pNext, pNext->Core.Key, pNext->Core.KeyLast, pNext->offVirtHandler, pNext->offNextAlias));
#endif
            pNext->offNextAlias |= PGMPHYS2VIRTHANDLER_IS_HEAD;
            bool fRc = RTAvlroGCPhysInsert(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, &pNext->Core);
            AssertRelease(fRc);
        }
    }
    else
    {
        /* Locate the previous node in the alias chain. */
        PPGMPHYS2VIRTHANDLER pPrev = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
        AssertReleaseMsg(pPrev != pPhys2Virt,
                         ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
                          pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
#endif
        for (;;)
        {
            PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPrev + (pPrev->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
            if (pNext == pPhys2Virt)
            {
                /* unlink. */
                LogFlow(("pgmHandlerVirtualClearPage: removed %p:{.offNextAlias=%#RX32} from alias chain. prev %p:{.offNextAlias=%#RX32} [%RGp-%RGp]\n",
                         pPhys2Virt, pPhys2Virt->offNextAlias, pPrev, pPrev->offNextAlias, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
                if (!(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
                    pPrev->offNextAlias &= ~PGMPHYS2VIRTHANDLER_OFF_MASK;
                else
                {
                    PPGMPHYS2VIRTHANDLER pNewNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
                    pPrev->offNextAlias = ((intptr_t)pNewNext - (intptr_t)pPrev)
                                        | (pPrev->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
                }
                break;
            }

            /* next */
            if (pNext == pPrev)
            {
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                AssertReleaseMsg(pNext != pPrev,
                                 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
                                  pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
#endif
                break;
            }
            pPrev = pNext;
        }
    }
    Log2(("PHYS2VIRT: Removing %RGp-%RGp %#RX32 %s\n",
          pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
    pPhys2Virt->offNextAlias = 0;
    pPhys2Virt->Core.KeyLast = NIL_RTGCPHYS; /* require reinsert */

    /*
     * Clear the ram flags for this page.
     */
    PPGMPAGE pPage = pgmPhysGetPage(pPGM, pPhys2Virt->Core.Key);
    AssertReturnVoid(pPage);
    PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, PGM_PAGE_HNDL_VIRT_STATE_NONE);
}


/**
 * Internal worker for finding an 'in-use' shadow page given by its pool index.
 *
 * @returns Pointer to the shadow page structure.
 * @param   pPool       The pool.
 * @param   idx         The pool page index.
 */
DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
{
    AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
    return &pPool->aPages[idx];
}


/**
 * Clear references to guest physical memory.
 *
 * @param   pPool       The pool.
 * @param   pPoolPage   The pool page.
 * @param   pPhysPage   The physical guest page tracking structure.
 * @param   iPte        Shadow PTE index
 */
DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage, uint16_t iPte)
{
    /*
     * Just deal with the simple case here.
     */
# ifdef LOG_ENABLED
    const unsigned uOrg = PGM_PAGE_GET_TRACKING(pPhysPage);
# endif
    const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
    if (cRefs == 1)
    {
        Assert(pPoolPage->idx == PGM_PAGE_GET_TD_IDX(pPhysPage));
        Assert(iPte == PGM_PAGE_GET_PTE_INDEX(pPhysPage));
        /* Invalidate the tracking data. */
        PGM_PAGE_SET_TRACKING(pPhysPage, 0);
    }
    else
        pgmPoolTrackPhysExtDerefGCPhys(pPool, pPoolPage, pPhysPage, iPte);
    Log2(("pgmTrackDerefGCPhys: %x -> %x pPhysPage=%R[pgmpage]\n", uOrg, PGM_PAGE_GET_TRACKING(pPhysPage), pPhysPage ));
}


/**
 * Moves the page to the head of the age list.
 *
 * This is done when the cached page is used in one way or another.
 *
 * @param   pPool       The pool.
 * @param   pPage       The cached page.
 */
DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    PVM pVM = pPool->CTX_SUFF(pVM);
    pgmLock(pVM);

    /*
     * Move to the head of the age list.
     */
    if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
    {
        /* unlink */
        pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
        if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
            pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
        else
            pPool->iAgeTail = pPage->iAgePrev;

        /* insert at head */
        pPage->iAgePrev = NIL_PGMPOOL_IDX;
        pPage->iAgeNext = pPool->iAgeHead;
        Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
        pPool->iAgeHead = pPage->idx;
        pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
    }
    pgmUnlock(pVM);
}

/**
 * Locks a page to prevent flushing (important for cr3 root pages or shadow pae pd pages).
 *
 * @param   pPool       The pool.
 * @param   pPage       The PGM pool page.
 */
DECLINLINE(void) pgmPoolLockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    Assert(PGMIsLockOwner(pPool->CTX_SUFF(pVM)));
    ASMAtomicIncU32(&pPage->cLocked);
}


/**
 * Unlocks a page to allow flushing again
 *
 * @param   pPool       The pool.
 * @param   pPage       The PGM pool page.
 */
DECLINLINE(void) pgmPoolUnlockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    Assert(PGMIsLockOwner(pPool->CTX_SUFF(pVM)));
    Assert(pPage->cLocked);
    ASMAtomicDecU32(&pPage->cLocked);
}


/**
 * Checks if the page is locked (e.g. the active CR3 or one of the four PDs of a PAE PDPT)
 *
 * @returns true if locked, false otherwise.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   pPage       The PGM pool page.
 */
DECLINLINE(bool) pgmPoolIsPageLocked(PPGM pPGM, PPGMPOOLPAGE pPage)
{
    if (pPage->cLocked)
    {
        LogFlow(("pgmPoolIsPageLocked found root page %d\n", pPage->enmKind));
        if (pPage->cModifications)
            pPage->cModifications = 1; /* reset counter (can't use 0, or else it will be reinserted in the modified list) */
        return true;
    }
    return false;
}


/**
 * Tells if mappings are to be put into the shadow page table or not.
 *
 * @returns boolean result
 * @param   pPGM        Pointer to the PGM instance data.
 */
DECL_FORCE_INLINE(bool) pgmMapAreMappingsEnabled(PPGM pPGM)
{
#ifdef PGM_WITHOUT_MAPPINGS
    /* There are no mappings in VT-x and AMD-V mode. */
    Assert(pPGM->fMappingsDisabled);
    return false;
#else
    return !pPGM->fMappingsDisabled;
#endif
}


/**
 * Checks if the mappings are floating and enabled.
 *
 * @returns true / false.
 * @param   pPGM        Pointer to the PGM instance data.
 */
DECL_FORCE_INLINE(bool) pgmMapAreMappingsFloating(PPGM pPGM)
{
#ifdef PGM_WITHOUT_MAPPINGS
    /* There are no mappings in VT-x and AMD-V mode. */
    Assert(pPGM->fMappingsDisabled);
    return false;
#else
    return !pPGM->fMappingsDisabled
        && !pPGM->fMappingsFixed;
#endif
}

/** @} */

#endif