VirtualBox

source: vbox/trunk/src/VBox/VMM/include/PGMInline.h

Last change on this file was 107171, checked in by vboxsync, 7 weeks ago

VMM/PGM: Introducing VBOX_WITH_ONLY_PGM_NEM_MODE to disable lots of unused code on *.arm64 and darwin. jiraref:VBP-1466

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 42.6 KB
1/* $Id: PGMInline.h 107171 2024-11-28 10:38:10Z vboxsync $ */
2/** @file
3 * PGM - Inlined functions.
4 */
5
6/*
7 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_PGMInline_h
29#define VMM_INCLUDED_SRC_include_PGMInline_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34#include <VBox/cdefs.h>
35#include <VBox/types.h>
36#include <VBox/err.h>
37#include <VBox/vmm/stam.h>
38#include <VBox/param.h>
39#include <VBox/vmm/vmm.h>
40#include <VBox/vmm/mm.h>
41#include <VBox/vmm/pdmcritsect.h>
42#include <VBox/vmm/pdmapi.h>
43#include <VBox/dis.h>
44#include <VBox/vmm/dbgf.h>
45#include <VBox/log.h>
46#include <VBox/vmm/gmm.h>
47#include <VBox/vmm/hm.h>
48#include <VBox/vmm/nem.h>
49#include <iprt/asm.h>
50#include <iprt/assert.h>
51#include <iprt/avl.h>
52#include <iprt/critsect.h>
53#include <iprt/sha.h>
54
55
56
57/** @addtogroup grp_pgm_int Internals
58 * @internal
59 * @{
60 */
61
62/**
63 * Gets the PGMRAMRANGE structure for a guest page.
64 *
65 * @returns Pointer to the RAM range on success.
66 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
67 *
68 * @param pVM The cross context VM structure.
69 * @param GCPhys The GC physical address.
70 */
71DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PVMCC pVM, RTGCPHYS GCPhys)
72{
73 PPGMRAMRANGE const pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)];
74 if (pRam)
75 {
76 RTGCPHYS const GCPhysFirst = pRam->GCPhys;
77 if (GCPhys - GCPhysFirst < pRam->cb && GCPhys >= GCPhysFirst)
78 {
79 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
80 return pRam;
81 }
82 }
83 return pgmPhysGetRangeSlow(pVM, GCPhys);
84}
85
86
87/**
88 * Gets the PGMRAMRANGE structure for a guest page; if the address is
89 * unassigned, returns the RAM range above it.
90 *
91 * @returns Pointer to the RAM range on success.
92 * @returns NULL if the address is located after the last range.
93 *
94 * @param pVM The cross context VM structure.
95 * @param GCPhys The GC physical address.
96 */
97DECLINLINE(PPGMRAMRANGE) pgmPhysGetRangeAtOrAbove(PVMCC pVM, RTGCPHYS GCPhys)
98{
99 PPGMRAMRANGE const pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)];
100 if (pRam)
101 {
102 RTGCPHYS const GCPhysFirst = pRam->GCPhys;
103 if (GCPhys - GCPhysFirst < pRam->cb && GCPhys >= GCPhysFirst)
104 {
105 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
106 return pRam;
107 }
108 }
109 return pgmPhysGetRangeAtOrAboveSlow(pVM, GCPhys);
110}
111
112
113/**
114 * Gets the PGMPAGE structure for a guest page.
115 *
116 * @returns Pointer to the page on success.
117 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
118 *
119 * @param pVM The cross context VM structure.
120 * @param GCPhys The GC physical address.
121 */
122DECLINLINE(PPGMPAGE) pgmPhysGetPage(PVMCC pVM, RTGCPHYS GCPhys)
123{
124 PPGMRAMRANGE const pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)];
125 if (pRam)
126 {
127 RTGCPHYS const GCPhysFirst = pRam->GCPhys;
128 RTGCPHYS const off = GCPhys - GCPhysFirst;
129 if (off < pRam->cb && GCPhys >= GCPhysFirst)
130 {
131 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
132 return &pRam->aPages[off >> GUEST_PAGE_SHIFT];
133 }
134 }
135 return pgmPhysGetPageSlow(pVM, GCPhys);
136}
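
/* Editorial example (not part of the original header): a minimal sketch of how
 * a caller that already holds the PGM lock might use pgmPhysGetPage() to check
 * whether a guest physical address is backed by ordinary RAM; pVM and GCPhys
 * are assumed to be supplied by the caller. */
#if 0
    PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
    if (pPage && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
    {
        /* Plain RAM; MMIO, MMIO2 and ROM pages report other PGMPAGETYPE values. */
    }
#endif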
137
138
139/**
140 * Gets the PGMPAGE structure for a guest page.
141 *
142 * Old Phys code: Will make sure the page is present.
143 *
144 * @returns VBox status code.
145 * @retval VINF_SUCCESS and a valid *ppPage on success.
146 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
147 *
148 * @param pVM The cross context VM structure.
149 * @param GCPhys The GC physical address.
150 * @param ppPage Where to store the page pointer on success.
151 */
152DECLINLINE(int) pgmPhysGetPageEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
153{
154 PPGMRAMRANGE const pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)];
155 if (pRam)
156 {
157 RTGCPHYS const GCPhysFirst = pRam->GCPhys;
158 RTGCPHYS const off = GCPhys - GCPhysFirst;
159 if (off < pRam->cb && GCPhys >= GCPhysFirst)
160 {
161 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
162 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
163 return VINF_SUCCESS;
164 }
165 }
166 return pgmPhysGetPageExSlow(pVM, GCPhys, ppPage);
167}
168
169
170/**
171 * Gets the PGMPAGE structure for a guest page.
172 *
173 * Old Phys code: Will make sure the page is present.
174 *
175 * @returns VBox status code.
176 * @retval VINF_SUCCESS and a valid *ppPage on success.
177 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
178 *
179 * @param pVM The cross context VM structure.
180 * @param GCPhys The GC physical address.
181 * @param ppPage Where to store the page pointer on success.
182 * @param ppRamHint Where to read and store the ram list hint.
183 * The caller initializes this to NULL before the call.
184 */
185DECLINLINE(int) pgmPhysGetPageWithHintEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)
186{
187 PPGMRAMRANGE pRam = *ppRamHint;
188 RTGCPHYS GCPhysFirst;
189 RTGCPHYS off;
190 if ( !pRam
191 || RT_UNLIKELY( (off = GCPhys - (GCPhysFirst = pRam->GCPhys)) >= pRam->cb
192 || GCPhys < GCPhysFirst) )
193 {
194 pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)];
195 if ( !pRam
196 || (off = GCPhys - (GCPhysFirst = pRam->GCPhys)) >= pRam->cb
197 || GCPhys < GCPhysFirst)
198 return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRamHint);
199
200 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
201 *ppRamHint = pRam;
202 }
203 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
204 return VINF_SUCCESS;
205}
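
/* Editorial example (not part of the original header): a sketch of the intended
 * ram-hint pattern. The hint starts out NULL and is reused across consecutive
 * lookups, so pages that fall into the same RAM range bypass the TLB entirely;
 * cPages and GCPhysStart are illustrative names, not part of this API. */
#if 0
    PPGMRAMRANGE pRamHint = NULL;
    for (uint32_t iPage = 0; iPage < cPages; iPage++)
    {
        PPGMPAGE pPage;
        int rc = pgmPhysGetPageWithHintEx(pVM, GCPhysStart + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT),
                                          &pPage, &pRamHint);
        if (RT_FAILURE(rc))
            break;
        /* ... inspect or update *pPage ... */
    }
#endif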
206
207
208/**
209 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
210 *
211 * @returns VBox status code; VINF_SUCCESS and valid *ppPage and *ppRam on
212 * success, VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
213 *
214 * @param pVM The cross context VM structure.
215 * @param GCPhys The GC physical address.
216 * @param ppPage Where to store the pointer to the PGMPAGE structure.
217 * @param ppRam Where to store the pointer to the PGMRAMRANGE structure.
218 */
219DECLINLINE(int) pgmPhysGetPageAndRangeEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
220{
221 PPGMRAMRANGE pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)];
222 if (pRam)
223 {
224 RTGCPHYS const GCPhysFirst = pRam->GCPhys;
225 RTGCPHYS const off = GCPhys - GCPhysFirst;
226 if (off < pRam->cb && GCPhys >= GCPhysFirst)
227 {
228 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
229 *ppRam = pRam;
230 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
231 return VINF_SUCCESS;
232 }
233 }
234 return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRam);
235}
236
237
238/**
239 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
240 *
241 * @returns VBox status code; VINF_SUCCESS and valid *ppPage and *ppRam on
242 * success, VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
243 *
244 * @param pVM The cross context VM structure.
245 * @param pVCpu The cross context virtual CPU structure.
246 * @param GCPhys The GC physical address.
247 * @param ppPage Where to store the pointer to the PGMPAGE structure.
248 * @param ppRam Where to store the pointer to the PGMRAMRANGE structure.
249 */
250DECLINLINE(int) pgmPhysGetPageAndRangeExLockless(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys,
251 PGMPAGE volatile **ppPage, PGMRAMRANGE volatile **ppRam)
252{
253 PGMRAMRANGE volatile * const pRam = pVCpu->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)];
254 if (RT_LIKELY(pRam))
255 {
256 RTGCPHYS const GCPhysFirst = pRam->GCPhys;
257 RTGCPHYS const off = GCPhys - GCPhysFirst;
258 if (RT_LIKELY( off < pRam->cb
259 && GCPhys >= GCPhysFirst))
260 {
261 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
262 *ppRam = pRam;
263 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
264 return VINF_SUCCESS;
265 }
266 }
267 return pgmPhysGetPageAndRangeExSlowLockless(pVM, pVCpu, GCPhys, ppPage, ppRam);
268}
269
270
271/**
272 * Converts GC Phys to HC Phys.
273 *
274 * @returns VBox status code.
275 * @param pVM The cross context VM structure.
276 * @param GCPhys The GC physical address.
277 * @param pHCPhys Where to store the corresponding HC physical address.
278 *
279 * @deprecated Doesn't deal with zero, shared or write monitored pages.
280 * Avoid when writing new code!
281 */
282DECLINLINE(int) pgmRamGCPhys2HCPhys(PVMCC pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
283{
284 PPGMPAGE pPage;
285 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
286 if (RT_FAILURE(rc))
287 return rc;
288 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & GUEST_PAGE_OFFSET_MASK);
289 return VINF_SUCCESS;
290}
291
292
293/**
294 * Queries the Physical TLB entry for a physical guest page,
295 * attempting to load the TLB entry if necessary.
296 *
297 * @returns VBox status code.
298 * @retval VINF_SUCCESS on success
299 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
300 *
301 * @param pVM The cross context VM structure.
302 * @param GCPhys The address of the guest page.
303 * @param ppTlbe Where to store the pointer to the TLB entry.
304 */
305DECLINLINE(int) pgmPhysPageQueryTlbe(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
306{
307 int rc;
308 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
309 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
310 {
311 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbHits));
312 rc = VINF_SUCCESS;
313 }
314 else
315 rc = pgmPhysPageLoadIntoTlb(pVM, GCPhys);
316 *ppTlbe = pTlbe;
317 return rc;
318}
319
320
321/**
322 * Queries the Physical TLB entry for a physical guest page,
323 * attempting to load the TLB entry if necessary.
324 *
325 * @returns VBox status code.
326 * @retval VINF_SUCCESS on success
327 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
328 *
329 * @param pVM The cross context VM structure.
330 * @param pPage Pointer to the PGMPAGE structure corresponding to
331 * GCPhys.
332 * @param GCPhys The address of the guest page.
333 * @param ppTlbe Where to store the pointer to the TLB entry.
334 */
335DECLINLINE(int) pgmPhysPageQueryTlbeWithPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
336{
337 int rc;
338 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
339 if (pTlbe->GCPhys == (GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK))
340 {
341 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbHits));
342 rc = VINF_SUCCESS;
343 AssertPtr(pTlbe->pv);
344#ifdef IN_RING3
345 Assert(!pTlbe->pMap || RT_VALID_PTR(pTlbe->pMap->pv));
346#endif
347 }
348 else
349 rc = pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
350 *ppTlbe = pTlbe;
351 return rc;
352}
353
354
355#ifdef IN_RING3 /** @todo Need to ensure a ring-0 version gets invalidated safely */
356/**
357 * Queries the VCPU local physical TLB entry for a physical guest page,
358 * attempting to load the TLB entry if necessary.
359 *
360 * Will acquire the PGM lock on TLB miss, does not require caller to own it.
361 *
362 * @returns VBox status code.
363 * @retval VINF_SUCCESS on success
364 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
365 *
366 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
367 * @param pPage Pointer to the PGMPAGE structure corresponding to GCPhys.
368 * @param GCPhys The address of the guest page.
369 * @param ppTlbe Where to store the pointer to the TLB entry.
370 * @thread EMT(pVCpu)
371 */
372DECLINLINE(int) pgmPhysPageQueryLocklessTlbeWithPage(PVMCPUCC pVCpu, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
373{
374 int rc;
375 PPGMPAGEMAPTLBE const pTlbe = &pVCpu->pgm.s.PhysTlb.aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
376 if ( pTlbe->GCPhys == (GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK)
377 && pTlbe->pPage == pPage)
378 {
379 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbHits));
380 rc = VINF_SUCCESS;
381 AssertPtr(pTlbe->pv);
382# ifdef IN_RING3
383 Assert(!pTlbe->pMap || RT_VALID_PTR(pTlbe->pMap->pv));
384# endif
385 }
386 else
387 rc = pgmPhysPageLoadIntoLocklessTlbWithPage(pVCpu, pPage, GCPhys);
388 *ppTlbe = pTlbe;
389 return rc;
390}
391#endif /* IN_RING3 */
392
393
394/**
395 * Calculates NEM page protection flags.
396 */
397DECL_FORCE_INLINE(uint32_t) pgmPhysPageCalcNemProtection(PPGMPAGE pPage, PGMPAGETYPE enmType)
398{
399 /*
400 * Deal with potentially writable pages first.
401 */
402 if (PGMPAGETYPE_IS_RWX(enmType))
403 {
404 if (!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
405 {
406 if (PGM_PAGE_IS_ALLOCATED(pPage))
407 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE | NEM_PAGE_PROT_WRITE;
408 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
409 }
410 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
411 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
412 }
413 /*
414 * Potentially readable & executable pages.
415 */
416 else if ( PGMPAGETYPE_IS_ROX(enmType)
417 && !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
418 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
419
420 /*
421 * The rest needs special access handling.
422 */
423 return NEM_PAGE_PROT_NONE;
424}
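
/* Editorial note (not part of the original header) summarizing the decision table
 * implemented above: a writable page type that is allocated and has no active
 * handlers gets READ|WRITE|EXECUTE; the same type with only partial (write)
 * handlers, or while not allocated, gets READ|EXECUTE; read-only/executable page
 * types without all-access handlers also get READ|EXECUTE; everything else must
 * trap into PGM and therefore gets NONE. */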
425
426
427/**
428 * Enables write monitoring for an allocated page.
429 *
430 * The caller is responsible for updating the shadow page tables.
431 *
432 * @param pVM The cross context VM structure.
433 * @param pPage The page to write monitor.
434 * @param GCPhysPage The address of the page.
435 */
436DECLINLINE(void) pgmPhysPageWriteMonitor(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage)
437{
438 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
439 PGM_LOCK_ASSERT_OWNER(pVM);
440
441 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_WRITE_MONITORED);
442 pVM->pgm.s.cMonitoredPages++;
443
444 /* Large pages must be disabled. */
445 if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE)
446 {
447 PPGMPAGE pFirstPage = pgmPhysGetPage(pVM, GCPhysPage & X86_PDE2M_PAE_PG_MASK);
448 AssertFatal(pFirstPage);
449 if (PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE)
450 {
451 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
452 pVM->pgm.s.cLargePagesDisabled++;
453 }
454 else
455 Assert(PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
456 }
457
458#ifdef VBOX_WITH_NATIVE_NEM
459 /* Tell NEM. */
460 if (VM_IS_NEM_ENABLED(pVM))
461 {
462 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
463 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
464 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhysPage);
465 NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
466 pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage) : NULL,
467 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
468 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
469 }
470#endif
471}
472
473#ifndef VBOX_VMM_TARGET_ARMV8
474
475/**
476 * Checks if the no-execute (NX) feature is active (EFER.NXE=1).
477 *
478 * Only used when the guest is in PAE or long mode. This is inlined so that we
479 * can perform consistency checks in debug builds.
480 *
481 * @returns true if it is, false if it isn't.
482 * @param pVCpu The cross context virtual CPU structure.
483 */
484DECL_FORCE_INLINE(bool) pgmGstIsNoExecuteActive(PVMCPUCC pVCpu)
485{
486 Assert(pVCpu->pgm.s.fNoExecuteEnabled == CPUMIsGuestNXEnabled(pVCpu));
487 Assert(CPUMIsGuestInPAEMode(pVCpu) || CPUMIsGuestInLongMode(pVCpu));
488 return pVCpu->pgm.s.fNoExecuteEnabled;
489}
490
491
492/**
493 * Checks if the page size extension (PSE) is currently enabled (CR4.PSE=1).
494 *
495 * Only used when the guest is in paged 32-bit mode. This is inlined so that
496 * we can perform consistency checks in debug builds.
497 *
498 * @returns true if it is, false if it isn't.
499 * @param pVCpu The cross context virtual CPU structure.
500 */
501DECL_FORCE_INLINE(bool) pgmGst32BitIsPageSizeExtActive(PVMCPUCC pVCpu)
502{
503 Assert(pVCpu->pgm.s.fGst32BitPageSizeExtension == CPUMIsGuestPageSizeExtEnabled(pVCpu));
504 Assert(!CPUMIsGuestInPAEMode(pVCpu));
505 Assert(!CPUMIsGuestInLongMode(pVCpu));
506 return pVCpu->pgm.s.fGst32BitPageSizeExtension;
507}
508
509
510/**
511 * Calculates the guest physical address of the large (4 MB) page in 32-bit paging mode.
512 * Takes PSE-36 into account.
513 *
514 * @returns guest physical address
515 * @param pVM The cross context VM structure.
516 * @param Pde Guest Pde
517 */
518DECLINLINE(RTGCPHYS) pgmGstGet4MBPhysPage(PVMCC pVM, X86PDE Pde)
519{
520 RTGCPHYS GCPhys = Pde.u & X86_PDE4M_PG_MASK;
521 GCPhys |= (RTGCPHYS)(Pde.u & X86_PDE4M_PG_HIGH_MASK) << X86_PDE4M_PG_HIGH_SHIFT;
522
523 return GCPhys & pVM->pgm.s.GCPhys4MBPSEMask;
524}
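
/* Editorial example (not part of the original header): a worked PSE-36 case,
 * assuming the usual layout where PDE bits 20:13 carry physical address bits
 * 39:32. For a 4 MB PDE with the value 0x234020E3 (P, RW, A, D and PS set):
 *   low  bits:  0x234020E3 & X86_PDE4M_PG_MASK = 0x23400000
 *   high bits: (0x234020E3 & X86_PDE4M_PG_HIGH_MASK) << X86_PDE4M_PG_HIGH_SHIFT
 *            =  0x2000 << 19 = 0x100000000
 * so the 4 MB page starts at guest physical 0x123400000, before GCPhys4MBPSEMask
 * masks off any bits the guest cannot actually address. */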
525
526
527/**
528 * Gets the address of the guest page directory (32-bit paging).
529 *
530 * @returns VBox status code.
531 * @param pVCpu The cross context virtual CPU structure.
532 * @param ppPd Where to return the mapping. This is always set.
533 */
534DECLINLINE(int) pgmGstGet32bitPDPtrEx(PVMCPUCC pVCpu, PX86PD *ppPd)
535{
536 *ppPd = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
537 if (RT_UNLIKELY(!*ppPd))
538 return pgmGstLazyMap32BitPD(pVCpu, ppPd);
539 return VINF_SUCCESS;
540}
541
542
543/**
544 * Gets the address of the guest page directory (32-bit paging).
545 *
546 * @returns Pointer to the page directory in question, or NULL if it cannot be mapped.
547 * @param pVCpu The cross context virtual CPU structure.
548 */
549DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PVMCPUCC pVCpu)
550{
551 PX86PD pGuestPD = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
552 if (RT_UNLIKELY(!pGuestPD))
553 {
554 int rc = pgmGstLazyMap32BitPD(pVCpu, &pGuestPD);
555 if (RT_FAILURE(rc))
556 return NULL;
557 }
558 return pGuestPD;
559}
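
/* Editorial example (not part of the original header): the Ex variant above
 * reports mapping failures as a status code, while the plain variant collapses
 * them to NULL; a caller that can propagate errors would typically prefer the
 * Ex form, as sketched here. */
#if 0
    PX86PD pPd;
    int rc = pgmGstGet32bitPDPtrEx(pVCpu, &pPd);
    if (RT_SUCCESS(rc))
        Log(("PDE[0]=%#RX32\n", pPd->a[0].u));
    else
        Log(("Guest PD not mappable: %Rrc\n", rc));
#endif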
560
561
562/**
563 * Gets the guest page directory pointer table.
564 *
565 * @returns VBox status code.
566 * @param pVCpu The cross context virtual CPU structure.
567 * @param ppPdpt Where to return the mapping. This is always set.
568 */
569DECLINLINE(int) pgmGstGetPaePDPTPtrEx(PVMCPUCC pVCpu, PX86PDPT *ppPdpt)
570{
571 *ppPdpt = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
572 if (RT_UNLIKELY(!*ppPdpt))
573 return pgmGstLazyMapPaePDPT(pVCpu, ppPdpt);
574 return VINF_SUCCESS;
575}
576
577
578/**
579 * Gets the guest page directory pointer table.
580 *
581 * @returns Pointer to the page directory pointer table in question.
582 * @returns NULL if it is not present or on an invalid page.
583 * @param pVCpu The cross context virtual CPU structure.
584 */
585DECLINLINE(PX86PDPT) pgmGstGetPaePDPTPtr(PVMCPUCC pVCpu)
586{
587 PX86PDPT pGuestPdpt;
588 int rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGuestPdpt);
589 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
590 return pGuestPdpt;
591}
592
593
594/**
595 * Gets the guest page directory pointer table entry for the specified address.
596 *
597 * @returns Pointer to the page directory pointer table entry in question.
598 * @returns NULL if the page directory pointer table is not present or on an invalid page.
599 * @param pVCpu The cross context virtual CPU structure.
600 * @param GCPtr The address.
601 */
602DECLINLINE(PX86PDPE) pgmGstGetPaePDPEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
603{
604 AssertGCPtr32(GCPtr);
605
606 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
607 if (RT_UNLIKELY(!pGuestPDPT))
608 {
609 int rc = pgmGstLazyMapPaePDPT(pVCpu, &pGuestPDPT);
610 if (RT_FAILURE(rc))
611 return NULL;
612 }
613 return &pGuestPDPT->a[(uint32_t)GCPtr >> X86_PDPT_SHIFT];
614}
615
616
617/**
618 * Gets the page directory entry for the specified address.
619 *
620 * @returns The page directory entry in question.
621 * @returns A non-present entry if the page directory is not present or on an invalid page.
622 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
623 * @param GCPtr The address.
624 */
625DECLINLINE(X86PDEPAE) pgmGstGetPaePDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
626{
627 AssertGCPtr32(GCPtr);
628 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
629 if (RT_LIKELY(pGuestPDPT))
630 {
631 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
632 if ((pGuestPDPT->a[iPdpt].u & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
633 {
634 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
635 PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
636 if ( !pGuestPD
637 || (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
638 pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
639 if (pGuestPD)
640 return pGuestPD->a[iPD];
641 }
642 }
643
644 X86PDEPAE ZeroPde = {0};
645 return ZeroPde;
646}
647
648
649/**
650 * Gets the guest PAE page directory for the specified address and returns
651 * the index into it; optionally also returns the page directory pointer entry.
652 *
653 * @returns Pointer to the page directory in question.
654 * @returns NULL if the page directory is not present or on an invalid page.
655 * @param pVCpu The cross context virtual CPU structure.
656 * @param GCPtr The address.
657 * @param piPD Receives the index into the returned page directory
658 * @param pPdpe Receives the page directory pointer entry. Optional.
659 */
660DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, unsigned *piPD, PX86PDPE pPdpe)
661{
662 AssertGCPtr32(GCPtr);
663
664 /* The PDPE. */
665 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
666 if (pGuestPDPT)
667 {
668 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
669 X86PGPAEUINT const uPdpe = pGuestPDPT->a[iPdpt].u;
670 if (pPdpe)
671 pPdpe->u = uPdpe;
672 if ((uPdpe & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
673 {
674
675 /* The PDE. */
676 PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
677 if ( !pGuestPD
678 || (uPdpe & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
679 pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
680 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
681 return pGuestPD;
682 }
683 }
684 return NULL;
685}
686
687
688/**
689 * Gets the page map level-4 pointer for the guest.
690 *
691 * @returns VBox status code.
692 * @param pVCpu The cross context virtual CPU structure.
693 * @param ppPml4 Where to return the mapping. Always set.
694 */
695DECLINLINE(int) pgmGstGetLongModePML4PtrEx(PVMCPUCC pVCpu, PX86PML4 *ppPml4)
696{
697 *ppPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
698 if (RT_UNLIKELY(!*ppPml4))
699 return pgmGstLazyMapPml4(pVCpu, ppPml4);
700 return VINF_SUCCESS;
701}
702
703
704/**
705 * Gets the page map level-4 pointer for the guest.
706 *
707 * @returns Pointer to the PML4 page.
708 * @param pVCpu The cross context virtual CPU structure.
709 */
710DECLINLINE(PX86PML4) pgmGstGetLongModePML4Ptr(PVMCPUCC pVCpu)
711{
712 PX86PML4 pGuestPml4;
713 int rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGuestPml4);
714 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
715 return pGuestPml4;
716}
717
718
719/**
720 * Gets the pointer to a page map level-4 entry.
721 *
722 * @returns Pointer to the PML4 entry.
723 * @param pVCpu The cross context virtual CPU structure.
724 * @param iPml4 The index.
725 * @remarks Only used by AssertCR3.
726 */
727DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PVMCPUCC pVCpu, unsigned int iPml4)
728{
729 PX86PML4 pGuestPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
730 if (pGuestPml4)
731 { /* likely */ }
732 else
733 {
734 int rc = pgmGstLazyMapPml4(pVCpu, &pGuestPml4);
735 AssertRCReturn(rc, NULL);
736 }
737 return &pGuestPml4->a[iPml4];
738}
739
740
741/**
742 * Gets the page directory entry for the specified address.
743 *
744 * @returns The page directory entry in question.
745 * @returns A non-present entry if the page directory is not present or on an invalid page.
746 * @param pVCpu The cross context virtual CPU structure.
747 * @param GCPtr The address.
748 */
749DECLINLINE(X86PDEPAE) pgmGstGetLongModePDE(PVMCPUCC pVCpu, RTGCPTR64 GCPtr)
750{
751 /*
752 * Note! To keep things simple, ASSUME invalid physical addresses will
753 * cause X86_TRAP_PF_RSVD. This isn't a problem until we start
754 * supporting 52-bit wide physical guest addresses.
755 */
756 PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
757 if (RT_LIKELY(pGuestPml4))
758 {
759 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
760 X86PGPAEUINT const uPml4e = pGuestPml4->a[iPml4].u;
761 if ((uPml4e & (pVCpu->pgm.s.fGstAmd64MbzPml4eMask | X86_PML4E_P)) == X86_PML4E_P)
762 {
763 PCX86PDPT pPdptTemp;
764 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPml4e & X86_PML4E_PG_MASK, &pPdptTemp);
765 if (RT_SUCCESS(rc))
766 {
767 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
768 X86PGPAEUINT const uPdpte = pPdptTemp->a[iPdpt].u;
769 if ((uPdpte & (pVCpu->pgm.s.fGstAmd64MbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
770 {
771 PCX86PDPAE pPD;
772 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPdpte & X86_PDPE_PG_MASK, &pPD);
773 if (RT_SUCCESS(rc))
774 {
775 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
776 return pPD->a[iPD];
777 }
778 }
779 }
780 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
781 }
782 }
783
784 X86PDEPAE ZeroPde = {0};
785 return ZeroPde;
786}
787
788
789/**
790 * Gets the GUEST page directory pointer for the specified address.
791 *
792 * @returns The page directory in question.
793 * @returns NULL if the page directory is not present or on an invalid page.
794 * @param pVCpu The cross context virtual CPU structure.
795 * @param GCPtr The address.
796 * @param ppPml4e Page Map Level-4 Entry (out)
797 * @param pPdpe Page directory pointer table entry (out)
798 * @param piPD Receives the index into the returned page directory
799 */
800DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
801{
802 /* The PML4E. */
803 PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
804 if (pGuestPml4)
805 {
806 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
807 *ppPml4e = &pGuestPml4->a[iPml4];
808 X86PGPAEUINT const uPml4e = pGuestPml4->a[iPml4].u;
809 if ((uPml4e & (pVCpu->pgm.s.fGstAmd64MbzPml4eMask | X86_PML4E_P)) == X86_PML4E_P)
810 {
811 /* The PDPE. */
812 PCX86PDPT pPdptTemp;
813 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPml4e & X86_PML4E_PG_MASK, &pPdptTemp);
814 if (RT_SUCCESS(rc))
815 {
816 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
817 X86PGPAEUINT const uPdpe = pPdptTemp->a[iPdpt].u;
818 pPdpe->u = uPdpe;
819 if ((uPdpe & (pVCpu->pgm.s.fGstAmd64MbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
820 {
821 /* The PDE. */
822 PX86PDPAE pPD;
823 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPdpe & X86_PDPE_PG_MASK, &pPD);
824 if (RT_SUCCESS(rc))
825 {
826 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
827 return pPD;
828 }
829 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
830 }
831 }
832 else
833 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
834 }
835 }
836 return NULL;
837}
838
839
840#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
841# if 0
842/**
843 * Gets the pointer to a page map level-4 entry when the guest using EPT paging.
844 *
845 * @returns Pointer to the PML4 entry.
846 * @param pVCpu The cross context virtual CPU structure.
847 * @param iPml4 The index.
848 * @remarks Only used by AssertCR3.
849 */
850DECLINLINE(PEPTPML4E) pgmGstGetEptPML4EPtr(PVMCPUCC pVCpu, unsigned int iPml4)
851{
852 PEPTPML4 pEptPml4 = pVCpu->pgm.s.CTX_SUFF(pGstEptPml4);
853 if (pEptPml4)
854 { /* likely */ }
855 else
856 {
857 int const rc = pgmGstLazyMapEptPml4(pVCpu, &pEptPml4);
858 AssertRCReturn(rc, NULL);
859 }
860 return &pEptPml4->a[iPml4];
861}
862# endif
863
864
865/**
866 * Gets the page map level-4 pointer for the guest when the guest is using EPT
867 * paging.
868 *
869 * @returns VBox status code.
870 * @param pVCpu The cross context virtual CPU structure.
871 * @param ppEptPml4 Where to return the mapping. Always set.
872 */
873DECLINLINE(int) pgmGstGetEptPML4PtrEx(PVMCPUCC pVCpu, PEPTPML4 *ppEptPml4)
874{
875 /* Shadow CR3 might not have been mapped at this point, see PGMHCChangeMode. */
876 *ppEptPml4 = pVCpu->pgm.s.CTX_SUFF(pGstEptPml4);
877 if (!*ppEptPml4)
878 return pgmGstLazyMapEptPml4(pVCpu, ppEptPml4);
879 return VINF_SUCCESS;
880}
881
882
883# if 0
884/**
885 * Gets the page map level-4 pointer for the guest when the guest is using EPT
886 * paging.
887 *
888 * @returns Pointer to the EPT PML4 page.
889 * @param pVCpu The cross context virtual CPU structure.
890 */
891DECLINLINE(PEPTPML4) pgmGstGetEptPML4Ptr(PVMCPUCC pVCpu)
892{
893 PEPTPML4 pEptPml4;
894 int rc = pgmGstGetEptPML4PtrEx(pVCpu, &pEptPml4);
895 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
896 return pEptPml4;
897}
898# endif
899#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
900
901#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
902
903/**
904 * Gets the shadow page directory, 32-bit.
905 *
906 * @returns Pointer to the shadow 32-bit PD.
907 * @param pVCpu The cross context virtual CPU structure.
908 */
909DECLINLINE(PX86PD) pgmShwGet32BitPDPtr(PVMCPUCC pVCpu)
910{
911 return (PX86PD)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
912}
913
914
915/**
916 * Gets the shadow page directory entry for the specified address, 32-bit.
917 *
918 * @returns Shadow 32-bit PDE.
919 * @param pVCpu The cross context virtual CPU structure.
920 * @param GCPtr The address.
921 */
922DECLINLINE(X86PDE) pgmShwGet32BitPDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
923{
924 PX86PD pShwPde = pgmShwGet32BitPDPtr(pVCpu);
925 if (!pShwPde)
926 {
927 X86PDE ZeroPde = {0};
928 return ZeroPde;
929 }
930 return pShwPde->a[(uint32_t)GCPtr >> X86_PD_SHIFT];
931}
932
933
934/**
935 * Gets the pointer to the shadow page directory entry for the specified
936 * address, 32-bit.
937 *
938 * @returns Pointer to the shadow 32-bit PDE.
939 * @param pVCpu The cross context virtual CPU structure.
940 * @param GCPtr The address.
941 */
942DECLINLINE(PX86PDE) pgmShwGet32BitPDEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
943{
944 PX86PD pPde = pgmShwGet32BitPDPtr(pVCpu);
945 AssertReturn(pPde, NULL);
946 return &pPde->a[(uint32_t)GCPtr >> X86_PD_SHIFT];
947}
948
949
950/**
951 * Gets the shadow page directory pointer table, PAE.
952 *
953 * @returns Pointer to the shadow PAE PDPT.
954 * @param pVCpu The cross context virtual CPU structure.
955 */
956DECLINLINE(PX86PDPT) pgmShwGetPaePDPTPtr(PVMCPUCC pVCpu)
957{
958 return (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
959}
960
961
962/**
963 * Gets the shadow page directory for the specified address, PAE.
964 *
965 * @returns Pointer to the shadow PD.
966 * @param pVCpu The cross context virtual CPU structure.
967 * @param pPdpt Pointer to the page directory pointer table.
968 * @param GCPtr The address.
969 */
970DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPUCC pVCpu, PX86PDPT pPdpt, RTGCPTR GCPtr)
971{
972 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
973 if (pPdpt->a[iPdpt].u & X86_PDPE_P)
974 {
975 /* Fetch the pgm pool shadow descriptor. */
976 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
977 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
978 AssertReturn(pShwPde, NULL);
979
980 return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);
981 }
982 return NULL;
983}
984
985
986/**
987 * Gets the shadow page directory for the specified address, PAE.
988 *
989 * @returns Pointer to the shadow PD.
990 * @param pVCpu The cross context virtual CPU structure.
991 * @param GCPtr The address.
992 */
993DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
994{
995 return pgmShwGetPaePDPtr(pVCpu, pgmShwGetPaePDPTPtr(pVCpu), GCPtr);
996}
997
998
999/**
1000 * Gets the shadow page directory entry, PAE.
1001 *
1002 * @returns PDE.
1003 * @param pVCpu The cross context virtual CPU structure.
1004 * @param GCPtr The address.
1005 */
1006DECLINLINE(X86PDEPAE) pgmShwGetPaePDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1007{
1008 const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
1009 PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
1010 if (pShwPde)
1011 return pShwPde->a[iPd];
1012
1013 X86PDEPAE ZeroPde = {0};
1014 return ZeroPde;
1015}
1016
1017
1018/**
1019 * Gets the pointer to the shadow page directory entry for an address, PAE.
1020 *
1021 * @returns Pointer to the PDE.
1022 * @param pVCpu The cross context virtual CPU structure.
1023 * @param GCPtr The address.
1024 * @remarks Only used by AssertCR3.
1025 */
1026DECLINLINE(PX86PDEPAE) pgmShwGetPaePDEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1027{
1028 const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
1029 PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
1030 AssertReturn(pShwPde, NULL);
1031 return &pShwPde->a[iPd];
1032}
1033
1034
1035/**
1036 * Gets the shadow page map level-4 pointer.
1037 *
1038 * @returns Pointer to the shadow PML4.
1039 * @param pVCpu The cross context virtual CPU structure.
1040 */
1041DECLINLINE(PX86PML4) pgmShwGetLongModePML4Ptr(PVMCPUCC pVCpu)
1042{
1043 return (PX86PML4)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1044}
1045
1046
1047/**
1048 * Gets the shadow page map level-4 entry for the specified address.
1049 *
1050 * @returns The entry.
1051 * @param pVCpu The cross context virtual CPU structure.
1052 * @param GCPtr The address.
1053 */
1054DECLINLINE(X86PML4E) pgmShwGetLongModePML4E(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1055{
1056 const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1057 PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
1058 if (pShwPml4)
1059 return pShwPml4->a[iPml4];
1060
1061 X86PML4E ZeroPml4e = {0};
1062 return ZeroPml4e;
1063}
1064
1065
1066/**
1067 * Gets the pointer to the specified shadow page map level-4 entry.
1068 *
1069 * @returns Pointer to the entry, or NULL if no shadow PML4 is mapped.
1070 * @param pVCpu The cross context virtual CPU structure.
1071 * @param iPml4 The PML4 index.
1072 */
1073DECLINLINE(PX86PML4E) pgmShwGetLongModePML4EPtr(PVMCPUCC pVCpu, unsigned int iPml4)
1074{
1075 PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
1076 if (pShwPml4)
1077 return &pShwPml4->a[iPml4];
1078 return NULL;
1079}
1080
1081# endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
1082#endif /* !VBOX_VMM_TARGET_ARMV8 */
1083
1084/**
1085 * Cached physical handler lookup.
1086 *
1087 * @returns VBox status code.
1088 * @retval VERR_NOT_FOUND if no handler.
1089 * @param pVM The cross context VM structure.
1090 * @param GCPhys The lookup address.
1091 * @param ppHandler Where to return the handler pointer.
1092 */
1093DECLINLINE(int) pgmHandlerPhysicalLookup(PVMCC pVM, RTGCPHYS GCPhys, PPGMPHYSHANDLER *ppHandler)
1094{
1095 PPGMPHYSHANDLER pHandler = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.ptrFromInt(pVM->pgm.s.idxLastPhysHandler);
1096 if ( pHandler
1097 && pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.isPtrRetOkay(pHandler)
1098 && GCPhys >= pHandler->Key
1099 && GCPhys < pHandler->KeyLast
1100 && pHandler->hType != NIL_PGMPHYSHANDLERTYPE
1101 && pHandler->hType != 0)
1102
1103 {
1104 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerLookupHits));
1105 *ppHandler = pHandler;
1106 return VINF_SUCCESS;
1107 }
1108
1109 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerLookupMisses));
1110 AssertPtrReturn(pVM->VMCC_CTX(pgm).s.pPhysHandlerTree, VERR_PGM_HANDLER_IPE_1);
1111 int rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pHandler);
1112 if (RT_SUCCESS(rc))
1113 {
1114 *ppHandler = pHandler;
1115 pVM->pgm.s.idxLastPhysHandler = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.ptrToInt(pHandler);
1116 return VINF_SUCCESS;
1117 }
1118 *ppHandler = NULL;
1119 return rc;
1120}
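
/* Editorial example (not part of the original header): a minimal sketch of using
 * the cached lookup while holding the PGM lock; VERR_NOT_FOUND simply means no
 * physical access handler covers GCPhys. */
#if 0
    PPGMPHYSHANDLER pHandler;
    int rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pHandler);
    if (RT_SUCCESS(rc))
    {
        PCPGMPHYSHANDLERTYPEINT pType = pgmHandlerPhysicalTypeHandleToPtr2(pVM, pHandler->hType);
        /* ... dispatch according to the handler type ... */
        RT_NOREF(pType);
    }
    else
        Assert(rc == VERR_NOT_FOUND);
#endif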
1121
1122
1123/**
1124 * Converts a handle to a pointer.
1125 *
1126 * @returns Pointer on success, NULL on failure (asserted).
1127 * @param pVM The cross context VM structure.
1128 * @param hType Physical access handler type handle.
1129 */
1130DECLINLINE(PCPGMPHYSHANDLERTYPEINT) pgmHandlerPhysicalTypeHandleToPtr(PVMCC pVM, PGMPHYSHANDLERTYPE hType)
1131{
1132#ifdef IN_RING0
1133 PPGMPHYSHANDLERTYPEINT pType = &pVM->pgmr0.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK];
1134#elif defined(IN_RING3)
1135 PPGMPHYSHANDLERTYPEINT pType = &pVM->pgm.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK];
1136#else
1137# error "Invalid context"
1138#endif
1139 AssertReturn(pType->hType == hType, NULL);
1140 return pType;
1141}
1142
1143
1144/**
1145 * Converts a handle to a pointer, never returns NULL.
1146 *
1147 * @returns Pointer on success, dummy on failure (asserted).
1148 * @param pVM The cross context VM structure.
1149 * @param hType Physical access handler type handle.
1150 */
1151DECLINLINE(PCPGMPHYSHANDLERTYPEINT) pgmHandlerPhysicalTypeHandleToPtr2(PVMCC pVM, PGMPHYSHANDLERTYPE hType)
1152{
1153#ifdef IN_RING0
1154 PPGMPHYSHANDLERTYPEINT pType = &pVM->pgmr0.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK];
1155#elif defined(IN_RING3)
1156 PPGMPHYSHANDLERTYPEINT pType = &pVM->pgm.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK];
1157#else
1158# error "Invalid context"
1159#endif
1160 AssertReturn(pType->hType == hType, &g_pgmHandlerPhysicalDummyType);
1161 return pType;
1162}
1163
1164
1165/**
1166 * Internal worker for finding an 'in-use' shadow page given by its pool index.
1167 *
1168 * @returns Pointer to the shadow page structure.
1169 * @param pPool The pool.
1170 * @param idx The pool page index.
1171 */
1172DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
1173{
1174 AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
1175 return &pPool->aPages[idx];
1176}
1177
1178
1179/**
1180 * Clear references to guest physical memory.
1181 *
1182 * @param pPool The pool.
1183 * @param pPoolPage The pool page.
1184 * @param pPhysPage The physical guest page tracking structure.
1185 * @param iPte Shadow PTE index
1186 */
1187DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage, uint16_t iPte)
1188{
1189 /*
1190 * Just deal with the simple case here.
1191 */
1192#ifdef VBOX_STRICT
1193 PVMCC pVM = pPool->CTX_SUFF(pVM); NOREF(pVM);
1194#endif
1195#ifdef LOG_ENABLED
1196 const unsigned uOrg = PGM_PAGE_GET_TRACKING(pPhysPage);
1197#endif
1198 const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
1199 if (cRefs == 1)
1200 {
1201#if 0 /* for more debug info */
1202 AssertMsg( pPoolPage->idx == PGM_PAGE_GET_TD_IDX(pPhysPage)
1203 && iPte == PGM_PAGE_GET_PTE_INDEX(pPhysPage),
1204 ("idx=%#x iPte=%#x enmKind=%d vs pPhysPage=%R[pgmpage] idx=%#x iPte=%#x enmKind=%d [iPte]=%#RX64\n",
1205 pPoolPage->idx, iPte, pPoolPage->enmKind,
1206 pPhysPage, PGM_PAGE_GET_TD_IDX(pPhysPage), PGM_PAGE_GET_PTE_INDEX(pPhysPage),
1207 pPool->aPages[PGM_PAGE_GET_TD_IDX(pPhysPage)].enmKind,
1208 ((uint64_t *)pPoolPage->CTX_SUFF(pvPage))[iPte]));
1209#else
1210 Assert(pPoolPage->idx == PGM_PAGE_GET_TD_IDX(pPhysPage));
1211 Assert(iPte == PGM_PAGE_GET_PTE_INDEX(pPhysPage));
1212#endif
1213 /* Invalidate the tracking data. */
1214 PGM_PAGE_SET_TRACKING(pVM, pPhysPage, 0);
1215 }
1216 else
1217 pgmPoolTrackPhysExtDerefGCPhys(pPool, pPoolPage, pPhysPage, iPte);
1218 Log2(("pgmTrackDerefGCPhys: %x -> %x pPhysPage=%R[pgmpage]\n", uOrg, PGM_PAGE_GET_TRACKING(pPhysPage), pPhysPage ));
1219}
1220
1221
1222/**
1223 * Moves the page to the head of the age list.
1224 *
1225 * This is done when the cached page is used in one way or another.
1226 *
1227 * @param pPool The pool.
1228 * @param pPage The cached page.
1229 */
1230DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1231{
1232 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM));
1233
1234 /*
1235 * Move to the head of the age list.
1236 */
1237 if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
1238 {
1239 /* unlink */
1240 pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
1241 if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
1242 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
1243 else
1244 pPool->iAgeTail = pPage->iAgePrev;
1245
1246 /* insert at head */
1247 pPage->iAgePrev = NIL_PGMPOOL_IDX;
1248 pPage->iAgeNext = pPool->iAgeHead;
1249 Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
1250 pPool->iAgeHead = pPage->idx;
1251 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
1252 }
1253}
1254
1255
1256/**
1257 * Locks a page to prevent flushing (important for cr3 root pages or shadow pae pd pages).
1258 *
1259 * @param pPool The pool.
1260 * @param pPage PGM pool page
1261 */
1262DECLINLINE(void) pgmPoolLockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1263{
1264 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM)); NOREF(pPool);
1265 ASMAtomicIncU32(&pPage->cLocked);
1266}
1267
1268
1269/**
1270 * Unlocks a page to allow flushing again.
1271 *
1272 * @param pPool The pool.
1273 * @param pPage PGM pool page
1274 */
1275DECLINLINE(void) pgmPoolUnlockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1276{
1277 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM)); NOREF(pPool);
1278 Assert(pPage->cLocked);
1279 ASMAtomicDecU32(&pPage->cLocked);
1280}
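
/* Editorial example (not part of the original header): the usual pairing, under
 * the PGM lock, around work on a shadow page that must not be flushed while in
 * use (e.g. a CR3 root page). */
#if 0
    pgmPoolLockPage(pPool, pPage);
    /* ... operate on the shadow page while it is pinned ... */
    pgmPoolUnlockPage(pPool, pPage);
#endif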
1281
1282
1283/**
1284 * Checks if the page is locked (e.g. the active CR3 or one of the four PDs of a PAE PDPT).
1285 *
1286 * @returns true if the page is locked, false if not.
1287 * @param pPage PGM pool page
1288 */
1289DECLINLINE(bool) pgmPoolIsPageLocked(PPGMPOOLPAGE pPage)
1290{
1291 if (pPage->cLocked)
1292 {
1293 LogFlow(("pgmPoolIsPageLocked found root page %d\n", pPage->enmKind));
1294 if (pPage->cModifications)
1295 pPage->cModifications = 1; /* reset counter (can't use 0, or else it will be reinserted in the modified list) */
1296 return true;
1297 }
1298 return false;
1299}
1300
1301
1302/**
1303 * Checks if the specified page is dirty (not write monitored).
1304 *
1305 * @returns true if dirty, false if not.
1306 * @param pVM The cross context VM structure.
1307 * @param GCPhys Guest physical address
1308 */
1309DECLINLINE(bool) pgmPoolIsDirtyPage(PVMCC pVM, RTGCPHYS GCPhys)
1310{
1311 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1312 PGM_LOCK_ASSERT_OWNER(pVM);
1313 if (!pPool->cDirtyPages)
1314 return false;
1315 return pgmPoolIsDirtyPageSlow(pVM, GCPhys);
1316}
1317
1318
1319/** @} */
1320
1321#endif /* !VMM_INCLUDED_SRC_include_PGMInline_h */
1322