VirtualBox

source: vbox/trunk/src/VBox/VMM/include/PGMInline.h

Last change on this file was 106061, checked in by vboxsync on 2024-09-16

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 42.6 KB
1/* $Id: PGMInline.h 106061 2024-09-16 14:03:52Z vboxsync $ */
2/** @file
3 * PGM - Inlined functions.
4 */
5
6/*
7 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_PGMInline_h
29#define VMM_INCLUDED_SRC_include_PGMInline_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34#include <VBox/cdefs.h>
35#include <VBox/types.h>
36#include <VBox/err.h>
37#include <VBox/vmm/stam.h>
38#include <VBox/param.h>
39#include <VBox/vmm/vmm.h>
40#include <VBox/vmm/mm.h>
41#include <VBox/vmm/pdmcritsect.h>
42#include <VBox/vmm/pdmapi.h>
43#include <VBox/dis.h>
44#include <VBox/vmm/dbgf.h>
45#include <VBox/log.h>
46#include <VBox/vmm/gmm.h>
47#include <VBox/vmm/hm.h>
48#include <VBox/vmm/nem.h>
49#include <iprt/asm.h>
50#include <iprt/assert.h>
51#include <iprt/avl.h>
52#include <iprt/critsect.h>
53#include <iprt/sha.h>
54
55
56
57/** @addtogroup grp_pgm_int Internals
58 * @internal
59 * @{
60 */
61
62/**
63 * Gets the PGMRAMRANGE structure for a guest page.
64 *
65 * @returns Pointer to the RAM range on success.
66 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
67 *
68 * @param pVM The cross context VM structure.
69 * @param GCPhys The GC physical address.
70 */
71DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PVMCC pVM, RTGCPHYS GCPhys)
72{
73 PPGMRAMRANGE const pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)];
74 if (pRam)
75 {
76 RTGCPHYS const GCPhysFirst = pRam->GCPhys;
77 if (GCPhys - GCPhysFirst < pRam->cb && GCPhys >= GCPhysFirst)
78 {
79 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
80 return pRam;
81 }
82 }
83 return pgmPhysGetRangeSlow(pVM, GCPhys);
84}
85
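/* Usage sketch (illustrative only, with GCPhys supplied by the caller and the
 * PGM lock assumed to be held): resolve the RAM range covering a guest
 * physical address and index the page descriptor within it.
 *
 *     PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
 *     if (pRam)
 *     {
 *         RTGCPHYS const off   = GCPhys - pRam->GCPhys;
 *         PPGMPAGE       pPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
 *         // inspect or update *pPage here
 *     }
 */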
86
87/**
88 * Gets the PGMRAMRANGE structure for a guest page; if unassigned, gets the RAM
89 * range above it.
90 *
91 * @returns Pointer to the RAM range on success.
92 * @returns NULL if the address is located after the last range.
93 *
94 * @param pVM The cross context VM structure.
95 * @param GCPhys The GC physical address.
96 */
97DECLINLINE(PPGMRAMRANGE) pgmPhysGetRangeAtOrAbove(PVMCC pVM, RTGCPHYS GCPhys)
98{
99 PPGMRAMRANGE const pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)];
100 if (pRam)
101 {
102 RTGCPHYS const GCPhysFirst = pRam->GCPhys;
103 if (GCPhys - GCPhysFirst < pRam->cb && GCPhys >= GCPhysFirst)
104 {
105 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
106 return pRam;
107 }
108 }
109 return pgmPhysGetRangeAtOrAboveSlow(pVM, GCPhys);
110}
111
112
113/**
114 * Gets the PGMPAGE structure for a guest page.
115 *
116 * @returns Pointer to the page on success.
117 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
118 *
119 * @param pVM The cross context VM structure.
120 * @param GCPhys The GC physical address.
121 */
122DECLINLINE(PPGMPAGE) pgmPhysGetPage(PVMCC pVM, RTGCPHYS GCPhys)
123{
124 PPGMRAMRANGE const pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)];
125 if (pRam)
126 {
127 RTGCPHYS const GCPhysFirst = pRam->GCPhys;
128 RTGCPHYS const off = GCPhys - GCPhysFirst;
129 if (off < pRam->cb && GCPhys >= GCPhysFirst)
130 {
131 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
132 return &pRam->aPages[off >> GUEST_PAGE_SHIFT];
133 }
134 }
135 return pgmPhysGetPageSlow(pVM, GCPhys);
136}
137
138
139/**
140 * Gets the PGMPAGE structure for a guest page.
141 *
142 * Old Phys code: Will make sure the page is present.
143 *
144 * @returns VBox status code.
145 * @retval VINF_SUCCESS and a valid *ppPage on success.
146 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
147 *
148 * @param pVM The cross context VM structure.
149 * @param GCPhys The GC physical address.
150 * @param ppPage Where to store the page pointer on success.
151 */
152DECLINLINE(int) pgmPhysGetPageEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
153{
154 PPGMRAMRANGE const pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)];
155 if (pRam)
156 {
157 RTGCPHYS const GCPhysFirst = pRam->GCPhys;
158 RTGCPHYS const off = GCPhys - GCPhysFirst;
159 if (off < pRam->cb && GCPhys >= GCPhysFirst)
160 {
161 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
162 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
163 return VINF_SUCCESS;
164 }
165 }
166 return pgmPhysGetPageExSlow(pVM, GCPhys, ppPage);
167}
168
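/* Usage sketch (illustrative, assuming the PGM lock is held): translate a
 * guest physical address into its PGMPAGE descriptor and check its state.
 *
 *     PPGMPAGE pPage;
 *     int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
 *     if (RT_SUCCESS(rc))
 *     {
 *         if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
 *         {
 *             // fully allocated page; safe to work with its backing
 *         }
 *     }
 *     else
 *     {
 *         // rc is VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS for unmapped addresses
 *     }
 */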
169
170/**
171 * Gets the PGMPAGE structure for a guest page.
172 *
173 * Old Phys code: Will make sure the page is present.
174 *
175 * @returns VBox status code.
176 * @retval VINF_SUCCESS and a valid *ppPage on success.
177 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
178 *
179 * @param pVM The cross context VM structure.
180 * @param GCPhys The GC physical address.
181 * @param ppPage Where to store the page pointer on success.
182 * @param ppRamHint Where to read and store the ram list hint.
183 * The caller initializes this to NULL before the call.
184 */
185DECLINLINE(int) pgmPhysGetPageWithHintEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)
186{
187 PPGMRAMRANGE pRam = *ppRamHint;
188 RTGCPHYS GCPhysFirst;
189 RTGCPHYS off;
190 if ( !pRam
191 || RT_UNLIKELY( (off = GCPhys - (GCPhysFirst = pRam->GCPhys)) >= pRam->cb
192 || GCPhys < GCPhysFirst) )
193 {
194 pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)];
195 if ( !pRam
196 || (off = GCPhys - (GCPhysFirst = pRam->GCPhys)) >= pRam->cb
197 || GCPhys < GCPhysFirst)
198 return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRamHint);
199
200 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
201 *ppRamHint = pRam;
202 }
203 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
204 return VINF_SUCCESS;
205}
206
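/* Usage sketch (illustrative; GCPhysFirst and cPages are caller supplied):
 * walk a run of guest pages while reusing the RAM range hint, which must be
 * initialized to NULL as noted above.
 *
 *     PPGMRAMRANGE pRamHint = NULL;
 *     for (uint32_t iPage = 0; iPage < cPages; iPage++)
 *     {
 *         PPGMPAGE pPage;
 *         int rc = pgmPhysGetPageWithHintEx(pVM, GCPhysFirst + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT),
 *                                           &pPage, &pRamHint);
 *         if (RT_FAILURE(rc))
 *             break;
 *         // process *pPage
 *     }
 */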
207
208/**
209 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
210 *
211 * @returns VINF_SUCCESS and valid *ppPage / *ppRam on success.
212 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
213 *
214 * @param pVM The cross context VM structure.
215 * @param GCPhys The GC physical address.
216 * @param ppPage Where to store the pointer to the PGMPAGE structure.
217 * @param ppRam Where to store the pointer to the PGMRAMRANGE structure.
218 */
219DECLINLINE(int) pgmPhysGetPageAndRangeEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
220{
221 PPGMRAMRANGE pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)];
222 if (pRam)
223 {
224 RTGCPHYS const GCPhysFirst = pRam->GCPhys;
225 RTGCPHYS const off = GCPhys - GCPhysFirst;
226 if (off < pRam->cb && GCPhys >= GCPhysFirst)
227 {
228 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
229 *ppRam = pRam;
230 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
231 return VINF_SUCCESS;
232 }
233 }
234 return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRam);
235}
236
237
238/**
239 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
240 *
241 * @returns VINF_SUCCESS and valid *ppPage / *ppRam on success.
242 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
243 *
244 * @param pVM The cross context VM structure.
245 * @param pVCpu The cross context virtual CPU structure.
246 * @param GCPhys The GC physical address.
247 * @param ppPage Where to store the pointer to the PGMPAGE structure.
248 * @param ppRam Where to store the pointer to the PGMRAMRANGE structure.
249 */
250DECLINLINE(int) pgmPhysGetPageAndRangeExLockless(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys,
251 PGMPAGE volatile **ppPage, PGMRAMRANGE volatile **ppRam)
252{
253 PGMRAMRANGE volatile * const pRam = pVCpu->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)];
254 if (RT_LIKELY(pRam))
255 {
256 RTGCPHYS const GCPhysFirst = pRam->GCPhys;
257 RTGCPHYS const off = GCPhys - GCPhysFirst;
258 if (RT_LIKELY( off < pRam->cb
259 && GCPhys >= GCPhysFirst))
260 {
261 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
262 *ppRam = pRam;
263 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
264 return VINF_SUCCESS;
265 }
266 }
267 return pgmPhysGetPageAndRangeExSlowLockless(pVM, pVCpu, GCPhys, ppPage, ppRam);
268}
269
270
271/**
272 * Convert GC Phys to HC Phys.
273 *
274 * @returns VBox status code.
275 * @param pVM The cross context VM structure.
276 * @param GCPhys The GC physical address.
277 * @param pHCPhys Where to store the corresponding HC physical address.
278 *
279 * @deprecated Doesn't deal with zero, shared or write monitored pages.
280 * Avoid when writing new code!
281 */
282DECLINLINE(int) pgmRamGCPhys2HCPhys(PVMCC pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
283{
284 PPGMPAGE pPage;
285 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
286 if (RT_FAILURE(rc))
287 return rc;
288 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & GUEST_PAGE_OFFSET_MASK);
289 return VINF_SUCCESS;
290}
291
292
293/**
294 * Queries the Physical TLB entry for a physical guest page,
295 * attempting to load the TLB entry if necessary.
296 *
297 * @returns VBox status code.
298 * @retval VINF_SUCCESS on success
299 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
300 *
301 * @param pVM The cross context VM structure.
302 * @param GCPhys The address of the guest page.
303 * @param ppTlbe Where to store the pointer to the TLB entry.
304 */
305DECLINLINE(int) pgmPhysPageQueryTlbe(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
306{
307 int rc;
308 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
309 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
310 {
311 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbHits));
312 rc = VINF_SUCCESS;
313 }
314 else
315 rc = pgmPhysPageLoadIntoTlb(pVM, GCPhys);
316 *ppTlbe = pTlbe;
317 return rc;
318}
319
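/* Usage sketch (illustrative, assuming the PGM lock is held): resolve a host
 * mapping for a guest physical address through the physical TLB.
 *
 *     PPGMPAGEMAPTLBE pTlbe;
 *     int rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
 *     if (RT_SUCCESS(rc))
 *     {
 *         void *pv = (uint8_t *)pTlbe->pv + (GCPhys & GUEST_PAGE_OFFSET_MASK);
 *         // access the page content through pv while the lock is held
 *     }
 */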
320
321/**
322 * Queries the Physical TLB entry for a physical guest page,
323 * attempting to load the TLB entry if necessary.
324 *
325 * @returns VBox status code.
326 * @retval VINF_SUCCESS on success
327 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
328 *
329 * @param pVM The cross context VM structure.
330 * @param pPage Pointer to the PGMPAGE structure corresponding to
331 * GCPhys.
332 * @param GCPhys The address of the guest page.
333 * @param ppTlbe Where to store the pointer to the TLB entry.
334 */
335DECLINLINE(int) pgmPhysPageQueryTlbeWithPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
336{
337 int rc;
338 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
339 if (pTlbe->GCPhys == (GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK))
340 {
341 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbHits));
342 rc = VINF_SUCCESS;
343 AssertPtr(pTlbe->pv);
344#ifdef IN_RING3
345 Assert(!pTlbe->pMap || RT_VALID_PTR(pTlbe->pMap->pv));
346#endif
347 }
348 else
349 rc = pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
350 *ppTlbe = pTlbe;
351 return rc;
352}
353
354
355#ifdef IN_RING3 /** @todo Need ensure a ring-0 version gets invalidated safely */
356/**
357 * Queries the VCPU local physical TLB entry for a physical guest page,
358 * attempting to load the TLB entry if necessary.
359 *
360 * Will acquire the PGM lock on a TLB miss; does not require the caller to own it.
361 *
362 * @returns VBox status code.
363 * @retval VINF_SUCCESS on success
364 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
365 *
366 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
367 * @param pPage Pointer to the PGMPAGE structure corresponding to GCPhys.
368 * @param GCPhys The address of the guest page.
369 * @param ppTlbe Where to store the pointer to the TLB entry.
370 * @thread EMT(pVCpu)
371 */
372DECLINLINE(int) pgmPhysPageQueryLocklessTlbeWithPage(PVMCPUCC pVCpu, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
373{
374 int rc;
375 PPGMPAGEMAPTLBE const pTlbe = &pVCpu->pgm.s.PhysTlb.aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
376 if ( pTlbe->GCPhys == (GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK)
377 && pTlbe->pPage == pPage)
378 {
379 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbHits));
380 rc = VINF_SUCCESS;
381 AssertPtr(pTlbe->pv);
382# ifdef IN_RING3
383 Assert(!pTlbe->pMap || RT_VALID_PTR(pTlbe->pMap->pv));
384# endif
385 }
386 else
387 rc = pgmPhysPageLoadIntoLocklessTlbWithPage(pVCpu, pPage, GCPhys);
388 *ppTlbe = pTlbe;
389 return rc;
390}
391#endif /* IN_RING3 */
392
393
394/**
395 * Calculates NEM page protection flags.
396 */
397DECL_FORCE_INLINE(uint32_t) pgmPhysPageCalcNemProtection(PPGMPAGE pPage, PGMPAGETYPE enmType)
398{
399 /*
400 * Deal with potentially writable pages first.
401 */
402 if (PGMPAGETYPE_IS_RWX(enmType))
403 {
404 if (!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
405 {
406 if (PGM_PAGE_IS_ALLOCATED(pPage))
407 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE | NEM_PAGE_PROT_WRITE;
408 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
409 }
410 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
411 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
412 }
413 /*
414 * Potentially readable & executable pages.
415 */
416 else if ( PGMPAGETYPE_IS_ROX(enmType)
417 && !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
418 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
419
420 /*
421 * The rest needs special access handling.
422 */
423 return NEM_PAGE_PROT_NONE;
424}
425
426
427/**
428 * Enables write monitoring for an allocated page.
429 *
430 * The caller is responsible for updating the shadow page tables.
431 *
432 * @param pVM The cross context VM structure.
433 * @param pPage The page to write monitor.
434 * @param GCPhysPage The address of the page.
435 */
436DECLINLINE(void) pgmPhysPageWriteMonitor(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage)
437{
438 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
439 PGM_LOCK_ASSERT_OWNER(pVM);
440
441 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_WRITE_MONITORED);
442 pVM->pgm.s.cMonitoredPages++;
443
444 /* Large pages must be disabled. */
445 if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE)
446 {
447 PPGMPAGE pFirstPage = pgmPhysGetPage(pVM, GCPhysPage & X86_PDE2M_PAE_PG_MASK);
448 AssertFatal(pFirstPage);
449 if (PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE)
450 {
451 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
452 pVM->pgm.s.cLargePagesDisabled++;
453 }
454 else
455 Assert(PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
456 }
457
458#ifdef VBOX_WITH_NATIVE_NEM
459 /* Tell NEM. */
460 if (VM_IS_NEM_ENABLED(pVM))
461 {
462 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
463 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
464 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhysPage);
465 NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
466 pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage) : NULL,
467 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
468 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
469 }
470#endif
471}
472
473#ifndef VBOX_VMM_TARGET_ARMV8
474
475/**
476 * Checks if the no-execute (NX) feature is active (EFER.NXE=1).
477 *
478 * Only used when the guest is in PAE or long mode. This is inlined so that we
479 * can perform consistency checks in debug builds.
480 *
481 * @returns true if it is, false if it isn't.
482 * @param pVCpu The cross context virtual CPU structure.
483 */
484DECL_FORCE_INLINE(bool) pgmGstIsNoExecuteActive(PVMCPUCC pVCpu)
485{
486 Assert(pVCpu->pgm.s.fNoExecuteEnabled == CPUMIsGuestNXEnabled(pVCpu));
487 Assert(CPUMIsGuestInPAEMode(pVCpu) || CPUMIsGuestInLongMode(pVCpu));
488 return pVCpu->pgm.s.fNoExecuteEnabled;
489}
490
491
492/**
493 * Checks if the page size extension (PSE) is currently enabled (CR4.PSE=1).
494 *
495 * Only used when the guest is in paged 32-bit mode. This is inlined so that
496 * we can perform consistency checks in debug builds.
497 *
498 * @returns true if it is, false if it isn't.
499 * @param pVCpu The cross context virtual CPU structure.
500 */
501DECL_FORCE_INLINE(bool) pgmGst32BitIsPageSizeExtActive(PVMCPUCC pVCpu)
502{
503 Assert(pVCpu->pgm.s.fGst32BitPageSizeExtension == CPUMIsGuestPageSizeExtEnabled(pVCpu));
504 Assert(!CPUMIsGuestInPAEMode(pVCpu));
505 Assert(!CPUMIsGuestInLongMode(pVCpu));
506 return pVCpu->pgm.s.fGst32BitPageSizeExtension;
507}
508
509
510/**
511 * Calculates the guest physical address of the large (4 MB) page in 32-bit paging mode.
512 * Takes PSE-36 into account.
513 *
514 * @returns guest physical address
515 * @param pVM The cross context VM structure.
516 * @param Pde Guest Pde
517 */
518DECLINLINE(RTGCPHYS) pgmGstGet4MBPhysPage(PVMCC pVM, X86PDE Pde)
519{
520 RTGCPHYS GCPhys = Pde.u & X86_PDE4M_PG_MASK;
521 GCPhys |= (RTGCPHYS)(Pde.u & X86_PDE4M_PG_HIGH_MASK) << X86_PDE4M_PG_HIGH_SHIFT;
522
523 return GCPhys & pVM->pgm.s.GCPhys4MBPSEMask;
524}
525
526
527/**
528 * Gets the address of the guest page directory (32-bit paging).
529 *
530 * @returns VBox status code.
531 * @param pVCpu The cross context virtual CPU structure.
532 * @param ppPd Where to return the mapping. This is always set.
533 */
534DECLINLINE(int) pgmGstGet32bitPDPtrEx(PVMCPUCC pVCpu, PX86PD *ppPd)
535{
536 *ppPd = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
537 if (RT_UNLIKELY(!*ppPd))
538 return pgmGstLazyMap32BitPD(pVCpu, ppPd);
539 return VINF_SUCCESS;
540}
541
542
543/**
544 * Gets the address of the guest page directory (32-bit paging).
545 *
546 * @returns Pointer to the page directory entry in question.
547 * @param pVCpu The cross context virtual CPU structure.
548 */
549DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PVMCPUCC pVCpu)
550{
551 PX86PD pGuestPD = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
552 if (RT_UNLIKELY(!pGuestPD))
553 {
554 int rc = pgmGstLazyMap32BitPD(pVCpu, &pGuestPD);
555 if (RT_FAILURE(rc))
556 return NULL;
557 }
558 return pGuestPD;
559}
560
561
562/**
563 * Gets the guest page directory pointer table.
564 *
565 * @returns VBox status code.
566 * @param pVCpu The cross context virtual CPU structure.
567 * @param ppPdpt Where to return the mapping. This is always set.
568 */
569DECLINLINE(int) pgmGstGetPaePDPTPtrEx(PVMCPUCC pVCpu, PX86PDPT *ppPdpt)
570{
571 *ppPdpt = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
572 if (RT_UNLIKELY(!*ppPdpt))
573 return pgmGstLazyMapPaePDPT(pVCpu, ppPdpt);
574 return VINF_SUCCESS;
575}
576
577
578/**
579 * Gets the guest page directory pointer table.
580 *
581 * @returns Pointer to the page directory pointer table in question.
582 * @returns NULL if the page directory pointer table is not present or on an invalid page.
583 * @param pVCpu The cross context virtual CPU structure.
584 */
585DECLINLINE(PX86PDPT) pgmGstGetPaePDPTPtr(PVMCPUCC pVCpu)
586{
587 PX86PDPT pGuestPdpt;
588 int rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGuestPdpt);
589 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
590 return pGuestPdpt;
591}
592
593
594/**
595 * Gets the guest page directory pointer table entry for the specified address.
596 *
597 * @returns Pointer to the page directory pointer table entry in question.
598 * @returns NULL if the page directory pointer table is not present or on an invalid page.
599 * @param pVCpu The cross context virtual CPU structure.
600 * @param GCPtr The address.
601 */
602DECLINLINE(PX86PDPE) pgmGstGetPaePDPEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
603{
604 AssertGCPtr32(GCPtr);
605
606 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
607 if (RT_UNLIKELY(!pGuestPDPT))
608 {
609 int rc = pgmGstLazyMapPaePDPT(pVCpu, &pGuestPDPT);
610 if (RT_FAILURE(rc))
611 return NULL;
612 }
613 return &pGuestPDPT->a[(uint32_t)GCPtr >> X86_PDPT_SHIFT];
614}
615
616
617/**
618 * Gets the page directory entry for the specified address.
619 *
620 * @returns The page directory entry in question.
621 * @returns A non-present entry if the page directory is not present or on an invalid page.
622 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
623 * @param GCPtr The address.
624 */
625DECLINLINE(X86PDEPAE) pgmGstGetPaePDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
626{
627 AssertGCPtr32(GCPtr);
628 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
629 if (RT_LIKELY(pGuestPDPT))
630 {
631 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
632 if ((pGuestPDPT->a[iPdpt].u & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
633 {
634 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
635 PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
636 if ( !pGuestPD
637 || (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
638 pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
639 if (pGuestPD)
640 return pGuestPD->a[iPD];
641 }
642 }
643
644 X86PDEPAE ZeroPde = {0};
645 return ZeroPde;
646}
647
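/* Usage sketch (illustrative, using the standard x86 PDE bit definitions):
 * a non-present or invalid entry comes back as an all-zero PDE, so a simple
 * present check suffices before interpreting the remaining bits.
 *
 *     X86PDEPAE Pde = pgmGstGetPaePDE(pVCpu, GCPtr);
 *     if (Pde.u & X86_PDE_P)
 *     {
 *         // the entry is present; Pde.u holds the guest's PDE bits
 *     }
 */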
648
649/**
650 * Gets the page directory pointer table entry for the specified address
651 * and returns the index into the page directory
652 *
653 * @returns Pointer to the page directory in question.
654 * @returns NULL if the page directory is not present or on an invalid page.
655 * @param pVCpu The cross context virtual CPU structure.
656 * @param GCPtr The address.
657 * @param piPD Receives the index into the returned page directory
658 * @param pPdpe Receives the page directory pointer entry. Optional.
659 */
660DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, unsigned *piPD, PX86PDPE pPdpe)
661{
662 AssertGCPtr32(GCPtr);
663
664 /* The PDPE. */
665 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
666 if (pGuestPDPT)
667 {
668 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
669 X86PGPAEUINT const uPdpe = pGuestPDPT->a[iPdpt].u;
670 if (pPdpe)
671 pPdpe->u = uPdpe;
672 if ((uPdpe & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
673 {
674
675 /* The PDE. */
676 PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
677 if ( !pGuestPD
678 || (uPdpe & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
679 pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
680 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
681 return pGuestPD;
682 }
683 }
684 return NULL;
685}
686
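/* Usage sketch (illustrative): fetch the guest PAE page directory together
 * with the PDE index and the covering PDPT entry for an address.
 *
 *     unsigned  iPD = 0;
 *     X86PDPE   Pdpe;
 *     PX86PDPAE pPD = pgmGstGetPaePDPtr(pVCpu, GCPtr, &iPD, &Pdpe);
 *     if (pPD)
 *     {
 *         X86PDEPAE Pde = pPD->a[iPD];
 *         // examine Pde and Pdpe
 *     }
 */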
687
688/**
689 * Gets the page map level-4 pointer for the guest.
690 *
691 * @returns VBox status code.
692 * @param pVCpu The cross context virtual CPU structure.
693 * @param ppPml4 Where to return the mapping. Always set.
694 */
695DECLINLINE(int) pgmGstGetLongModePML4PtrEx(PVMCPUCC pVCpu, PX86PML4 *ppPml4)
696{
697 *ppPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
698 if (RT_UNLIKELY(!*ppPml4))
699 return pgmGstLazyMapPml4(pVCpu, ppPml4);
700 return VINF_SUCCESS;
701}
702
703
704/**
705 * Gets the page map level-4 pointer for the guest.
706 *
707 * @returns Pointer to the PML4 page.
708 * @param pVCpu The cross context virtual CPU structure.
709 */
710DECLINLINE(PX86PML4) pgmGstGetLongModePML4Ptr(PVMCPUCC pVCpu)
711{
712 PX86PML4 pGuestPml4;
713 int rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGuestPml4);
714 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
715 return pGuestPml4;
716}
717
718
719/**
720 * Gets the pointer to a page map level-4 entry.
721 *
722 * @returns Pointer to the PML4 entry.
723 * @param pVCpu The cross context virtual CPU structure.
724 * @param iPml4 The index.
725 * @remarks Only used by AssertCR3.
726 */
727DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PVMCPUCC pVCpu, unsigned int iPml4)
728{
729 PX86PML4 pGuestPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
730 if (pGuestPml4)
731 { /* likely */ }
732 else
733 {
734 int rc = pgmGstLazyMapPml4(pVCpu, &pGuestPml4);
735 AssertRCReturn(rc, NULL);
736 }
737 return &pGuestPml4->a[iPml4];
738}
739
740
741/**
742 * Gets the page directory entry for the specified address.
743 *
744 * @returns The page directory entry in question.
745 * @returns A non-present entry if the page directory is not present or on an invalid page.
746 * @param pVCpu The cross context virtual CPU structure.
747 * @param GCPtr The address.
748 */
749DECLINLINE(X86PDEPAE) pgmGstGetLongModePDE(PVMCPUCC pVCpu, RTGCPTR64 GCPtr)
750{
751 /*
752 * Note! To keep things simple, ASSUME invalid physical addresses will
753 * cause X86_TRAP_PF_RSVD. This isn't a problem until we start
754 * supporting 52-bit wide physical guest addresses.
755 */
756 PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
757 if (RT_LIKELY(pGuestPml4))
758 {
759 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
760 X86PGPAEUINT const uPml4e = pGuestPml4->a[iPml4].u;
761 if ((uPml4e & (pVCpu->pgm.s.fGstAmd64MbzPml4eMask | X86_PML4E_P)) == X86_PML4E_P)
762 {
763 PCX86PDPT pPdptTemp;
764 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPml4e & X86_PML4E_PG_MASK, &pPdptTemp);
765 if (RT_SUCCESS(rc))
766 {
767 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
768 X86PGPAEUINT const uPdpte = pPdptTemp->a[iPdpt].u;
769 if ((uPdpte & (pVCpu->pgm.s.fGstAmd64MbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
770 {
771 PCX86PDPAE pPD;
772 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPdpte & X86_PDPE_PG_MASK, &pPD);
773 if (RT_SUCCESS(rc))
774 {
775 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
776 return pPD->a[iPD];
777 }
778 }
779 }
780 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
781 }
782 }
783
784 X86PDEPAE ZeroPde = {0};
785 return ZeroPde;
786}
787
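/* Usage sketch (illustrative, using the standard x86 PDE bit definitions):
 * distinguish a 2 MB mapping from a regular page-table mapping for a
 * long-mode guest address.
 *
 *     X86PDEPAE Pde = pgmGstGetLongModePDE(pVCpu, GCPtr);
 *     if (Pde.u & X86_PDE_P)
 *     {
 *         if (Pde.u & X86_PDE_PS)
 *         {
 *             // large (2 MB) page mapping
 *         }
 *         else
 *         {
 *             // the PDE references a page table of 4 KB mappings
 *         }
 *     }
 */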
788
789/**
790 * Gets the GUEST page directory pointer for the specified address.
791 *
792 * @returns The page directory in question.
793 * @returns NULL if the page directory is not present or on an invalid page.
794 * @param pVCpu The cross context virtual CPU structure.
795 * @param GCPtr The address.
796 * @param ppPml4e Page Map Level-4 Entry (out)
797 * @param pPdpe Page directory pointer table entry (out)
798 * @param piPD Receives the index into the returned page directory
799 */
800DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
801{
802 /* The PML4E. */
803 PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
804 if (pGuestPml4)
805 {
806 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
807 *ppPml4e = &pGuestPml4->a[iPml4];
808 X86PGPAEUINT const uPml4e = pGuestPml4->a[iPml4].u;
809 if ((uPml4e & (pVCpu->pgm.s.fGstAmd64MbzPml4eMask | X86_PML4E_P)) == X86_PML4E_P)
810 {
811 /* The PDPE. */
812 PCX86PDPT pPdptTemp;
813 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPml4e & X86_PML4E_PG_MASK, &pPdptTemp);
814 if (RT_SUCCESS(rc))
815 {
816 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
817 X86PGPAEUINT const uPdpe = pPdptTemp->a[iPdpt].u;
818 pPdpe->u = uPdpe;
819 if ((uPdpe & (pVCpu->pgm.s.fGstAmd64MbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
820 {
821 /* The PDE. */
822 PX86PDPAE pPD;
823 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPdpe & X86_PDPE_PG_MASK, &pPD);
824 if (RT_SUCCESS(rc))
825 {
826 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
827 return pPD;
828 }
829 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
830 }
831 }
832 else
833 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
834 }
835 }
836 return NULL;
837}
838
839
840#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
841# if 0
842/**
843 * Gets the pointer to a page map level-4 entry when the guest using EPT paging.
844 *
845 * @returns Pointer to the PML4 entry.
846 * @param pVCpu The cross context virtual CPU structure.
847 * @param iPml4 The index.
848 * @remarks Only used by AssertCR3.
849 */
850DECLINLINE(PEPTPML4E) pgmGstGetEptPML4EPtr(PVMCPUCC pVCpu, unsigned int iPml4)
851{
852 PEPTPML4 pEptPml4 = pVCpu->pgm.s.CTX_SUFF(pGstEptPml4);
853 if (pEptPml4)
854 { /* likely */ }
855 else
856 {
857 int const rc = pgmGstLazyMapEptPml4(pVCpu, &pEptPml4);
858 AssertRCReturn(rc, NULL);
859 }
860 return &pEptPml4->a[iPml4];
861}
862# endif
863
864
865/**
866 * Gets the page map level-4 pointer for the guest when the guest is using EPT
867 * paging.
868 *
869 * @returns VBox status code.
870 * @param pVCpu The cross context virtual CPU structure.
871 * @param ppEptPml4 Where to return the mapping. Always set.
872 */
873DECLINLINE(int) pgmGstGetEptPML4PtrEx(PVMCPUCC pVCpu, PEPTPML4 *ppEptPml4)
874{
875 /* Shadow CR3 might not have been mapped at this point, see PGMHCChangeMode. */
876 *ppEptPml4 = pVCpu->pgm.s.CTX_SUFF(pGstEptPml4);
877 if (!*ppEptPml4)
878 return pgmGstLazyMapEptPml4(pVCpu, ppEptPml4);
879 return VINF_SUCCESS;
880}
881
882
883# if 0
884/**
885 * Gets the page map level-4 pointer for the guest when the guest is using EPT
886 * paging.
887 *
888 * @returns Pointer to the EPT PML4 page.
889 * @param pVCpu The cross context virtual CPU structure.
890 */
891DECLINLINE(PEPTPML4) pgmGstGetEptPML4Ptr(PVMCPUCC pVCpu)
892{
893 PEPTPML4 pEptPml4;
894 int rc = pgmGstGetEptPML4PtrEx(pVCpu, &pEptPml4);
895 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
896 return pEptPml4;
897}
898# endif
899#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
900
901
902/**
903 * Gets the shadow page directory, 32-bit.
904 *
905 * @returns Pointer to the shadow 32-bit PD.
906 * @param pVCpu The cross context virtual CPU structure.
907 */
908DECLINLINE(PX86PD) pgmShwGet32BitPDPtr(PVMCPUCC pVCpu)
909{
910 return (PX86PD)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
911}
912
913
914/**
915 * Gets the shadow page directory entry for the specified address, 32-bit.
916 *
917 * @returns Shadow 32-bit PDE.
918 * @param pVCpu The cross context virtual CPU structure.
919 * @param GCPtr The address.
920 */
921DECLINLINE(X86PDE) pgmShwGet32BitPDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
922{
923 PX86PD pShwPde = pgmShwGet32BitPDPtr(pVCpu);
924 if (!pShwPde)
925 {
926 X86PDE ZeroPde = {0};
927 return ZeroPde;
928 }
929 return pShwPde->a[(uint32_t)GCPtr >> X86_PD_SHIFT];
930}
931
932
933/**
934 * Gets the pointer to the shadow page directory entry for the specified
935 * address, 32-bit.
936 *
937 * @returns Pointer to the shadow 32-bit PDE.
938 * @param pVCpu The cross context virtual CPU structure.
939 * @param GCPtr The address.
940 */
941DECLINLINE(PX86PDE) pgmShwGet32BitPDEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
942{
943 PX86PD pPde = pgmShwGet32BitPDPtr(pVCpu);
944 AssertReturn(pPde, NULL);
945 return &pPde->a[(uint32_t)GCPtr >> X86_PD_SHIFT];
946}
947
948
949/**
950 * Gets the shadow page directory pointer table, PAE.
951 *
952 * @returns Pointer to the shadow PAE PDPT.
953 * @param pVCpu The cross context virtual CPU structure.
954 */
955DECLINLINE(PX86PDPT) pgmShwGetPaePDPTPtr(PVMCPUCC pVCpu)
956{
957 return (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
958}
959
960
961/**
962 * Gets the shadow page directory for the specified address, PAE.
963 *
964 * @returns Pointer to the shadow PD.
965 * @param pVCpu The cross context virtual CPU structure.
966 * @param pPdpt Pointer to the page directory pointer table.
967 * @param GCPtr The address.
968 */
969DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPUCC pVCpu, PX86PDPT pPdpt, RTGCPTR GCPtr)
970{
971 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
972 if (pPdpt->a[iPdpt].u & X86_PDPE_P)
973 {
974 /* Fetch the pgm pool shadow descriptor. */
975 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
976 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
977 AssertReturn(pShwPde, NULL);
978
979 return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);
980 }
981 return NULL;
982}
983
984
985/**
986 * Gets the shadow page directory for the specified address, PAE.
987 *
988 * @returns Pointer to the shadow PD.
989 * @param pVCpu The cross context virtual CPU structure.
990 * @param GCPtr The address.
991 */
992DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
993{
994 return pgmShwGetPaePDPtr(pVCpu, pgmShwGetPaePDPTPtr(pVCpu), GCPtr);
995}
996
997
998/**
999 * Gets the shadow page directory entry, PAE.
1000 *
1001 * @returns PDE.
1002 * @param pVCpu The cross context virtual CPU structure.
1003 * @param GCPtr The address.
1004 */
1005DECLINLINE(X86PDEPAE) pgmShwGetPaePDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1006{
1007 const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
1008 PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
1009 if (pShwPde)
1010 return pShwPde->a[iPd];
1011
1012 X86PDEPAE ZeroPde = {0};
1013 return ZeroPde;
1014}
1015
1016
1017/**
1018 * Gets the pointer to the shadow page directory entry for an address, PAE.
1019 *
1020 * @returns Pointer to the PDE.
1021 * @param pVCpu The cross context virtual CPU structure.
1022 * @param GCPtr The address.
1023 * @remarks Only used by AssertCR3.
1024 */
1025DECLINLINE(PX86PDEPAE) pgmShwGetPaePDEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1026{
1027 const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
1028 PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
1029 AssertReturn(pShwPde, NULL);
1030 return &pShwPde->a[iPd];
1031}
1032
1033
1034/**
1035 * Gets the shadow page map level-4 pointer.
1036 *
1037 * @returns Pointer to the shadow PML4.
1038 * @param pVCpu The cross context virtual CPU structure.
1039 */
1040DECLINLINE(PX86PML4) pgmShwGetLongModePML4Ptr(PVMCPUCC pVCpu)
1041{
1042 return (PX86PML4)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1043}
1044
1045
1046/**
1047 * Gets the shadow page map level-4 entry for the specified address.
1048 *
1049 * @returns The entry.
1050 * @param pVCpu The cross context virtual CPU structure.
1051 * @param GCPtr The address.
1052 */
1053DECLINLINE(X86PML4E) pgmShwGetLongModePML4E(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1054{
1055 const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1056 PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
1057 if (pShwPml4)
1058 return pShwPml4->a[iPml4];
1059
1060 X86PML4E ZeroPml4e = {0};
1061 return ZeroPml4e;
1062}
1063
1064
1065/**
1066 * Gets the pointer to the specified shadow page map level-4 entry.
1067 *
1068 * @returns Pointer to the entry.
1069 * @param pVCpu The cross context virtual CPU structure.
1070 * @param iPml4 The PML4 index.
1071 */
1072DECLINLINE(PX86PML4E) pgmShwGetLongModePML4EPtr(PVMCPUCC pVCpu, unsigned int iPml4)
1073{
1074 PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
1075 if (pShwPml4)
1076 return &pShwPml4->a[iPml4];
1077 return NULL;
1078}
1079
1080#endif /* !VBOX_VMM_TARGET_ARMV8 */
1081
1082/**
1083 * Cached physical handler lookup.
1084 *
1085 * @returns VBox status code.
1086 * @retval VERR_NOT_FOUND if no handler.
1087 * @param pVM The cross context VM structure.
1088 * @param GCPhys The lookup address.
1089 * @param ppHandler Where to return the handler pointer.
1090 */
1091DECLINLINE(int) pgmHandlerPhysicalLookup(PVMCC pVM, RTGCPHYS GCPhys, PPGMPHYSHANDLER *ppHandler)
1092{
1093 PPGMPHYSHANDLER pHandler = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.ptrFromInt(pVM->pgm.s.idxLastPhysHandler);
1094 if ( pHandler
1095 && pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.isPtrRetOkay(pHandler)
1096 && GCPhys >= pHandler->Key
1097 && GCPhys < pHandler->KeyLast
1098 && pHandler->hType != NIL_PGMPHYSHANDLERTYPE
1099 && pHandler->hType != 0)
1100
1101 {
1102 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerLookupHits));
1103 *ppHandler = pHandler;
1104 return VINF_SUCCESS;
1105 }
1106
1107 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerLookupMisses));
1108 AssertPtrReturn(pVM->VMCC_CTX(pgm).s.pPhysHandlerTree, VERR_PGM_HANDLER_IPE_1);
1109 int rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pHandler);
1110 if (RT_SUCCESS(rc))
1111 {
1112 *ppHandler = pHandler;
1113 pVM->pgm.s.idxLastPhysHandler = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.ptrToInt(pHandler);
1114 return VINF_SUCCESS;
1115 }
1116 *ppHandler = NULL;
1117 return rc;
1118}
1119
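/* Usage sketch (illustrative, assuming the PGM lock is held): test whether a
 * physical access handler is registered for a guest physical address.
 *
 *     PPGMPHYSHANDLER pHandler;
 *     int rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pHandler);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // a handler covering GCPhys was found (pHandler->Key .. pHandler->KeyLast)
 *     }
 *     else if (rc == VERR_NOT_FOUND)
 *     {
 *         // no handler registered at this address
 *     }
 */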
1120
1121/**
1122 * Converts a handle to a pointer.
1123 *
1124 * @returns Pointer on success, NULL on failure (asserted).
1125 * @param pVM The cross context VM structure.
1126 * @param hType Physical access handler type handle.
1127 */
1128DECLINLINE(PCPGMPHYSHANDLERTYPEINT) pgmHandlerPhysicalTypeHandleToPtr(PVMCC pVM, PGMPHYSHANDLERTYPE hType)
1129{
1130#ifdef IN_RING0
1131 PPGMPHYSHANDLERTYPEINT pType = &pVM->pgmr0.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK];
1132#elif defined(IN_RING3)
1133 PPGMPHYSHANDLERTYPEINT pType = &pVM->pgm.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK];
1134#else
1135# error "Invalid context"
1136#endif
1137 AssertReturn(pType->hType == hType, NULL);
1138 return pType;
1139}
1140
1141
1142/**
1143 * Converts a handle to a pointer, never returns NULL.
1144 *
1145 * @returns Pointer on success, dummy on failure (asserted).
1146 * @param pVM The cross context VM structure.
1147 * @param hType Physical access handler type handle.
1148 */
1149DECLINLINE(PCPGMPHYSHANDLERTYPEINT) pgmHandlerPhysicalTypeHandleToPtr2(PVMCC pVM, PGMPHYSHANDLERTYPE hType)
1150{
1151#ifdef IN_RING0
1152 PPGMPHYSHANDLERTYPEINT pType = &pVM->pgmr0.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK];
1153#elif defined(IN_RING3)
1154 PPGMPHYSHANDLERTYPEINT pType = &pVM->pgm.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK];
1155#else
1156# error "Invalid context"
1157#endif
1158 AssertReturn(pType->hType == hType, &g_pgmHandlerPhysicalDummyType);
1159 return pType;
1160}
1161
1162
1163/**
1164 * Internal worker for finding an 'in-use' shadow page given by its pool page index.
1165 *
1166 * @returns Pointer to the shadow page structure.
1167 * @param pPool The pool.
1168 * @param idx The pool page index.
1169 */
1170DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
1171{
1172 AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
1173 return &pPool->aPages[idx];
1174}
1175
1176
1177/**
1178 * Clear references to guest physical memory.
1179 *
1180 * @param pPool The pool.
1181 * @param pPoolPage The pool page.
1182 * @param pPhysPage The physical guest page tracking structure.
1183 * @param iPte Shadow PTE index
1184 */
1185DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage, uint16_t iPte)
1186{
1187 /*
1188 * Just deal with the simple case here.
1189 */
1190#ifdef VBOX_STRICT
1191 PVMCC pVM = pPool->CTX_SUFF(pVM); NOREF(pVM);
1192#endif
1193#ifdef LOG_ENABLED
1194 const unsigned uOrg = PGM_PAGE_GET_TRACKING(pPhysPage);
1195#endif
1196 const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
1197 if (cRefs == 1)
1198 {
1199#if 0 /* for more debug info */
1200 AssertMsg( pPoolPage->idx == PGM_PAGE_GET_TD_IDX(pPhysPage)
1201 && iPte == PGM_PAGE_GET_PTE_INDEX(pPhysPage),
1202 ("idx=%#x iPte=%#x enmKind=%d vs pPhysPage=%R[pgmpage] idx=%#x iPte=%#x enmKind=%d [iPte]=%#RX64\n",
1203 pPoolPage->idx, iPte, pPoolPage->enmKind,
1204 pPhysPage, PGM_PAGE_GET_TD_IDX(pPhysPage), PGM_PAGE_GET_PTE_INDEX(pPhysPage),
1205 pPool->aPages[PGM_PAGE_GET_TD_IDX(pPhysPage)].enmKind,
1206 ((uint64_t *)pPoolPage->CTX_SUFF(pvPage))[iPte]));
1207#else
1208 Assert(pPoolPage->idx == PGM_PAGE_GET_TD_IDX(pPhysPage));
1209 Assert(iPte == PGM_PAGE_GET_PTE_INDEX(pPhysPage));
1210#endif
1211 /* Invalidate the tracking data. */
1212 PGM_PAGE_SET_TRACKING(pVM, pPhysPage, 0);
1213 }
1214 else
1215 pgmPoolTrackPhysExtDerefGCPhys(pPool, pPoolPage, pPhysPage, iPte);
1216 Log2(("pgmTrackDerefGCPhys: %x -> %x pPhysPage=%R[pgmpage]\n", uOrg, PGM_PAGE_GET_TRACKING(pPhysPage), pPhysPage ));
1217}
1218
1219
1220/**
1221 * Moves the page to the head of the age list.
1222 *
1223 * This is done when the cached page is used in one way or another.
1224 *
1225 * @param pPool The pool.
1226 * @param pPage The cached page.
1227 */
1228DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1229{
1230 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM));
1231
1232 /*
1233 * Move to the head of the age list.
1234 */
1235 if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
1236 {
1237 /* unlink */
1238 pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
1239 if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
1240 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
1241 else
1242 pPool->iAgeTail = pPage->iAgePrev;
1243
1244 /* insert at head */
1245 pPage->iAgePrev = NIL_PGMPOOL_IDX;
1246 pPage->iAgeNext = pPool->iAgeHead;
1247 Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
1248 pPool->iAgeHead = pPage->idx;
1249 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
1250 }
1251}
1252
1253
1254/**
1255 * Locks a page to prevent flushing (important for CR3 root pages or shadow PAE PD pages).
1256 *
1257 * @param pPool The pool.
1258 * @param pPage PGM pool page
1259 */
1260DECLINLINE(void) pgmPoolLockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1261{
1262 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM)); NOREF(pPool);
1263 ASMAtomicIncU32(&pPage->cLocked);
1264}
1265
1266
1267/**
1268 * Unlocks a page to allow flushing again.
1269 *
1270 * @param pPool The pool.
1271 * @param pPage PGM pool page
1272 */
1273DECLINLINE(void) pgmPoolUnlockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1274{
1275 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM)); NOREF(pPool);
1276 Assert(pPage->cLocked);
1277 ASMAtomicDecU32(&pPage->cLocked);
1278}
1279
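/* Usage sketch (illustrative, with the PGM lock held throughout): pair the
 * lock and unlock helpers around work that must not have the shadow page
 * flushed from under it.
 *
 *     pgmPoolLockPage(pPool, pPage);
 *     // the pool page cannot be flushed while cLocked is non-zero
 *     pgmPoolUnlockPage(pPool, pPage);
 */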
1280
1281/**
1282 * Checks if the page is locked (e.g. the active CR3 or one of the four PDs of a PAE PDPT).
1283 *
1284 * @returns true if the page is locked, false if not.
1285 * @param pPage PGM pool page
1286 */
1287DECLINLINE(bool) pgmPoolIsPageLocked(PPGMPOOLPAGE pPage)
1288{
1289 if (pPage->cLocked)
1290 {
1291 LogFlow(("pgmPoolIsPageLocked found root page %d\n", pPage->enmKind));
1292 if (pPage->cModifications)
1293 pPage->cModifications = 1; /* reset counter (can't use 0, or else it will be reinserted in the modified list) */
1294 return true;
1295 }
1296 return false;
1297}
1298
1299
1300/**
1301 * Checks if the specified page is dirty (not write monitored).
1302 *
1303 * @returns true if dirty, false if not.
1304 * @param pVM The cross context VM structure.
1305 * @param GCPhys Guest physical address
1306 */
1307DECLINLINE(bool) pgmPoolIsDirtyPage(PVMCC pVM, RTGCPHYS GCPhys)
1308{
1309 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1310 PGM_LOCK_ASSERT_OWNER(pVM);
1311 if (!pPool->cDirtyPages)
1312 return false;
1313 return pgmPoolIsDirtyPageSlow(pVM, GCPhys);
1314}
1315
1316
1317/** @} */
1318
1319#endif /* !VMM_INCLUDED_SRC_include_PGMInline_h */
1320