VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllGst.h

Last change on this file was 98103, checked in by vboxsync, 17 months ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 19.4 KB
/* $Id: PGMAllGst.h 98103 2023-01-17 14:15:46Z vboxsync $ */
/** @file
 * VBox - Page Manager, Guest Paging Template - All context code.
 */

/*
 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
RT_C_DECLS_BEGIN
/** @todo Do we really need any of these forward declarations? */
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
    || PGM_GST_TYPE == PGM_TYPE_PAE \
    || PGM_GST_TYPE == PGM_TYPE_AMD64
DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PGSTPTWALK pGstWalk);
#endif
PGM_GST_DECL(int, Enter)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3);
PGM_GST_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk);
PGM_GST_DECL(int, ModifyPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
PGM_GST_DECL(int, Exit)(PVMCPUCC pVCpu);

#ifdef IN_RING3 /* r3 only for now. */
PGM_GST_DECL(int, Relocate)(PVMCPUCC pVCpu, RTGCPTR offDelta);
#endif
RT_C_DECLS_END

/**
 * Enters the guest mode.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPhysCR3   The physical address from the CR3 register.
 */
PGM_GST_DECL(int, Enter)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3)
{
    /*
     * Map and monitor CR3
     */
    uintptr_t idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
    return g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
}


/**
 * Exits the guest mode.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
PGM_GST_DECL(int, Exit)(PVMCPUCC pVCpu)
{
    uintptr_t idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmBothModeData[idxBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
    return g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu);
}


#if PGM_GST_TYPE == PGM_TYPE_32BIT \
    || PGM_GST_TYPE == PGM_TYPE_PAE \
    || PGM_GST_TYPE == PGM_TYPE_AMD64


DECLINLINE(int) PGM_GST_NAME(WalkReturnNotPresent)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel)
{
    NOREF(iLevel); NOREF(pVCpu);
    pWalk->fNotPresent = true;
    pWalk->uLevel      = (uint8_t)iLevel;
    return VERR_PAGE_TABLE_NOT_PRESENT;
}

DECLINLINE(int) PGM_GST_NAME(WalkReturnBadPhysAddr)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel, int rc)
{
    AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc); NOREF(pVCpu);
    pWalk->fBadPhysAddr = true;
    pWalk->uLevel       = (uint8_t)iLevel;
    return VERR_PAGE_TABLE_NOT_PRESENT;
}

DECLINLINE(int) PGM_GST_NAME(WalkReturnRsvdError)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel)
{
    NOREF(pVCpu);
    pWalk->fRsvdError = true;
    pWalk->uLevel     = (uint8_t)iLevel;
    return VERR_PAGE_TABLE_NOT_PRESENT;
}


/**
 * Performs a guest page table walk.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PAGE_TABLE_NOT_PRESENT on failure.  Check pWalk for details.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   GCPtr       The guest virtual address to walk by.
 * @param   pWalk       The page walk info.
 * @param   pGstWalk    The guest mode specific page walk info.
 */
DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PGSTPTWALK pGstWalk)
{
    int rc;

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
/** @def PGM_GST_SLAT_WALK
 * Macro to perform guest second-level address translation (EPT or Nested).
 *
 * @param   a_pVCpu         The cross context virtual CPU structure of the calling
 *                          EMT.
 * @param   a_GCPtrNested   The nested-guest linear address that caused the
 *                          second-level translation.
 * @param   a_GCPhysNested  The nested-guest physical address to translate.
 * @param   a_GCPhysOut     Where to store the guest-physical address (result).
 */
# define PGM_GST_SLAT_WALK(a_pVCpu, a_GCPtrNested, a_GCPhysNested, a_GCPhysOut, a_pWalk) \
    do { \
        if ((a_pVCpu)->pgm.s.enmGuestSlatMode == PGMSLAT_EPT) \
        { \
            PGMPTWALK    WalkSlat; \
            PGMPTWALKGST WalkGstSlat; \
            int const rcX = pgmGstSlatWalk(a_pVCpu, a_GCPhysNested, true /* fIsLinearAddrValid */, a_GCPtrNested, &WalkSlat, \
                                           &WalkGstSlat); \
            if (RT_SUCCESS(rcX)) \
                (a_GCPhysOut) = WalkSlat.GCPhys; \
            else \
            { \
                *(a_pWalk) = WalkSlat; \
                return rcX; \
            } \
        } \
    } while (0)
#endif

    /*
     * Init the walking structures.
     */
    RT_ZERO(*pWalk);
    RT_ZERO(*pGstWalk);
    pWalk->GCPtr = GCPtr;

# if PGM_GST_TYPE == PGM_TYPE_32BIT \
  || PGM_GST_TYPE == PGM_TYPE_PAE
    /*
     * Boundary check for PAE and 32-bit (prevents trouble further down).
     */
    if (RT_UNLIKELY(GCPtr >= _4G))
        return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 8);
# endif

    uint64_t fEffective;
    {
# if PGM_GST_TYPE == PGM_TYPE_AMD64
        /*
         * The PML4 table.
         */
        rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGstWalk->pPml4);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc);

        PX86PML4E pPml4e;
        pGstWalk->pPml4e  = pPml4e  = &pGstWalk->pPml4->a[(GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK];
        X86PML4E  Pml4e;
        pGstWalk->Pml4e.u = Pml4e.u = pPml4e->u;

        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pml4e)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 4);

        if (RT_LIKELY(GST_IS_PML4E_VALID(pVCpu, Pml4e))) { /* likely */ }
        else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 4);

        fEffective = Pml4e.u & (  X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_PWT | X86_PML4E_PCD | X86_PML4E_A
                                | X86_PML4E_NX);
        pWalk->fEffective = fEffective;

        /*
         * The PDPT.
         */
        RTGCPHYS GCPhysPdpt = Pml4e.u & X86_PML4E_PG_MASK;
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPdpt, GCPhysPdpt, pWalk);
#endif
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPdpt, &pGstWalk->pPdpt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);

# elif PGM_GST_TYPE == PGM_TYPE_PAE
        rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGstWalk->pPdpt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
#endif
    }
    {
# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
        PX86PDPE pPdpe;
        pGstWalk->pPdpe  = pPdpe  = &pGstWalk->pPdpt->a[(GCPtr >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
        X86PDPE  Pdpe;
        pGstWalk->Pdpe.u = Pdpe.u = pPdpe->u;

        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pdpe)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 3);

        if (RT_LIKELY(GST_IS_PDPE_VALID(pVCpu, Pdpe))) { /* likely */ }
        else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 3);

# if PGM_GST_TYPE == PGM_TYPE_AMD64
        fEffective &= (Pdpe.u & (  X86_PDPE_P   | X86_PDPE_RW  | X86_PDPE_US
                                 | X86_PDPE_PWT | X86_PDPE_PCD | X86_PDPE_A));
        fEffective |= Pdpe.u & X86_PDPE_LM_NX;
# else
        /*
         * NX in the legacy-mode PAE PDPE is reserved.  The valid check above ensures the NX bit is not set.
         * The RW, US and A bits are MBZ in PAE PDPTE entries, but they must be treated as 1 in the way we compute
         * cumulative (effective) access rights.
         */
        Assert(!(Pdpe.u & X86_PDPE_LM_NX));
        fEffective = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A
                   | (Pdpe.u & (X86_PDPE_PWT | X86_PDPE_PCD));
# endif
        pWalk->fEffective = fEffective;

        /*
         * The PD.
         */
        RTGCPHYS GCPhysPd = Pdpe.u & X86_PDPE_PG_MASK;
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPd, GCPhysPd, pWalk);
# endif
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPd, &pGstWalk->pPd);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 2, rc);

# elif PGM_GST_TYPE == PGM_TYPE_32BIT
        rc = pgmGstGet32bitPDPtrEx(pVCpu, &pGstWalk->pPd);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
# endif
    }
    {
        PGSTPDE pPde;
        pGstWalk->pPde  = pPde  = &pGstWalk->pPd->a[(GCPtr >> GST_PD_SHIFT) & GST_PD_MASK];
        GSTPDE  Pde;
        pGstWalk->Pde.u = Pde.u = pPde->u;
        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pde)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 2);
        if ((Pde.u & X86_PDE_PS) && GST_IS_PSE_ACTIVE(pVCpu))
        {
            if (RT_LIKELY(GST_IS_BIG_PDE_VALID(pVCpu, Pde))) { /* likely */ }
            else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2);

            /*
             * We're done.
             */
# if PGM_GST_TYPE == PGM_TYPE_32BIT
            fEffective  = Pde.u & (X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A);
# else
            fEffective &= Pde.u & (X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A);
            fEffective |= Pde.u & X86_PDE2M_PAE_NX;
# endif
            fEffective |= Pde.u & (X86_PDE4M_D | X86_PDE4M_G);
            fEffective |= (Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT;
            pWalk->fEffective = fEffective;
            Assert(GST_IS_NX_ACTIVE(pVCpu) || !(fEffective & PGM_PTATTRS_NX_MASK));
            Assert(fEffective & PGM_PTATTRS_R_MASK);

            pWalk->fBigPage   = true;
            pWalk->fSucceeded = true;
            RTGCPHYS GCPhysPde = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)
                               | (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
            PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPde, GCPhysPde, pWalk);
# endif
            pWalk->GCPhys = GCPhysPde;
            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->GCPhys);
            return VINF_SUCCESS;
        }

        if (RT_UNLIKELY(!GST_IS_PDE_VALID(pVCpu, Pde)))
            return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2);
# if PGM_GST_TYPE == PGM_TYPE_32BIT
        fEffective  = Pde.u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A);
# else
        fEffective &= Pde.u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A);
        fEffective |= Pde.u & X86_PDE_PAE_NX;
# endif
        pWalk->fEffective = fEffective;

        /*
         * The PT.
         */
        RTGCPHYS GCPhysPt = GST_GET_PDE_GCPHYS(Pde);
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPt, GCPhysPt, pWalk);
# endif
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pGstWalk->pPt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 1, rc);
    }
    {
        PGSTPTE pPte;
        pGstWalk->pPte  = pPte  = &pGstWalk->pPt->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];
        GSTPTE  Pte;
        pGstWalk->Pte.u = Pte.u = pPte->u;

        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pte)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 1);

        if (RT_LIKELY(GST_IS_PTE_VALID(pVCpu, Pte))) { /* likely */ }
        else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 1);

        /*
         * We're done.
         */
        fEffective &= Pte.u & (X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A);
        fEffective |= Pte.u & (X86_PTE_D | X86_PTE_PAT | X86_PTE_G);
# if PGM_GST_TYPE != PGM_TYPE_32BIT
        fEffective |= Pte.u & X86_PTE_PAE_NX;
# endif
        pWalk->fEffective = fEffective;
        Assert(GST_IS_NX_ACTIVE(pVCpu) || !(fEffective & PGM_PTATTRS_NX_MASK));
        Assert(fEffective & PGM_PTATTRS_R_MASK);

        pWalk->fSucceeded = true;
        RTGCPHYS GCPhysPte = GST_GET_PTE_GCPHYS(Pte)
                           | (GCPtr & GUEST_PAGE_OFFSET_MASK);
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPte, GCPhysPte, pWalk);
# endif
        pWalk->GCPhys = GCPhysPte;
        return VINF_SUCCESS;
    }
}

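/*
 * Usage sketch (illustration only, not compiled): how a caller might drive the
 * walker, mirroring the calls made by GetPage and ModifyPage further down in
 * this file.  The pVCpu/GCPtr names are assumed to be in the caller's scope.
 */
# if 0
    {
        PGMPTWALK  Walk;
        GSTPTWALK  GstWalk;
        int rcWalk = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk, &GstWalk);
        if (RT_SUCCESS(rcWalk))
        {
            /* Walk.GCPhys holds the translated guest-physical address,
               Walk.fEffective the cumulative access rights, and Walk.fBigPage
               is set when the translation came from a 2/4MB PDE. */
        }
        else
        {
            /* VERR_PAGE_TABLE_NOT_PRESENT: inspect Walk.fNotPresent,
               Walk.fBadPhysAddr, Walk.fRsvdError and Walk.uLevel. */
        }
    }
# endif
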
#endif /* 32BIT, PAE, AMD64 */

/**
 * Gets effective Guest OS page information.
 *
 * When GCPtr is in a big page, the function will return as if it were a normal
 * 4KB page. If the need for distinguishing between big and normal pages becomes
 * necessary at a later point, a PGMGstGetPageEx() will be created for that
 * purpose.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pWalk       Where to store the page walk info.
 */
PGM_GST_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
{
#if PGM_GST_TYPE == PGM_TYPE_REAL \
 || PGM_GST_TYPE == PGM_TYPE_PROT

    RT_ZERO(*pWalk);
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
    {
        PGMPTWALK    WalkSlat;
        PGMPTWALKGST WalkGstSlat;
        int const rc = pgmGstSlatWalk(pVCpu, GCPtr, true /* fIsLinearAddrValid */, GCPtr, &WalkSlat, &WalkGstSlat);
        if (RT_SUCCESS(rc))
        {
            pWalk->fSucceeded = true;
            pWalk->GCPtr      = GCPtr;
            pWalk->GCPhys     = WalkSlat.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
            pWalk->fEffective = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
        }
        else
            *pWalk = WalkSlat;
        return rc;
    }
# endif

    /*
     * Fake it.
     */
    pWalk->fSucceeded = true;
    pWalk->GCPtr      = GCPtr;
    pWalk->GCPhys     = GCPtr & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
    pWalk->fEffective = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
    NOREF(pVCpu);
    return VINF_SUCCESS;

#elif PGM_GST_TYPE == PGM_TYPE_32BIT \
   || PGM_GST_TYPE == PGM_TYPE_PAE \
   || PGM_GST_TYPE == PGM_TYPE_AMD64

    GSTPTWALK GstWalk;
    int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, pWalk, &GstWalk);
    if (RT_FAILURE(rc))
        return rc;

    Assert(pWalk->fSucceeded);
    Assert(pWalk->GCPtr == GCPtr);

    PGMPTATTRS fFlags;
    if (!pWalk->fBigPage)
        fFlags = (GstWalk.Pte.u & ~(GST_PTE_PG_MASK | X86_PTE_RW | X86_PTE_US)) /* NX not needed */
               | (pWalk->fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK))
# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
               | (pWalk->fEffective & PGM_PTATTRS_NX_MASK)
# endif
               ;
    else
    {
        fFlags = (GstWalk.Pde.u & ~(GST_PTE_PG_MASK | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PS)) /* NX not needed */
               | (pWalk->fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK | PGM_PTATTRS_PAT_MASK))
# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
               | (pWalk->fEffective & PGM_PTATTRS_NX_MASK)
# endif
               ;
    }

    pWalk->GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
    pWalk->fEffective = fFlags;
    return VINF_SUCCESS;

#else
# error "shouldn't be here!"
    /* something else... */
    return VERR_NOT_SUPPORTED;
#endif
}


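/*
 * Usage sketch (illustration only, not compiled): querying a single page via
 * GetPage, assuming pVCpu and GCPtrPage are in the caller's scope.  On success
 * GCPhys is page aligned and fEffective carries X86_PTE_*-style attribute bits.
 */
#if 0
    {
        PGMPTWALK Walk;
        int rcPage = PGM_GST_NAME(GetPage)(pVCpu, GCPtrPage, &Walk);
        if (RT_SUCCESS(rcPage))
        {
            /* Walk.GCPhys is the page-aligned guest-physical address; test
               bits like X86_PTE_RW / X86_PTE_US in Walk.fEffective. */
        }
    }
#endif
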
/**
 * Modify page flags for a range of pages in the guest's tables.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range. Page aligned!
 * @param   cb          Size (in bytes) of the page range to apply the modification to. Page aligned!
 * @param   fFlags      The OR mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 */
PGM_GST_DECL(int, ModifyPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    Assert((cb & GUEST_PAGE_OFFSET_MASK) == 0); RT_NOREF_PV(cb);

#if PGM_GST_TYPE == PGM_TYPE_32BIT \
    || PGM_GST_TYPE == PGM_TYPE_PAE \
    || PGM_GST_TYPE == PGM_TYPE_AMD64
    for (;;)
    {
        PGMPTWALK Walk;
        GSTPTWALK GstWalk;
        int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk, &GstWalk);
        if (RT_FAILURE(rc))
            return rc;

        if (!Walk.fBigPage)
        {
            /*
             * 4KB Page table, process
             *
             * Walk pages till we're done.
             */
            unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
            while (iPTE < RT_ELEMENTS(GstWalk.pPt->a))
            {
                GSTPTE Pte = GstWalk.pPt->a[iPTE];
                Pte.u = (Pte.u & (fMask | X86_PTE_PAE_PG_MASK))
                      | (fFlags & ~GST_PTE_PG_MASK);
                GstWalk.pPt->a[iPTE] = Pte;

                /* next page */
                cb -= GUEST_PAGE_SIZE;
                if (!cb)
                    return VINF_SUCCESS;
                GCPtr += GUEST_PAGE_SIZE;
                iPTE++;
            }
        }
        else
        {
            /*
             * 2/4MB Page table
             */
            GSTPDE PdeNew;
# if PGM_GST_TYPE == PGM_TYPE_32BIT
            PdeNew.u = (GstWalk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PG_HIGH_MASK | X86_PDE4M_PS))
# else
            PdeNew.u = (GstWalk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS))
# endif
                     | (fFlags & ~GST_PTE_PG_MASK)
                     | ((fFlags & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT);
            *GstWalk.pPde = PdeNew;

            /* advance */
            const unsigned cbDone = GST_BIG_PAGE_SIZE - (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
            if (cbDone >= cb)
                return VINF_SUCCESS;
            cb    -= cbDone;
            GCPtr += cbDone;
        }
    }

#else
    /* real / protected mode: ignore. */
    NOREF(pVCpu); NOREF(GCPtr); NOREF(fFlags); NOREF(fMask);
    return VINF_SUCCESS;
#endif
}


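/*
 * Usage sketch (illustration only, not compiled): the fFlags/fMask semantics
 * documented above, e.g. write-protecting a page-aligned range by clearing
 * X86_PTE_RW.  The pVCpu/GCPtr/cb names are assumed to be in the caller's scope.
 */
#if 0
    {
        /* New entry = (old & fMask) | fFlags, so clear RW and set nothing: */
        int rcMod = PGM_GST_NAME(ModifyPage)(pVCpu, GCPtr, cb,
                                             0 /* fFlags */, ~(uint64_t)X86_PTE_RW /* fMask */);
        AssertRC(rcMod);
    }
#endif
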
#ifdef IN_RING3
/**
 * Relocate any GC pointers related to guest mode paging.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   offDelta    The relocation offset.
 */
PGM_GST_DECL(int, Relocate)(PVMCPUCC pVCpu, RTGCPTR offDelta)
{
    RT_NOREF(pVCpu, offDelta);
    return VINF_SUCCESS;
}
#endif