VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllShw.h@ 43667

Last change on this file since 43667 was 43387, checked in by vboxsync, 12 years ago: "VMM: HM cleanup."

/* $Id: PGMAllShw.h 43387 2012-09-21 09:40:25Z vboxsync $ */
/** @file
 * VBox - Page Manager, Shadow Paging Template - All context code.
 */

/*
 * Copyright (C) 2006-2010 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#undef SHWPT
#undef PSHWPT
#undef SHWPTE
#undef PSHWPTE
#undef SHWPD
#undef PSHWPD
#undef SHWPDE
#undef PSHWPDE
#undef SHW_PDE_PG_MASK
#undef SHW_PD_SHIFT
#undef SHW_PD_MASK
#undef SHW_PTE_PG_MASK
#undef SHW_PTE_IS_P
#undef SHW_PTE_IS_RW
#undef SHW_PTE_IS_US
#undef SHW_PTE_IS_A
#undef SHW_PTE_IS_D
#undef SHW_PTE_IS_P_RW
#undef SHW_PTE_IS_TRACK_DIRTY
#undef SHW_PTE_GET_HCPHYS
#undef SHW_PTE_GET_U
#undef SHW_PTE_LOG64
#undef SHW_PTE_SET
#undef SHW_PTE_ATOMIC_SET
#undef SHW_PTE_ATOMIC_SET2
#undef SHW_PTE_SET_RO
#undef SHW_PTE_SET_RW
#undef SHW_PT_SHIFT
#undef SHW_PT_MASK
#undef SHW_TOTAL_PD_ENTRIES
#undef SHW_PDPT_SHIFT
#undef SHW_PDPT_MASK
#undef SHW_PDPE_PG_MASK
#undef SHW_POOL_ROOT_IDX

#if PGM_SHW_TYPE == PGM_TYPE_32BIT
# define SHWPT X86PT
# define PSHWPT PX86PT
# define SHWPTE X86PTE
# define PSHWPTE PX86PTE
# define SHWPD X86PD
# define PSHWPD PX86PD
# define SHWPDE X86PDE
# define PSHWPDE PX86PDE
# define SHW_PDE_PG_MASK X86_PDE_PG_MASK
# define SHW_PD_SHIFT X86_PD_SHIFT
# define SHW_PD_MASK X86_PD_MASK
# define SHW_TOTAL_PD_ENTRIES X86_PG_ENTRIES
# define SHW_PTE_PG_MASK X86_PTE_PG_MASK
# define SHW_PTE_IS_P(Pte) ( (Pte).n.u1Present )
# define SHW_PTE_IS_RW(Pte) ( (Pte).n.u1Write )
# define SHW_PTE_IS_US(Pte) ( (Pte).n.u1User )
# define SHW_PTE_IS_A(Pte) ( (Pte).n.u1Accessed )
# define SHW_PTE_IS_D(Pte) ( (Pte).n.u1Dirty )
# define SHW_PTE_IS_P_RW(Pte) ( (Pte).n.u1Present && (Pte).n.u1Write )
# define SHW_PTE_IS_TRACK_DIRTY(Pte) ( !!((Pte).u & PGM_PTFLAGS_TRACK_DIRTY) )
# define SHW_PTE_GET_HCPHYS(Pte) ( (Pte).u & X86_PTE_PG_MASK )
# define SHW_PTE_LOG64(Pte) ( (uint64_t)(Pte).u )
# define SHW_PTE_GET_U(Pte) ( (Pte).u ) /**< Use with care. */
# define SHW_PTE_SET(Pte, uNew) do { (Pte).u = (uNew); } while (0)
# define SHW_PTE_ATOMIC_SET(Pte, uNew) do { ASMAtomicWriteU32(&(Pte).u, (uNew)); } while (0)
# define SHW_PTE_ATOMIC_SET2(Pte, Pte2) do { ASMAtomicWriteU32(&(Pte).u, (Pte2).u); } while (0)
# define SHW_PTE_SET_RO(Pte) do { (Pte).n.u1Write = 0; } while (0)
# define SHW_PTE_SET_RW(Pte) do { (Pte).n.u1Write = 1; } while (0)
# define SHW_PT_SHIFT X86_PT_SHIFT
# define SHW_PT_MASK X86_PT_MASK
# define SHW_POOL_ROOT_IDX PGMPOOL_IDX_PD

#elif PGM_SHW_TYPE == PGM_TYPE_EPT
# define SHWPT EPTPT
# define PSHWPT PEPTPT
# define SHWPTE EPTPTE
# define PSHWPTE PEPTPTE
# define SHWPD EPTPD
# define PSHWPD PEPTPD
# define SHWPDE EPTPDE
# define PSHWPDE PEPTPDE
# define SHW_PDE_PG_MASK EPT_PDE_PG_MASK
# define SHW_PD_SHIFT EPT_PD_SHIFT
# define SHW_PD_MASK EPT_PD_MASK
# define SHW_PTE_PG_MASK EPT_PTE_PG_MASK
# define SHW_PTE_IS_P(Pte) ( (Pte).n.u1Present ) /* Approximation, works for us. */
# define SHW_PTE_IS_RW(Pte) ( (Pte).n.u1Write )
# define SHW_PTE_IS_US(Pte) ( true )
# define SHW_PTE_IS_A(Pte) ( true )
# define SHW_PTE_IS_D(Pte) ( true )
# define SHW_PTE_IS_P_RW(Pte) ( (Pte).n.u1Present && (Pte).n.u1Write )
# define SHW_PTE_IS_TRACK_DIRTY(Pte) ( false )
# define SHW_PTE_GET_HCPHYS(Pte) ( (Pte).u & X86_PTE_PG_MASK )
# define SHW_PTE_LOG64(Pte) ( (Pte).u )
# define SHW_PTE_GET_U(Pte) ( (Pte).u ) /**< Use with care. */
# define SHW_PTE_SET(Pte, uNew) do { (Pte).u = (uNew); } while (0)
# define SHW_PTE_ATOMIC_SET(Pte, uNew) do { ASMAtomicWriteU64(&(Pte).u, (uNew)); } while (0)
# define SHW_PTE_ATOMIC_SET2(Pte, Pte2) do { ASMAtomicWriteU64(&(Pte).u, (Pte2).u); } while (0)
# define SHW_PTE_SET_RO(Pte) do { (Pte).n.u1Write = 0; } while (0)
# define SHW_PTE_SET_RW(Pte) do { (Pte).n.u1Write = 1; } while (0)
# define SHW_PT_SHIFT EPT_PT_SHIFT
# define SHW_PT_MASK EPT_PT_MASK
# define SHW_PDPT_SHIFT EPT_PDPT_SHIFT
# define SHW_PDPT_MASK EPT_PDPT_MASK
# define SHW_PDPE_PG_MASK EPT_PDPE_PG_MASK
# define SHW_TOTAL_PD_ENTRIES (EPT_PG_AMD64_ENTRIES*EPT_PG_AMD64_PDPE_ENTRIES)
# define SHW_POOL_ROOT_IDX PGMPOOL_IDX_NESTED_ROOT /* do not use! exception is real mode & protected mode without paging. */

#else
# define SHWPT PGMSHWPTPAE
# define PSHWPT PPGMSHWPTPAE
# define SHWPTE PGMSHWPTEPAE
# define PSHWPTE PPGMSHWPTEPAE
# define SHWPD X86PDPAE
# define PSHWPD PX86PDPAE
# define SHWPDE X86PDEPAE
# define PSHWPDE PX86PDEPAE
# define SHW_PDE_PG_MASK X86_PDE_PAE_PG_MASK
# define SHW_PD_SHIFT X86_PD_PAE_SHIFT
# define SHW_PD_MASK X86_PD_PAE_MASK
# define SHW_PTE_PG_MASK X86_PTE_PAE_PG_MASK
# define SHW_PTE_IS_P(Pte) PGMSHWPTEPAE_IS_P(Pte)
# define SHW_PTE_IS_RW(Pte) PGMSHWPTEPAE_IS_RW(Pte)
# define SHW_PTE_IS_US(Pte) PGMSHWPTEPAE_IS_US(Pte)
# define SHW_PTE_IS_A(Pte) PGMSHWPTEPAE_IS_A(Pte)
# define SHW_PTE_IS_D(Pte) PGMSHWPTEPAE_IS_D(Pte)
# define SHW_PTE_IS_P_RW(Pte) PGMSHWPTEPAE_IS_P_RW(Pte)
# define SHW_PTE_IS_TRACK_DIRTY(Pte) PGMSHWPTEPAE_IS_TRACK_DIRTY(Pte)
# define SHW_PTE_GET_HCPHYS(Pte) PGMSHWPTEPAE_GET_HCPHYS(Pte)
# define SHW_PTE_LOG64(Pte) PGMSHWPTEPAE_GET_LOG(Pte)
# define SHW_PTE_GET_U(Pte) PGMSHWPTEPAE_GET_U(Pte) /**< Use with care. */
# define SHW_PTE_SET(Pte, uNew) PGMSHWPTEPAE_SET(Pte, uNew)
# define SHW_PTE_ATOMIC_SET(Pte, uNew) PGMSHWPTEPAE_ATOMIC_SET(Pte, uNew)
# define SHW_PTE_ATOMIC_SET2(Pte, Pte2) PGMSHWPTEPAE_ATOMIC_SET2(Pte, Pte2)
# define SHW_PTE_SET_RO(Pte) PGMSHWPTEPAE_SET_RO(Pte)
# define SHW_PTE_SET_RW(Pte) PGMSHWPTEPAE_SET_RW(Pte)
# define SHW_PT_SHIFT X86_PT_PAE_SHIFT
# define SHW_PT_MASK X86_PT_PAE_MASK

# if PGM_SHW_TYPE == PGM_TYPE_AMD64
#  define SHW_PDPT_SHIFT X86_PDPT_SHIFT
#  define SHW_PDPT_MASK X86_PDPT_MASK_AMD64
#  define SHW_PDPE_PG_MASK X86_PDPE_PG_MASK
#  define SHW_TOTAL_PD_ENTRIES (X86_PG_AMD64_ENTRIES * X86_PG_AMD64_PDPE_ENTRIES)
#  define SHW_POOL_ROOT_IDX PGMPOOL_IDX_AMD64_CR3

# else /* 32-bit PAE mode */
#  define SHW_PDPT_SHIFT X86_PDPT_SHIFT
#  define SHW_PDPT_MASK X86_PDPT_MASK_PAE
#  define SHW_PDPE_PG_MASK X86_PDPE_PG_MASK
#  define SHW_TOTAL_PD_ENTRIES (X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPE_ENTRIES)
#  define SHW_POOL_ROOT_IDX PGMPOOL_IDX_PDPT

# endif
#endif
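
/*
 * Editor's note: a minimal sketch, not part of the original file, of how a
 * multiple-inclusion template header like this one is typically instantiated.
 * The PGM_SHW_NAME* mangling macros shown here are assumptions; the including
 * source defines the template parameters, includes the header, and repeats
 * the dance once per shadow paging mode:
 *
 *      #define PGM_SHW_TYPE        PGM_TYPE_32BIT
 *      #define PGM_SHW_NAME(name)  PGM_SHW_NAME_32BIT(name)
 *      #include "PGMAllShw.h"
 *      #undef  PGM_SHW_TYPE
 *      #undef  PGM_SHW_NAME
 *
 *      #define PGM_SHW_TYPE        PGM_TYPE_PAE
 *      #define PGM_SHW_NAME(name)  PGM_SHW_NAME_PAE(name)
 *      #include "PGMAllShw.h"
 *      #undef  PGM_SHW_TYPE
 *      #undef  PGM_SHW_NAME
 */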


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
RT_C_DECLS_BEGIN
PGM_SHW_DECL(int, GetPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys);
PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags);
RT_C_DECLS_END


/**
 * Gets effective page information (from the VMM page directory).
 *
 * @returns VBox status code.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*.
 * @param   pHCPhys     Where to store the HC physical address of the page.
 *                      This is page aligned.
 * @remark  You should use PGMMapGetPage() for pages in a mapping.
 */
PGM_SHW_DECL(int, GetPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
{
#if PGM_SHW_TYPE == PGM_TYPE_NESTED
    NOREF(pVCpu); NOREF(GCPtr); NOREF(pfFlags); NOREF(pHCPhys);
    return VERR_PAGE_TABLE_NOT_PRESENT;

#else /* PGM_SHW_TYPE != PGM_TYPE_NESTED */
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    PGM_LOCK_ASSERT_OWNER(pVM);

    /*
     * Get the PDE.
     */
# if PGM_SHW_TYPE == PGM_TYPE_AMD64
    X86PDEPAE Pde;

    /* PML4 */
    X86PML4E Pml4e = pgmShwGetLongModePML4E(pVCpu, GCPtr);
    if (!Pml4e.n.u1Present)
        return VERR_PAGE_TABLE_NOT_PRESENT;

    /* PDPT */
    PX86PDPT pPDPT;
    int rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pml4e.u & X86_PML4E_PG_MASK, &pPDPT);
    if (RT_FAILURE(rc))
        return rc;
    const unsigned iPDPT = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
    X86PDPE Pdpe = pPDPT->a[iPDPT];
    if (!Pdpe.n.u1Present)
        return VERR_PAGE_TABLE_NOT_PRESENT;

    /* PD */
    PX86PDPAE pPd;
    rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pdpe.u & X86_PDPE_PG_MASK, &pPd);
    if (RT_FAILURE(rc))
        return rc;
    const unsigned iPd = (GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK;
    Pde = pPd->a[iPd];

    /* Merge accessed, write, user and no-execute bits into the PDE. */
    Pde.n.u1Accessed &= Pml4e.n.u1Accessed & Pdpe.lm.u1Accessed;
    Pde.n.u1Write &= Pml4e.n.u1Write & Pdpe.lm.u1Write;
    Pde.n.u1User &= Pml4e.n.u1User & Pdpe.lm.u1User;
    Pde.n.u1NoExecute |= Pml4e.n.u1NoExecute | Pdpe.lm.u1NoExecute;

# elif PGM_SHW_TYPE == PGM_TYPE_PAE
    X86PDEPAE Pde = pgmShwGetPaePDE(pVCpu, GCPtr);

# elif PGM_SHW_TYPE == PGM_TYPE_EPT
    const unsigned iPd = ((GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK);
    PEPTPD pPDDst;
    EPTPDE Pde;

    int rc = pgmShwGetEPTPDPtr(pVCpu, GCPtr, NULL, &pPDDst);
    if (rc != VINF_SUCCESS) /** @todo this function isn't expected to return informational status codes. Check callers / fix. */
    {
        AssertRC(rc);
        return rc;
    }
    Assert(pPDDst);
    Pde = pPDDst->a[iPd];

# else /* PGM_TYPE_32BIT */
    X86PDE Pde = pgmShwGet32BitPDE(pVCpu, GCPtr);
# endif
    if (!Pde.n.u1Present)
        return VERR_PAGE_TABLE_NOT_PRESENT;
    /* Deal with large pages. */
    if (Pde.b.u1Size)
    {
        /*
         * Store the results.
         * RW and US flags depend on the entire page translation hierarchy - except for
         * legacy PAE which has a simplified PDPE.
         */
        if (pfFlags)
        {
            *pfFlags = (Pde.u & ~SHW_PDE_PG_MASK);
# if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE) /** @todo why do we have to check the guest state here? */
            if ((Pde.u & X86_PTE_PAE_NX) && CPUMIsGuestNXEnabled(pVCpu))
                *pfFlags |= X86_PTE_PAE_NX;
# endif
        }

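        /* Editor's note: for a large page, the result below is the large-page
           base address plus the 4K-aligned offset of GCPtr within that page. */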
        if (pHCPhys)
            *pHCPhys = (Pde.u & SHW_PDE_PG_MASK) + (GCPtr & (RT_BIT(SHW_PD_SHIFT) - 1) & X86_PAGE_4K_BASE_MASK);

        return VINF_SUCCESS;
    }

    /*
     * Get PT entry.
     */
    PSHWPT pPT;
    if (!(Pde.u & PGM_PDFLAGS_MAPPING))
    {
        int rc2 = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pde.u & SHW_PDE_PG_MASK, &pPT);
        if (RT_FAILURE(rc2))
            return rc2;
    }
    else /* mapping: */
    {
# if   PGM_SHW_TYPE == PGM_TYPE_AMD64 \
    || PGM_SHW_TYPE == PGM_TYPE_EPT
        AssertFailed(); /* can't happen */
        pPT = NULL; /* shut up MSC */
# else
        Assert(pgmMapAreMappingsEnabled(pVM));

        PPGMMAPPING pMap = pgmGetMapping(pVM, (RTGCPTR)GCPtr);
        AssertMsgReturn(pMap, ("GCPtr=%RGv\n", GCPtr), VERR_PGM_MAPPING_IPE);
#  if PGM_SHW_TYPE == PGM_TYPE_32BIT
        pPT = pMap->aPTs[(GCPtr - pMap->GCPtr) >> X86_PD_SHIFT].CTX_SUFF(pPT);
#  else /* PAE */
        pPT = pMap->aPTs[(GCPtr - pMap->GCPtr) >> X86_PD_SHIFT].CTX_SUFF(paPaePTs);
#  endif
# endif
    }
    const unsigned iPt = (GCPtr >> SHW_PT_SHIFT) & SHW_PT_MASK;
    SHWPTE Pte = pPT->a[iPt];
    if (!SHW_PTE_IS_P(Pte))
        return VERR_PAGE_NOT_PRESENT;

    /*
     * Store the results.
     * RW and US flags depend on the entire page translation hierarchy - except for
     * legacy PAE which has a simplified PDPE.
     */
    if (pfFlags)
    {
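        /* Editor's note: the second AND term below clears X86_PTE_RW and
           X86_PTE_US from the result unless the PDE grants them too; all
           other PTE flag bits pass through unchanged. */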
        *pfFlags = (SHW_PTE_GET_U(Pte) & ~SHW_PTE_PG_MASK)
                 & ((Pde.u & (X86_PTE_RW | X86_PTE_US)) | ~(uint64_t)(X86_PTE_RW | X86_PTE_US));
# if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE) /** @todo why do we have to check the guest state here? */
        /* The NX bit is determined by a bitwise OR between the PT and PD. */
        if (((SHW_PTE_GET_U(Pte) | Pde.u) & X86_PTE_PAE_NX) && CPUMIsGuestNXEnabled(pVCpu))
            *pfFlags |= X86_PTE_PAE_NX;
# endif
    }

    if (pHCPhys)
        *pHCPhys = SHW_PTE_GET_HCPHYS(Pte);

    return VINF_SUCCESS;
#endif /* PGM_SHW_TYPE != PGM_TYPE_NESTED */
}
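
/*
 * Editor's note: a hedged usage sketch, not part of the original file.  With
 * the PGM_SHW_NAME() mangling macro assumed above, a caller that already owns
 * the PGM lock might query a shadow mapping like this:
 *
 *      uint64_t fFlags;
 *      RTHCPHYS HCPhys;
 *      int rc = PGM_SHW_NAME(GetPage)(pVCpu, GCPtr, &fFlags, &HCPhys);
 *      if (RT_SUCCESS(rc) && (fFlags & X86_PTE_RW))
 *          ...the page is present and writable in the shadow tables...
 */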


/**
 * Modify page flags for a range of pages in the shadow context.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCPtr       Virtual address of the first page in the range. Page aligned!
 * @param   cb          Size (in bytes) of the range to apply the modification to. Page aligned!
 * @param   fFlags      The OR mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 *                      Be extremely CAREFUL with ~'ing values because they can be 32-bit!
 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
 * @remark  You must use PGMMapModifyPage() for pages in a mapping.
 */
PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
{
# if PGM_SHW_TYPE == PGM_TYPE_NESTED
    NOREF(pVCpu); NOREF(GCPtr); NOREF(cb); NOREF(fFlags); NOREF(fMask); NOREF(fOpFlags);
    return VERR_PAGE_TABLE_NOT_PRESENT;

# else /* PGM_SHW_TYPE != PGM_TYPE_NESTED */
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    int rc;

    PGM_LOCK_ASSERT_OWNER(pVM);

    /*
     * Walk page tables and pages till we're done.
     */
    for (;;)
    {
        /*
         * Get the PDE.
         */
#  if PGM_SHW_TYPE == PGM_TYPE_AMD64
        X86PDEPAE Pde;
        /* PML4 */
        X86PML4E Pml4e = pgmShwGetLongModePML4E(pVCpu, GCPtr);
        if (!Pml4e.n.u1Present)
            return VERR_PAGE_TABLE_NOT_PRESENT;

        /* PDPT */
        PX86PDPT pPDPT;
        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pml4e.u & X86_PML4E_PG_MASK, &pPDPT);
        if (RT_FAILURE(rc))
            return rc;
        const unsigned iPDPT = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
        X86PDPE Pdpe = pPDPT->a[iPDPT];
        if (!Pdpe.n.u1Present)
            return VERR_PAGE_TABLE_NOT_PRESENT;

        /* PD */
        PX86PDPAE pPd;
        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pdpe.u & X86_PDPE_PG_MASK, &pPd);
        if (RT_FAILURE(rc))
            return rc;
        const unsigned iPd = (GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK;
        Pde = pPd->a[iPd];

#  elif PGM_SHW_TYPE == PGM_TYPE_PAE
        X86PDEPAE Pde = pgmShwGetPaePDE(pVCpu, GCPtr);

#  elif PGM_SHW_TYPE == PGM_TYPE_EPT
        const unsigned iPd = ((GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK);
        PEPTPD pPDDst;
        EPTPDE Pde;

        rc = pgmShwGetEPTPDPtr(pVCpu, GCPtr, NULL, &pPDDst);
        if (rc != VINF_SUCCESS)
        {
            AssertRC(rc);
            return rc;
        }
        Assert(pPDDst);
        Pde = pPDDst->a[iPd];

#  else /* PGM_TYPE_32BIT */
        X86PDE Pde = pgmShwGet32BitPDE(pVCpu, GCPtr);
#  endif
        if (!Pde.n.u1Present)
            return VERR_PAGE_TABLE_NOT_PRESENT;

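        /* Editor's note: unlike GetPage above, this path does not expect
           large pages; the assertion below enforces that. */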
        AssertFatal(!Pde.b.u1Size);

        /*
         * Map the page table.
         */
        PSHWPT pPT;
        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pde.u & SHW_PDE_PG_MASK, &pPT);
        if (RT_FAILURE(rc))
            return rc;

        unsigned iPTE = (GCPtr >> SHW_PT_SHIFT) & SHW_PT_MASK;
        while (iPTE < RT_ELEMENTS(pPT->a))
        {
            if (SHW_PTE_IS_P(pPT->a[iPTE]))
            {
                SHWPTE const OrgPte = pPT->a[iPTE];
                SHWPTE NewPte;

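                /* Editor's note: apply the caller's masks while keeping the
                   physical address bits intact - they always survive the AND
                   (fMask | SHW_PTE_PG_MASK) and are stripped from the OR. */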
                SHW_PTE_SET(NewPte, (SHW_PTE_GET_U(OrgPte) & (fMask | SHW_PTE_PG_MASK)) | (fFlags & ~SHW_PTE_PG_MASK));
                if (!SHW_PTE_IS_P(NewPte))
                {
                    /** @todo Some CSAM code path might end up here and upset
                     *  the page pool. */
                    AssertFailed();
                }
                else if (   SHW_PTE_IS_RW(NewPte)
                         && !SHW_PTE_IS_RW(OrgPte)
                         && !(fOpFlags & PGM_MK_PG_IS_MMIO2) )
                {
                    /** @todo Optimize \#PF handling by caching data.  We can
                     *        then use this when PGM_MK_PG_IS_WRITE_FAULT is
                     *        set instead of resolving the guest physical
                     *        address yet again. */
                    RTGCPHYS GCPhys;
                    uint64_t fGstPte;
                    rc = PGMGstGetPage(pVCpu, GCPtr, &fGstPte, &GCPhys);
                    AssertRC(rc);
                    if (RT_SUCCESS(rc))
                    {
                        Assert(fGstPte & X86_PTE_RW);
                        PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
                        Assert(pPage);
                        if (pPage)
                        {
                            rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
                            AssertRCReturn(rc, rc);
                            Log(("%s: pgmPhysPageMakeWritable on %RGv / %RGp %R[pgmpage]\n", __PRETTY_FUNCTION__, GCPtr, GCPhys, pPage));
                        }
                    }
                }

                SHW_PTE_ATOMIC_SET2(pPT->a[iPTE], NewPte);
#  if PGM_SHW_TYPE == PGM_TYPE_EPT
                HMInvalidatePhysPage(pVM, (RTGCPHYS)GCPtr);
#  else
                PGM_INVL_PG_ALL_VCPU(pVM, GCPtr);
#  endif
            }

            /* next page */
            cb -= PAGE_SIZE;
            if (!cb)
                return VINF_SUCCESS;
            GCPtr += PAGE_SIZE;
            iPTE++;
        }
    }
# endif /* PGM_SHW_TYPE != PGM_TYPE_NESTED */
}

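/*
 * Editor's note: a hedged usage sketch, not part of the original file.
 * Because the update is "AND with fMask, then OR with fFlags", write
 * protecting a single shadow page could look like this (the instantiated
 * name via PGM_SHW_NAME() is an assumption).  Note the uint64_t cast before
 * the ~, exactly as the fMask doc comment warns:
 *
 *      uint64_t const fFlags = 0;                       set no bits
 *      uint64_t const fMask  = ~(uint64_t)X86_PTE_RW;   keep all but RW
 *      rc = PGM_SHW_NAME(ModifyPage)(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, 0);
 */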