VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@ 43667

Last change on this file since 43667 was 43387, checked in by vboxsync, 12 years ago

VMM: HM cleanup.

[23]1/* $Id: PGMAll.cpp 43387 2012-09-21 09:40:25Z vboxsync $ */
[1]2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
[28800]7 * Copyright (C) 2006-2007 Oracle Corporation
[1]8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
[5999]12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
[1]16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PGM
[35346]22#include <VBox/vmm/pgm.h>
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/selm.h>
25#include <VBox/vmm/iom.h>
[1]26#include <VBox/sup.h>
[35346]27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/stam.h>
29#include <VBox/vmm/csam.h>
30#include <VBox/vmm/patm.h>
31#include <VBox/vmm/trpm.h>
[40274]32#ifdef VBOX_WITH_REM
33# include <VBox/vmm/rem.h>
34#endif
[35346]35#include <VBox/vmm/em.h>
[43387]36#include <VBox/vmm/hm.h>
37#include <VBox/vmm/hm_vmx.h>
[35333]38#include "PGMInternal.h"
[35346]39#include <VBox/vmm/vm.h>
[35333]40#include "PGMInline.h"
[1]41#include <iprt/assert.h>
[29250]42#include <iprt/asm-amd64-x86.h>
[1]43#include <iprt/string.h>
44#include <VBox/log.h>
45#include <VBox/param.h>
46#include <VBox/err.h>
47
48
49/*******************************************************************************
50* Structures and Typedefs *
51*******************************************************************************/
52/**
53 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
54 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
55 */
56typedef struct PGMHVUSTATE
57{
[41783]58 /** Pointer to the VM. */
[1]59 PVM pVM;
[41783]60 /** Pointer to the VMCPU. */
[18927]61 PVMCPU pVCpu;
[1]62 /** The todo flags. */
63 RTUINT fTodo;
64 /** The CR4 register value. */
65 uint32_t cr4;
66} PGMHVUSTATE, *PPGMHVUSTATE;
67
68
69/*******************************************************************************
70* Internal Functions *
71*******************************************************************************/
[18992]72DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
[31167]73DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
[31066]74#ifndef IN_RC
[31081]75static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
76static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
[31066]77#endif
[1]78
[31066]79
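/*
 * The blocks that follow instantiate the mode-specific PGM template code once
 * for every shadow/guest paging-mode combination: PGM_SHW_TYPE/PGM_SHW_NAME
 * select the shadow mode (32-bit, PAE, AMD64, nested, EPT), PGM_GST_TYPE and
 * PGM_GST_NAME select the guest mode (real, protected, 32-bit, PAE, AMD64),
 * and PGM_BTH_NAME names the combined "both" workers pulled in from
 * PGMAllBth.h.  The generated functions are reached at runtime through the
 * PGM_SHW_PFN, PGM_GST_PFN and PGM_BTH_PFN dispatch macros used later in this
 * file.
 */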
[1]80/*
81 * Shadow - 32-bit mode
82 */
83#define PGM_SHW_TYPE PGM_TYPE_32BIT
84#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
85#include "PGMAllShw.h"
86
87/* Guest - real mode */
88#define PGM_GST_TYPE PGM_TYPE_REAL
89#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
90#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
91#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
[16428]92#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
[17215]93#include "PGMGstDefs.h"
[1]94#include "PGMAllGst.h"
95#include "PGMAllBth.h"
96#undef BTH_PGMPOOLKIND_PT_FOR_PT
[16317]97#undef BTH_PGMPOOLKIND_ROOT
[1]98#undef PGM_BTH_NAME
99#undef PGM_GST_TYPE
100#undef PGM_GST_NAME
101
102/* Guest - protected mode */
103#define PGM_GST_TYPE PGM_TYPE_PROT
104#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
105#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
106#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
[16428]107#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
[17215]108#include "PGMGstDefs.h"
[1]109#include "PGMAllGst.h"
110#include "PGMAllBth.h"
111#undef BTH_PGMPOOLKIND_PT_FOR_PT
[16317]112#undef BTH_PGMPOOLKIND_ROOT
[1]113#undef PGM_BTH_NAME
114#undef PGM_GST_TYPE
115#undef PGM_GST_NAME
116
117/* Guest - 32-bit mode */
118#define PGM_GST_TYPE PGM_TYPE_32BIT
119#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
120#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
121#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
122#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
[16317]123#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
[17215]124#include "PGMGstDefs.h"
[1]125#include "PGMAllGst.h"
126#include "PGMAllBth.h"
127#undef BTH_PGMPOOLKIND_PT_FOR_BIG
128#undef BTH_PGMPOOLKIND_PT_FOR_PT
[16317]129#undef BTH_PGMPOOLKIND_ROOT
[1]130#undef PGM_BTH_NAME
131#undef PGM_GST_TYPE
132#undef PGM_GST_NAME
133
134#undef PGM_SHW_TYPE
135#undef PGM_SHW_NAME
136
137
138/*
139 * Shadow - PAE mode
140 */
141#define PGM_SHW_TYPE PGM_TYPE_PAE
142#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
143#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
144#include "PGMAllShw.h"
145
146/* Guest - real mode */
147#define PGM_GST_TYPE PGM_TYPE_REAL
148#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
149#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
150#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
[16428]151#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
[17215]152#include "PGMGstDefs.h"
[1]153#include "PGMAllBth.h"
154#undef BTH_PGMPOOLKIND_PT_FOR_PT
[16317]155#undef BTH_PGMPOOLKIND_ROOT
[1]156#undef PGM_BTH_NAME
157#undef PGM_GST_TYPE
158#undef PGM_GST_NAME
159
160/* Guest - protected mode */
161#define PGM_GST_TYPE PGM_TYPE_PROT
162#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
163#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
164#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
[16428]165#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
[17215]166#include "PGMGstDefs.h"
[1]167#include "PGMAllBth.h"
168#undef BTH_PGMPOOLKIND_PT_FOR_PT
[16317]169#undef BTH_PGMPOOLKIND_ROOT
[1]170#undef PGM_BTH_NAME
171#undef PGM_GST_TYPE
172#undef PGM_GST_NAME
173
174/* Guest - 32-bit mode */
175#define PGM_GST_TYPE PGM_TYPE_32BIT
176#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
177#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
178#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
179#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
[16317]180#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
[17215]181#include "PGMGstDefs.h"
[1]182#include "PGMAllBth.h"
183#undef BTH_PGMPOOLKIND_PT_FOR_BIG
184#undef BTH_PGMPOOLKIND_PT_FOR_PT
[16317]185#undef BTH_PGMPOOLKIND_ROOT
[1]186#undef PGM_BTH_NAME
187#undef PGM_GST_TYPE
188#undef PGM_GST_NAME
189
190
191/* Guest - PAE mode */
192#define PGM_GST_TYPE PGM_TYPE_PAE
193#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
194#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
195#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
196#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
[16317]197#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
[17215]198#include "PGMGstDefs.h"
[1]199#include "PGMAllGst.h"
200#include "PGMAllBth.h"
201#undef BTH_PGMPOOLKIND_PT_FOR_BIG
202#undef BTH_PGMPOOLKIND_PT_FOR_PT
[16317]203#undef BTH_PGMPOOLKIND_ROOT
[1]204#undef PGM_BTH_NAME
205#undef PGM_GST_TYPE
206#undef PGM_GST_NAME
207
208#undef PGM_SHW_TYPE
209#undef PGM_SHW_NAME
210
211
[13832]212#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
[1]213/*
214 * Shadow - AMD64 mode
215 */
[13188]216# define PGM_SHW_TYPE PGM_TYPE_AMD64
217# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
218# include "PGMAllShw.h"
[1]219
[16321]220/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
[13188]221# define PGM_GST_TYPE PGM_TYPE_PROT
222# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
223# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
224# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
[16428]225# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
[17215]226# include "PGMGstDefs.h"
[13188]227# include "PGMAllBth.h"
228# undef BTH_PGMPOOLKIND_PT_FOR_PT
[16317]229# undef BTH_PGMPOOLKIND_ROOT
[13188]230# undef PGM_BTH_NAME
231# undef PGM_GST_TYPE
232# undef PGM_GST_NAME
[9001]233
[13188]234# ifdef VBOX_WITH_64_BITS_GUESTS
[1]235/* Guest - AMD64 mode */
[13188]236# define PGM_GST_TYPE PGM_TYPE_AMD64
237# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
238# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
239# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
240# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
[16317]241# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
[17215]242# include "PGMGstDefs.h"
[13188]243# include "PGMAllGst.h"
244# include "PGMAllBth.h"
245# undef BTH_PGMPOOLKIND_PT_FOR_BIG
246# undef BTH_PGMPOOLKIND_PT_FOR_PT
[16317]247# undef BTH_PGMPOOLKIND_ROOT
[13188]248# undef PGM_BTH_NAME
249# undef PGM_GST_TYPE
250# undef PGM_GST_NAME
251# endif /* VBOX_WITH_64_BITS_GUESTS */
[1]252
[13188]253# undef PGM_SHW_TYPE
254# undef PGM_SHW_NAME
[9021]255
[13188]256
[9021]257/*
258 * Shadow - Nested paging mode
259 */
[13188]260# define PGM_SHW_TYPE PGM_TYPE_NESTED
261# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
262# include "PGMAllShw.h"
[9021]263
264/* Guest - real mode */
[13188]265# define PGM_GST_TYPE PGM_TYPE_REAL
266# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
267# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
[17215]268# include "PGMGstDefs.h"
[13188]269# include "PGMAllBth.h"
270# undef PGM_BTH_NAME
271# undef PGM_GST_TYPE
272# undef PGM_GST_NAME
[9021]273
274/* Guest - protected mode */
[13188]275# define PGM_GST_TYPE PGM_TYPE_PROT
276# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
277# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
[17215]278# include "PGMGstDefs.h"
[13188]279# include "PGMAllBth.h"
280# undef PGM_BTH_NAME
281# undef PGM_GST_TYPE
282# undef PGM_GST_NAME
[9021]283
284/* Guest - 32-bit mode */
[13188]285# define PGM_GST_TYPE PGM_TYPE_32BIT
286# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
287# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
[17215]288# include "PGMGstDefs.h"
[13188]289# include "PGMAllBth.h"
290# undef PGM_BTH_NAME
291# undef PGM_GST_TYPE
292# undef PGM_GST_NAME
[9021]293
294/* Guest - PAE mode */
[13188]295# define PGM_GST_TYPE PGM_TYPE_PAE
296# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
297# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
[17215]298# include "PGMGstDefs.h"
[13188]299# include "PGMAllBth.h"
300# undef PGM_BTH_NAME
301# undef PGM_GST_TYPE
302# undef PGM_GST_NAME
[9021]303
[13188]304# ifdef VBOX_WITH_64_BITS_GUESTS
[9021]305/* Guest - AMD64 mode */
[13188]306# define PGM_GST_TYPE PGM_TYPE_AMD64
307# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
308# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
[17215]309# include "PGMGstDefs.h"
[13188]310# include "PGMAllBth.h"
311# undef PGM_BTH_NAME
312# undef PGM_GST_TYPE
313# undef PGM_GST_NAME
314# endif /* VBOX_WITH_64_BITS_GUESTS */
[9021]315
[13188]316# undef PGM_SHW_TYPE
317# undef PGM_SHW_NAME
[10822]318
[13188]319
[10822]320/*
321 * Shadow - EPT
322 */
[13188]323# define PGM_SHW_TYPE PGM_TYPE_EPT
324# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
325# include "PGMAllShw.h"
[10822]326
327/* Guest - real mode */
[13188]328# define PGM_GST_TYPE PGM_TYPE_REAL
329# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
330# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
331# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
[17215]332# include "PGMGstDefs.h"
[13188]333# include "PGMAllBth.h"
334# undef BTH_PGMPOOLKIND_PT_FOR_PT
335# undef PGM_BTH_NAME
336# undef PGM_GST_TYPE
337# undef PGM_GST_NAME
[10822]338
339/* Guest - protected mode */
[13188]340# define PGM_GST_TYPE PGM_TYPE_PROT
341# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
342# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
343# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
[17215]344# include "PGMGstDefs.h"
[13188]345# include "PGMAllBth.h"
346# undef BTH_PGMPOOLKIND_PT_FOR_PT
347# undef PGM_BTH_NAME
348# undef PGM_GST_TYPE
349# undef PGM_GST_NAME
[10822]350
351/* Guest - 32-bit mode */
[13188]352# define PGM_GST_TYPE PGM_TYPE_32BIT
353# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
354# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
355# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
[17215]356# include "PGMGstDefs.h"
[13188]357# include "PGMAllBth.h"
358# undef BTH_PGMPOOLKIND_PT_FOR_PT
359# undef PGM_BTH_NAME
360# undef PGM_GST_TYPE
361# undef PGM_GST_NAME
[10822]362
363/* Guest - PAE mode */
[13188]364# define PGM_GST_TYPE PGM_TYPE_PAE
365# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
366# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
367# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
[17215]368# include "PGMGstDefs.h"
[13188]369# include "PGMAllBth.h"
370# undef BTH_PGMPOOLKIND_PT_FOR_PT
371# undef PGM_BTH_NAME
372# undef PGM_GST_TYPE
373# undef PGM_GST_NAME
[10822]374
[13188]375# ifdef VBOX_WITH_64_BITS_GUESTS
[10822]376/* Guest - AMD64 mode */
[13188]377# define PGM_GST_TYPE PGM_TYPE_AMD64
378# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
379# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
380# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
[17215]381# include "PGMGstDefs.h"
[13188]382# include "PGMAllBth.h"
383# undef BTH_PGMPOOLKIND_PT_FOR_PT
384# undef PGM_BTH_NAME
385# undef PGM_GST_TYPE
386# undef PGM_GST_NAME
387# endif /* VBOX_WITH_64_BITS_GUESTS */
[10822]388
[13188]389# undef PGM_SHW_TYPE
390# undef PGM_SHW_NAME
[10822]391
[13832]392#endif /* !IN_RC */
[1]393
[13067]394
395#ifndef IN_RING3
[1]396/**
397 * #PF Handler.
398 *
399 * @returns VBox status code (appropriate for trap handling and GC return).
[41802]400 * @param pVCpu Pointer to the VMCPU.
[1]401 * @param uErr The trap error code.
402 * @param pRegFrame Trap register frame.
403 * @param pvFault The fault address.
404 */
[20671]405VMMDECL(int) PGMTrap0eHandler(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
[1]406{
[20671]407 PVM pVM = pVCpu->CTX_SUFF(pVM);
408
[41906]409 Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
[31123]410 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, a);
[18927]411 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
[1]412
413
414#ifdef VBOX_WITH_STATISTICS
415 /*
416 * Error code stats.
417 */
418 if (uErr & X86_TRAP_PF_US)
419 {
420 if (!(uErr & X86_TRAP_PF_P))
421 {
422 if (uErr & X86_TRAP_PF_RW)
[31123]423 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
[1]424 else
[31123]425 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
[1]426 }
427 else if (uErr & X86_TRAP_PF_RW)
[31123]428 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
[1]429 else if (uErr & X86_TRAP_PF_RSVD)
[31123]430 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
[7629]431 else if (uErr & X86_TRAP_PF_ID)
[31123]432 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
[1]433 else
[31123]434 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
[1]435 }
436 else
[7629]437 { /* Supervisor */
[1]438 if (!(uErr & X86_TRAP_PF_P))
439 {
440 if (uErr & X86_TRAP_PF_RW)
[31123]441 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
[1]442 else
[31123]443 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
[1]444 }
445 else if (uErr & X86_TRAP_PF_RW)
[31123]446 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
[7629]447 else if (uErr & X86_TRAP_PF_ID)
[31123]448 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
[1]449 else if (uErr & X86_TRAP_PF_RSVD)
[31123]450 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
[1]451 }
[13232]452#endif /* VBOX_WITH_STATISTICS */
[1]453
454 /*
455 * Call the worker.
456 */
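    /* Note: PGM_BTH_PFN dispatches to the Trap0eHandler instance generated
       above (from PGMAllBth.h) for the VCPU's current shadow/guest mode pair. */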
[26202]457 bool fLockTaken = false;
458 int rc = PGM_BTH_PFN(Trap0eHandler, pVCpu)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
459 if (fLockTaken)
460 {
[37354]461 PGM_LOCK_ASSERT_OWNER(pVM);
[26202]462 pgmUnlock(pVM);
463 }
[31780]464 LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));
[21056]465
[31780]466 /*
467 * Return code tweaks.
468 */
469 if (rc != VINF_SUCCESS)
470 {
471 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
472 rc = VINF_SUCCESS;
473
[21056]474# ifdef IN_RING0
[31780]475 /* Note: hack alert for difficult to reproduce problem. */
476 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
477 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
478 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
479 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
480 {
481 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
482 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
483 rc = VINF_SUCCESS;
484 }
485# endif
[21056]486 }
487
[31123]488 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eGuestPF); });
[18927]489 STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
[31123]490 pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
491 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
[1]492 return rc;
493}
[13067]494#endif /* !IN_RING3 */
[1]495
[13067]496
[1]497/**
498 * Prefetch a page
499 *
500 * Typically used to sync commonly used pages before entering raw mode
501 * after a CR3 reload.
502 *
503 * @returns VBox status code suitable for scheduling.
504 * @retval VINF_SUCCESS on success.
505 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
[41802]506 * @param pVCpu Pointer to the VMCPU.
[1]507 * @param GCPtrPage Page to invalidate.
508 */
[18992]509VMMDECL(int) PGMPrefetchPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
[1]510{
[31123]511 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
[18992]512 int rc = PGM_BTH_PFN(PrefetchPage, pVCpu)(pVCpu, GCPtrPage);
[31123]513 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
[13818]514 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
[1]515 return rc;
516}
517
518
519/**
520 * Gets the mapping corresponding to the specified address (if any).
521 *
522 * @returns Pointer to the mapping.
523 * @returns NULL if no mapping was found.
524 *
[41783]525 * @param pVM Pointer to the VM.
[1]526 * @param GCPtr The guest context pointer.
527 */
528PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
529{
[13019]530 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
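    /* The mapping list is sorted by address, so the walk can stop as soon as
       it has passed the address being looked up. */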
[1]531 while (pMapping)
532 {
533 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
534 break;
535 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
536 return pMapping;
[13019]537 pMapping = pMapping->CTX_SUFF(pNext);
[1]538 }
539 return NULL;
540}
541
542
543/**
544 * Verifies a range of pages for read or write access
545 *
546 * Only checks the guest's page tables
547 *
548 * @returns VBox status code.
[41802]549 * @param pVCpu Pointer to the VMCPU.
[1]550 * @param Addr Guest virtual address to check
551 * @param cbSize Access size
552 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
[13232]553 * @remarks Currently not in use.
[1]554 */
[18992]555VMMDECL(int) PGMIsValidAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
[1]556{
557 /*
558 * Validate input.
559 */
560 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
561 {
562 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
563 return VERR_INVALID_PARAMETER;
564 }
565
566 uint64_t fPage;
[18988]567 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPage, NULL);
[13816]568 if (RT_FAILURE(rc))
[1]569 {
[13823]570 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
[1]571 return VINF_EM_RAW_GUEST_TRAP;
572 }
573
574 /*
575 * Check if the access would cause a page fault
576 *
577 * Note that hypervisor page directories are not present in the guest's tables, so this check
578 * is sufficient.
579 */
580 bool fWrite = !!(fAccess & X86_PTE_RW);
581 bool fUser = !!(fAccess & X86_PTE_US);
582 if ( !(fPage & X86_PTE_P)
583 || (fWrite && !(fPage & X86_PTE_RW))
584 || (fUser && !(fPage & X86_PTE_US)) )
585 {
[13823]586 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
[1]587 return VINF_EM_RAW_GUEST_TRAP;
588 }
[13816]589 if ( RT_SUCCESS(rc)
[1]590 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
[18992]591 return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
[1]592 return rc;
593}
594
595
596/**
597 * Verifies a range of pages for read or write access
598 *
599 * Supports handling of pages marked for dirty bit tracking and CSAM
600 *
601 * @returns VBox status code.
[41802]602 * @param pVCpu Pointer to the VMCPU.
[1]603 * @param Addr Guest virtual address to check
604 * @param cbSize Access size
605 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
606 */
[18992]607VMMDECL(int) PGMVerifyAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
[1]608{
[18992]609 PVM pVM = pVCpu->CTX_SUFF(pVM);
610
[13232]611 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
612
[1]613 /*
[13232]614 * Get going.
[1]615 */
616 uint64_t fPageGst;
[18988]617 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPageGst, NULL);
[13816]618 if (RT_FAILURE(rc))
[1]619 {
[13823]620 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
[1]621 return VINF_EM_RAW_GUEST_TRAP;
622 }
623
624 /*
625 * Check if the access would cause a page fault
626 *
627 * Note that hypervisor page directories are not present in the guest's tables, so this check
628 * is sufficient.
629 */
630 const bool fWrite = !!(fAccess & X86_PTE_RW);
631 const bool fUser = !!(fAccess & X86_PTE_US);
[30889]632 if ( !(fPageGst & X86_PTE_P)
[1]633 || (fWrite && !(fPageGst & X86_PTE_RW))
634 || (fUser && !(fPageGst & X86_PTE_US)) )
635 {
[13823]636 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
[1]637 return VINF_EM_RAW_GUEST_TRAP;
638 }
639
[31066]640 if (!pVM->pgm.s.fNestedPaging)
[1]641 {
642 /*
[13232]643 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
644 */
[18988]645 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, NULL, NULL);
[9021]646 if ( rc == VERR_PAGE_NOT_PRESENT
647 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
648 {
649 /*
[30889]650 * Page is not present in our page tables.
651 * Try to sync it!
652 */
[9021]653 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
654 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
[18992]655 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVCpu)(pVCpu, Addr, fPageGst, uErr);
[9021]656 if (rc != VINF_SUCCESS)
657 return rc;
658 }
659 else
[13823]660 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
[1]661 }
662
663#if 0 /* def VBOX_STRICT; triggers too often now */
664 /*
665 * This check is a bit paranoid, but useful.
666 */
[30889]667 /* Note! This will assert when writing to monitored pages (a bit annoying actually). */
[1]668 uint64_t fPageShw;
[18988]669 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
[13816]670 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
[1]671 || (fWrite && !(fPageShw & X86_PTE_RW))
672 || (fUser && !(fPageShw & X86_PTE_US)) )
673 {
[13823]674 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
[1]675 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
676 return VINF_EM_RAW_GUEST_TRAP;
677 }
678#endif
679
[13816]680 if ( RT_SUCCESS(rc)
[1]681 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
682 || Addr + cbSize < Addr))
[2279]683 {
684 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
[2298]685 for (;;)
[2279]686 {
[2324]687 Addr += PAGE_SIZE;
688 if (cbSize > PAGE_SIZE)
689 cbSize -= PAGE_SIZE;
690 else
691 cbSize = 1;
[18992]692 rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
[2279]693 if (rc != VINF_SUCCESS)
694 break;
[2324]695 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
[2298]696 break;
[2279]697 }
698 }
[1]699 return rc;
700}
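/*
 * Illustrative (hypothetical) use of PGMVerifyAccess, checking that the guest
 * may write a buffer before an emulated store; the variable names are made up
 * for the example:
 *
 *     int rc2 = PGMVerifyAccess(pVCpu, GCPtrDst, cbToWrite,
 *                               X86_PTE_RW | X86_PTE_US);
 *     if (rc2 == VINF_EM_RAW_GUEST_TRAP)
 *         // deliver the page fault to the guest instead of emulating
 *
 * Only X86_PTE_RW and X86_PTE_US are meaningful in fAccess, as asserted at
 * the top of the function.
 */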
701
[2298]702
[1]703/**
704 * Emulation of the invlpg instruction (HC only actually).
705 *
[31636]706 * @returns Strict VBox status code, special care required.
[13235]707 * @retval VINF_PGM_SYNC_CR3 - handled.
708 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
709 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
710 *
[41802]711 * @param pVCpu Pointer to the VMCPU.
[1]712 * @param GCPtrPage Page to invalidate.
[13235]713 *
714 * @remark ASSUMES the page table entry or page directory is valid. Fairly
715 * safe, but there could be edge cases!
716 *
[1]717 * @todo Flush page or page directory only if necessary!
[31636]718 * @todo VBOXSTRICTRC
[1]719 */
[18992]720VMMDECL(int) PGMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
[1]721{
[18992]722 PVM pVM = pVCpu->CTX_SUFF(pVM);
[4268]723 int rc;
[13823]724 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
[1]725
[40274]726#if !defined(IN_RING3) && defined(VBOX_WITH_REM)
[4268]727 /*
728 * Notify the recompiler so it can record this instruction.
729 */
[25958]730 REMNotifyInvalidatePage(pVM, GCPtrPage);
[13235]731#endif /* !IN_RING3 */
[4268]732
[13235]733
[13832]734#ifdef IN_RC
[13235]735 /*
736 * Check for conflicts and pending CR3 monitoring updates.
737 */
[36891]738 if (pgmMapAreMappingsFloating(pVM))
[13235]739 {
740 if ( pgmGetMapping(pVM, GCPtrPage)
[18988]741 && PGMGstGetPage(pVCpu, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
[13235]742 {
743 LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
[19141]744 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
[31123]745 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgConflict);
[13235]746 return VINF_PGM_SYNC_CR3;
747 }
748
[18927]749 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
[13235]750 {
751 LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
[31123]752 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgSyncMonCR3);
[13235]753 return VINF_EM_RAW_EMULATE_INSTR;
754 }
755 }
[13832]756#endif /* IN_RC */
[13235]757
758 /*
759 * Call paging mode specific worker.
760 */
[31123]761 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
[19790]762 pgmLock(pVM);
[18992]763 rc = PGM_BTH_PFN(InvalidatePage, pVCpu)(pVCpu, GCPtrPage);
[19790]764 pgmUnlock(pVM);
[31123]765 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
[1]766
[13235]767#ifdef IN_RING3
[1]768 /*
769 * Check if we have a pending update of the CR3 monitoring.
770 */
[13816]771 if ( RT_SUCCESS(rc)
[18927]772 && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
[1]773 {
[18927]774 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
[25935]775 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
[1]776 }
777
778 /*
779 * Inform CSAM about the flush
[13235]780 *
781 * Note: This is to check if monitored pages have been changed; when we implement
782 * callbacks for virtual handlers, this is no longer required.
[1]783 */
784 CSAMR3FlushPage(pVM, GCPtrPage);
[13235]785#endif /* IN_RING3 */
[25912]786
787 /* Ignore all irrelevant error codes. */
[26150]788 if ( rc == VERR_PAGE_NOT_PRESENT
789 || rc == VERR_PAGE_TABLE_NOT_PRESENT
790 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
791 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
[25912]792 rc = VINF_SUCCESS;
793
[1]794 return rc;
795}
796
797
798/**
799 * Executes an instruction using the interpreter.
800 *
801 * @returns VBox status code (appropriate for trap handling and GC return).
[41783]802 * @param pVM Pointer to the VM.
[41802]803 * @param pVCpu Pointer to the VMCPU.
[1]804 * @param pRegFrame Register frame.
805 * @param pvFault Fault address.
806 */
[31636]807VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
[1]808{
[40450]809 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, pRegFrame, pvFault);
[1]810 if (rc == VERR_EM_INTERPRETER)
811 rc = VINF_EM_RAW_EMULATE_INSTR;
812 if (rc != VINF_SUCCESS)
[31636]813 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
[1]814 return rc;
815}
816
817
818/**
819 * Gets effective page information (from the VMM page directory).
820 *
821 * @returns VBox status.
[41802]822 * @param pVCpu Pointer to the VMCPU.
[1]823 * @param GCPtr Guest Context virtual address of the page.
824 * @param pfFlags Where to store the flags. These are X86_PTE_*.
825 * @param pHCPhys Where to store the HC physical address of the page.
826 * This is page aligned.
827 * @remark You should use PGMMapGetPage() for pages in a mapping.
828 */
[18988]829VMMDECL(int) PGMShwGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
[1]830{
[20068]831 pgmLock(pVCpu->CTX_SUFF(pVM));
832 int rc = PGM_SHW_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pHCPhys);
833 pgmUnlock(pVCpu->CTX_SUFF(pVM));
834 return rc;
[1]835}
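/*
 * Illustrative (hypothetical) use: querying what the shadow page tables map
 * at a guest address (variable names invented for the example):
 *
 *     uint64_t fFlags;
 *     RTHCPHYS HCPhys;
 *     int rc = PGMShwGetPage(pVCpu, GCPtr, &fFlags, &HCPhys);
 *     if (RT_SUCCESS(rc) && (fFlags & X86_PTE_RW))
 *         // the page is currently mapped writable in the shadow tables
 *
 * The PGM lock is taken and released internally, as shown above.
 */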
836
837
838/**
839 * Modify page flags for a range of pages in the shadow context.
840 *
841 * The existing flags are ANDed with the fMask and ORed with the fFlags.
842 *
843 * @returns VBox status code.
[41802]844 * @param pVCpu Pointer to the VMCPU.
[1]845 * @param GCPtr Virtual address of the first page in the range.
846 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
847 * @param fMask The AND mask - page flags X86_PTE_*.
848 * Be very CAREFUL when ~'ing constants which could be 32-bit!
[30326]849 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
[1]850 * @remark You must use PGMMapModifyPage() for pages in a mapping.
851 */
[30326]852DECLINLINE(int) pdmShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
[1]853{
[32036]854 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
[30326]855 Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
[1]856
[30326]857 GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */
[1]858
[19874]859 PVM pVM = pVCpu->CTX_SUFF(pVM);
860 pgmLock(pVM);
[30326]861 int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, fOpFlags);
[19874]862 pgmUnlock(pVM);
863 return rc;
[1]864}
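/* The PGMShwMakePage* helpers below are thin wrappers around pdmShwModifyPage;
   each merely supplies the fFlags/fMask combination for its purpose: clear
   X86_PTE_RW (read-only), set X86_PTE_RW (writable), or zero all flags so the
   entry becomes not present. */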
865
[30326]866
[9824]867/**
[30326]868 * Changes the page flags for a single page in the shadow page tables so as to
869 * make it read-only.
870 *
871 * @returns VBox status code.
[41802]872 * @param pVCpu Pointer to the VMCPU.
[30326]873 * @param GCPtr Virtual address of the first page in the range.
874 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
875 */
876VMMDECL(int) PGMShwMakePageReadonly(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
877{
878 return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
879}
880
881
882/**
883 * Changes the page flags for a single page in the shadow page tables so as to
884 * make it writable.
885 *
886 * The caller must know with 101% certainty that the guest page tables map this
887 * as writable too. This function will deal with shared, zero and write-monitored
888 * pages.
889 *
890 * @returns VBox status code.
[41802]891 * @param pVCpu Pointer to the VMCPU.
[30326]892 * @param GCPtr Virtual address of the first page in the range.
893 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags; include
894 * PGM_MK_PG_IS_MMIO2 if it is an MMIO2 page.
895 */
896VMMDECL(int) PGMShwMakePageWritable(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
897{
898 return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
899}
900
901
902/**
903 * Changes the page flags for a single page in the shadow page tables so as to
904 * make it not present.
905 *
906 * @returns VBox status code.
[41802]907 * @param pVCpu Pointer to the VMCPU.
[30326]908 * @param GCPtr Virtual address of the first page in the range.
909 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
910 */
911VMMDECL(int) PGMShwMakePageNotPresent(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
912{
913 return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
914}
915
916
917/**
[16232]918 * Gets the shadow page directory for the specified address (PAE), allocating it if necessary.
919 *
920 * @returns Pointer to the shadow PD.
[41802]921 * @param pVCpu Pointer to the VMCPU.
[16232]922 * @param GCPtr The address.
[32004]923 * @param uGstPdpe Guest PDPT entry. Valid.
[16232]924 * @param ppPD Receives address of page directory
925 */
[31081]926int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
[16232]927{
[16579]928 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
[31167]929 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
[16579]930 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
[18992]931 PVM pVM = pVCpu->CTX_SUFF(pVM);
[16579]932 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
[16232]933 PPGMPOOLPAGE pShwPage;
934 int rc;
935
[37354]936 PGM_LOCK_ASSERT_OWNER(pVM);
[20762]937
[9824]938 /* Allocate page directory if not present. */
939 if ( !pPdpe->n.u1Present
[32009]940 && !(pPdpe->u & X86_PDPE_PG_MASK))
[9824]941 {
[16579]942 RTGCPTR64 GCPdPt;
943 PGMPOOLKIND enmKind;
944
[31066]945 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
[16232]946 {
[30861]947 /* AMD-V nested paging or real/protected mode without paging. */
[16579]948 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
949 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
[16232]950 }
951 else
952 {
[18927]953 if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
[16624]954 {
[31081]955 if (!(uGstPdpe & X86_PDPE_P))
[17593]956 {
957 /* PD not present; guest must reload CR3 to change it.
958 * No need to monitor anything in this case.
959 */
[43387]960 Assert(!HMIsEnabled(pVM));
[17593]961
[32009]962 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
[17593]963 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
[31081]964 uGstPdpe |= X86_PDPE_P;
[17593]965 }
966 else
967 {
[32009]968 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
[17593]969 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
970 }
[16624]971 }
972 else
973 {
[18927]974 GCPdPt = CPUMGetGuestCR3(pVCpu);
[16626]975 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
[16624]976 }
[16232]977 }
978
[16579]979 /* Create a reference back to the PDPT by using the index in its shadow page. */
[41458]980 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
981 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, false /*fLockPage*/,
982 &pShwPage);
[9824]983 AssertRCReturn(rc, rc);
[17178]984
985 /* The PD was cached or created; hook it up now. */
[32004]986 pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A));
[17178]987
[17586]988# if defined(IN_RC)
[30861]989 /*
990 * In 32-bit PAE mode we *must* invalidate the TLB when changing a
991 * PDPT entry; the CPU fetches them only during CR3 load, so any
[17178]992 * non-present PDPT will continue to cause page faults.
993 */
994 ASMReloadCR3();
995# endif
[31402]996 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
[9824]997 }
998 else
999 {
[32009]1000 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
[39402]1001 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
[32009]1002 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
[17178]1003
[18731]1004 pgmPoolCacheUsed(pPool, pShwPage);
[9824]1005 }
[31170]1006 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
[9824]1007 return VINF_SUCCESS;
1008}
1009
[17305]1010
[9824]1011/**
[16203]1012 * Gets the pointer to the shadow page directory entry for an address, PAE.
1013 *
1014 * @returns VBox status code.
[31167]1015 * @param pVCpu The current CPU.
[16203]1016 * @param GCPtr The address.
1017 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1018 */
[31167]1019DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
[16203]1020{
1021 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
[31167]1022 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1023 PVM pVM = pVCpu->CTX_SUFF(pVM);
[20762]1024
[37354]1025 PGM_LOCK_ASSERT_OWNER(pVM);
[20762]1026
[16203]1027 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1028 if (!pPdpt->a[iPdPt].n.u1Present)
[17158]1029 {
1030 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
[16203]1031 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
[17158]1032 }
[32009]1033 AssertMsg(pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
[16203]1034
1035 /* Fetch the pgm pool shadow descriptor. */
[32009]1036 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
[39402]1037 AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);
[16203]1038
1039 *ppShwPde = pShwPde;
1040 return VINF_SUCCESS;
1041}
1042
[13832]1043#ifndef IN_RC
[13188]1044
[8533]1045/**
[13935]1046 * Syncs the SHADOW page directory pointer for the specified address.
[8533]1047 *
[13935]1048 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1049 *
1050 * The caller is responsible for making sure the guest has a valid PD before
1051 * calling this function.
1052 *
[8533]1053 * @returns VBox status.
[41802]1054 * @param pVCpu Pointer to the VMCPU.
[8533]1055 * @param GCPtr The address.
[31997]1056 * @param uGstPml4e Guest PML4 entry (valid).
1057 * @param uGstPdpe Guest PDPT entry (valid).
[8533]1058 * @param ppPD Receives address of page directory
1059 */
[31081]1060static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
[8533]1061{
[18992]1062 PVM pVM = pVCpu->CTX_SUFF(pVM);
[18927]1063 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
[13991]1064 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
[31167]1065 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
[31066]1066 bool fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
[8533]1067 PPGMPOOLPAGE pShwPage;
1068 int rc;
[1]1069
[37354]1070 PGM_LOCK_ASSERT_OWNER(pVM);
[20762]1071
[9755]1072 /* Allocate page directory pointer table if not present. */
[8533]1073 if ( !pPml4e->n.u1Present
[32000]1074 && !(pPml4e->u & X86_PML4E_PG_MASK))
[8533]1075 {
[16579]1076 RTGCPTR64 GCPml4;
1077 PGMPOOLKIND enmKind;
1078
[18927]1079 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
[16579]1080
[25866]1081 if (fNestedPagingOrNoGstPaging)
[16579]1082 {
1083 /* AMD-V nested paging or real/protected mode without paging */
1084 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1085 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1086 }
1087 else
1088 {
[32000]1089 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
[16579]1090 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1091 }
1092
1093 /* Create a reference back to the PDPT by using the index in its shadow page. */
[41458]1094 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1095 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1096 &pShwPage);
[8533]1097 AssertRCReturn(rc, rc);
1098 }
1099 else
1100 {
[32000]1101 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
[39402]1102 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
[18731]1103
1104 pgmPoolCacheUsed(pPool, pShwPage);
[8533]1105 }
[9685]1106 /* The PDPT was cached or created; hook it up now. */
[31997]1107 pPml4e->u |= pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask);
[8533]1108
1109 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
[31170]1110 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
[8533]1111 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1112
[9755]1113 /* Allocate page directory if not present. */
[8533]1114 if ( !pPdpe->n.u1Present
[32009]1115 && !(pPdpe->u & X86_PDPE_PG_MASK))
[8533]1116 {
[16579]1117 RTGCPTR64 GCPdPt;
1118 PGMPOOLKIND enmKind;
1119
[25866]1120 if (fNestedPagingOrNoGstPaging)
[16579]1121 {
1122 /* AMD-V nested paging or real/protected mode without paging */
1123 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1124 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1125 }
1126 else
1127 {
[32009]1128 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
[16579]1129 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1130 }
1131
1132 /* Create a reference back to the PDPT by using the index in its shadow page. */
[41458]1133 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1134 pShwPage->idx, iPdPt, false /*fLockPage*/,
1135 &pShwPage);
[8533]1136 AssertRCReturn(rc, rc);
1137 }
1138 else
1139 {
[32009]1140 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
[39402]1141 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
[18731]1142
1143 pgmPoolCacheUsed(pPool, pShwPage);
[8533]1144 }
[9685]1145 /* The PD was cached or created; hook it up now. */
[31997]1146 pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask);
[8533]1147
[31170]1148 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
[8533]1149 return VINF_SUCCESS;
1150}
[9570]1151
[13232]1152
[9570]1153/**
[13991]1154 * Gets the SHADOW page directory pointer for the specified address (long mode).
[9570]1155 *
1156 * @returns VBox status.
[41802]1157 * @param pVCpu Pointer to the VMCPU.
[9570]1158 * @param GCPtr The address.
1159 * @param ppPdpt Receives address of pdpt
1160 * @param ppPD Receives address of page directory
1161 */
[18992]1162DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
[9570]1163{
[13991]1164 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
[31167]1165 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
[20762]1166
[39034]1167 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
[20762]1168
[39402]1169 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
[13991]1170 if (ppPml4e)
1171 *ppPml4e = (PX86PML4E)pPml4e;
[18729]1172
[26180]1173 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, pPml4e->u));
[18729]1174
[9570]1175 if (!pPml4e->n.u1Present)
[9589]1176 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
[9570]1177
[18992]1178 PVM pVM = pVCpu->CTX_SUFF(pVM);
1179 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
[32000]1180 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
[39402]1181 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
[9570]1182
[13991]1183 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
[31170]1184 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
[13991]1185 if (!pPdpt->a[iPdPt].n.u1Present)
[9589]1186 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
[9570]1187
[32009]1188 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
[39402]1189 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
[9570]1190
[31170]1191 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
[31997]1192 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
[9570]1193 return VINF_SUCCESS;
1194}
[12936]1195
[13232]1196
[12936]1197/**
1198 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
[12989]1199 * backing pages in case the PDPT or PML4 entry is missing.
[12936]1200 *
1201 * @returns VBox status.
[41802]1202 * @param pVCpu Pointer to the VMCPU.
[12936]1203 * @param GCPtr The address.
1204 * @param ppPdpt Receives address of pdpt
1205 * @param ppPD Receives address of page directory
1206 */
[31066]1207static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
[12936]1208{
[18992]1209 PVM pVM = pVCpu->CTX_SUFF(pVM);
[13991]1210 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
[18927]1211 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
[13991]1212 PEPTPML4 pPml4;
[12936]1213 PEPTPML4E pPml4e;
1214 PPGMPOOLPAGE pShwPage;
1215 int rc;
1216
[31066]1217 Assert(pVM->pgm.s.fNestedPaging);
[37354]1218 PGM_LOCK_ASSERT_OWNER(pVM);
[13991]1219
[31170]1220 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
[12936]1221 Assert(pPml4);
1222
1223 /* Allocate page directory pointer table if not present. */
[13991]1224 pPml4e = &pPml4->a[iPml4];
[12936]1225 if ( !pPml4e->n.u1Present
1226 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1227 {
1228 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
[15990]1229 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
[12936]1230
[41458]1231 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1232 PGMPOOL_IDX_NESTED_ROOT, iPml4, false /*fLockPage*/,
1233 &pShwPage);
[12936]1234 AssertRCReturn(rc, rc);
1235 }
1236 else
1237 {
1238 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
[39402]1239 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
[18731]1240
1241 pgmPoolCacheUsed(pPool, pShwPage);
[12936]1242 }
1243 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1244 pPml4e->u = pShwPage->Core.Key;
1245 pPml4e->n.u1Present = 1;
1246 pPml4e->n.u1Write = 1;
1247 pPml4e->n.u1Execute = 1;
1248
1249 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
[31170]1250 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
[12936]1251 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1252
1253 if (ppPdpt)
1254 *ppPdpt = pPdpt;
1255
1256 /* Allocate page directory if not present. */
1257 if ( !pPdpe->n.u1Present
1258 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1259 {
[15990]1260 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
[41458]1261 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1262 pShwPage->idx, iPdPt, false /*fLockPage*/,
1263 &pShwPage);
[12936]1264 AssertRCReturn(rc, rc);
1265 }
1266 else
1267 {
[13122]1268 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
[39402]1269 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
[18731]1270
1271 pgmPoolCacheUsed(pPool, pShwPage);
[12936]1272 }
1273 /* The PD was cached or created; hook it up now and fill with the default value. */
1274 pPdpe->u = pShwPage->Core.Key;
1275 pPdpe->n.u1Present = 1;
1276 pPdpe->n.u1Write = 1;
1277 pPdpe->n.u1Execute = 1;
1278
[31170]1279 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
[12936]1280 return VINF_SUCCESS;
1281}
1282
[13832]1283#endif /* !IN_RC */
[8533]1284
[31565]1285#ifdef IN_RING0
[1]1286/**
[31565]1287 * Synchronizes a range of nested page table entries.
1288 *
1289 * The caller must own the PGM lock.
1290 *
1291 * @param pVCpu The current CPU.
1292 * @param GCPhysFault Where to start.
1293 * @param cPages The number of pages whose entries should be synced.
1294 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1295 * host paging mode for AMD-V).
1296 */
1297int pgmShwSyncNestedPageLocked(PVMCPU pVCpu, RTGCPHYS GCPhysFault, uint32_t cPages, PGMMODE enmShwPagingMode)
1298{
[37354]1299 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
[31565]1300
1301 int rc;
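    /* Each case below hands a dummy, always-present PDE (P, RW, US and A set)
       to the mode-specific SyncPage worker; with nested paging there is no
       guest PDE to pass along. */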
1302 switch (enmShwPagingMode)
1303 {
1304 case PGMMODE_32_BIT:
1305 {
1306 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1307 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1308 break;
1309 }
1310
1311 case PGMMODE_PAE:
1312 case PGMMODE_PAE_NX:
1313 {
1314 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1315 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1316 break;
1317 }
1318
1319 case PGMMODE_AMD64:
1320 case PGMMODE_AMD64_NX:
1321 {
1322 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1323 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1324 break;
1325 }
1326
1327 case PGMMODE_EPT:
1328 {
1329 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1330 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1331 break;
1332 }
1333
1334 default:
[39402]1335 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
[31565]1336 }
1337 return rc;
1338}
1339#endif /* IN_RING0 */
1340
1341
1342/**
[1]1343 * Gets effective Guest OS page information.
1344 *
1345 * When GCPtr is in a big page, the function will return as if it was a normal
1346 * 4KB page. If distinguishing between big and normal pages becomes
1347 * necessary at a later point, a separate variant of PGMGstGetPage() can be
1348 * added for that purpose.
1349 *
1350 * @returns VBox status.
[37354]1351 * @param pVCpu The current CPU.
[1]1352 * @param GCPtr Guest Context virtual address of the page.
1353 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1354 * @param pGCPhys Where to store the GC physical address of the page.
1355 * This is page aligned.
1356 */
[18988]1357VMMDECL(int) PGMGstGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
[1]1358{
[37354]1359 VMCPU_ASSERT_EMT(pVCpu);
[18988]1360 return PGM_GST_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pGCPhys);
[1]1361}
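/*
 * Illustrative (hypothetical) use: translating a guest-virtual address to the
 * guest-physical address and checking its attributes (names invented for the
 * example):
 *
 *     uint64_t fFlags;
 *     RTGCPHYS GCPhys;
 *     int rc = PGMGstGetPage(pVCpu, GCPtr, &fFlags, &GCPhys);
 *     if (RT_SUCCESS(rc))
 *         // GCPhys is the page-aligned guest-physical address and fFlags
 *         // holds the X86_PTE_* attributes, even for big pages.
 */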
1362
1363
1364/**
1365 * Checks if the page is present.
1366 *
1367 * @returns true if the page is present.
1368 * @returns false if the page is not present.
[41802]1369 * @param pVCpu Pointer to the VMCPU.
[1]1370 * @param GCPtr Address within the page.
1371 */
[18988]1372VMMDECL(bool) PGMGstIsPagePresent(PVMCPU pVCpu, RTGCPTR GCPtr)
[1]1373{
[37354]1374 VMCPU_ASSERT_EMT(pVCpu);
[18988]1375 int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
[13816]1376 return RT_SUCCESS(rc);
[1]1377}
1378
1379
1380/**
1381 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1382 *
1383 * @returns VBox status.
[41802]1384 * @param pVCpu Pointer to the VMCPU.
[1]1385 * @param GCPtr The address of the first page.
1386 * @param cb The size of the range in bytes.
1387 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1388 */
[18988]1389VMMDECL(int) PGMGstSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
[1]1390{
[37354]1391 VMCPU_ASSERT_EMT(pVCpu);
[18988]1392 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
[1]1393}
1394
1395
1396/**
1397 * Modify page flags for a range of pages in the guest's tables
1398 *
1399 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1400 *
1401 * @returns VBox status code.
[41802]1402 * @param pVCpu Pointer to the VMCPU.
[1]1403 * @param GCPtr Virtual address of the first page in the range.
1404 * @param cb Size (in bytes) of the range to apply the modification to.
1405 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1406 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1407 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1408 */
[18988]1409VMMDECL(int) PGMGstModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
[1]1410{
[31123]1411 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
[37354]1412 VMCPU_ASSERT_EMT(pVCpu);
[1]1413
1414 /*
1415 * Validate input.
1416 */
[32036]1417 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
[13232]1418 Assert(cb);
[1]1419
[13823]1420 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
[1]1421
1422 /*
1423 * Adjust input.
1424 */
[13936]1425 cb += GCPtr & PAGE_OFFSET_MASK;
[1]1426 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
[13936]1427 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
[1]1428
1429 /*
1430 * Call worker.
1431 */
[18988]1432 int rc = PGM_GST_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
[1]1433
[31123]1434 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
[1]1435 return rc;
1436}
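/*
 * Illustrative (hypothetical) use: write-protecting a range of guest pages by
 * clearing X86_PTE_RW while leaving the other flags alone, mirroring the
 * fFlags/fMask pattern used by the shadow helpers earlier in this file:
 *
 *     int rc = PGMGstModifyPage(pVCpu, GCPtr, cb, 0, ~(uint64_t)X86_PTE_RW);
 */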
1437
[13232]1438
[30830]1439#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
[30889]1440
[13198]1441/**
[18131]1442 * Performs the lazy mapping of the 32-bit guest PD.
1443 *
[30889]1444 * @returns VBox status code.
1445 * @param pVCpu The current CPU.
1446 * @param ppPd Where to return the pointer to the mapping. This is
1447 * always set.
[18125]1448 */
[30889]1449int pgmGstLazyMap32BitPD(PVMCPU pVCpu, PX86PD *ppPd)
[18125]1450{
[30889]1451 PVM pVM = pVCpu->CTX_SUFF(pVM);
[18125]1452 pgmLock(pVM);
1453
[30889]1454 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
[18125]1455
[30889]1456 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
1457 PPGMPAGE pPage;
[36891]1458 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
[30889]1459 if (RT_SUCCESS(rc))
1460 {
1461 RTHCPTR HCPtrGuestCR3;
[38953]1462 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
[30889]1463 if (RT_SUCCESS(rc))
1464 {
1465 pVCpu->pgm.s.pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
[18125]1466# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
[30889]1467 pVCpu->pgm.s.pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
[18125]1468# endif
[30889]1469 *ppPd = (PX86PD)HCPtrGuestCR3;
[18125]1470
[30889]1471 pgmUnlock(pVM);
1472 return VINF_SUCCESS;
1473 }
1474
1475 AssertRC(rc);
1476 }
[18125]1477 pgmUnlock(pVM);
[30889]1478
1479 *ppPd = NULL;
1480 return rc;
[18125]1481}
1482
1483
1484/**
1485 * Performs the lazy mapping of the PAE guest PDPT.
[18131]1486 *
[30889]1487 * @returns VBox status code.
1488 * @param pVCpu The current CPU.
1489 * @param ppPdpt Where to return the pointer to the mapping. This is
1490 * always set.
[18125]1491 */
[30889]1492int pgmGstLazyMapPaePDPT(PVMCPU pVCpu, PX86PDPT *ppPdpt)
[18125]1493{
[30889]1494 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
1495 PVM pVM = pVCpu->CTX_SUFF(pVM);
[18125]1496 pgmLock(pVM);
1497
[30889]1498 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
1499 PPGMPAGE pPage;
[36891]1500 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
[30889]1501 if (RT_SUCCESS(rc))
1502 {
1503 RTHCPTR HCPtrGuestCR3;
[38953]1504 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
[30889]1505 if (RT_SUCCESS(rc))
1506 {
1507 pVCpu->pgm.s.pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
[18125]1508# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
[30889]1509 pVCpu->pgm.s.pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
[18125]1510# endif
[30889]1511 *ppPdpt = (PX86PDPT)HCPtrGuestCR3;
[18125]1512
[30889]1513 pgmUnlock(pVM);
1514 return VINF_SUCCESS;
1515 }
1516
1517 AssertRC(rc);
1518 }
1519
[18125]1520 pgmUnlock(pVM);
[30889]1521 *ppPdpt = NULL;
1522 return rc;
[18125]1523}
1524
1525
1526/**
1527 * Performs the lazy mapping / updating of a PAE guest PD.
[18131]1528 *
[30889]1530 * @returns VBox status code.
1531 * @param pVCpu The current CPU.
[18131]1532 * @param iPdpt Which PD entry to map (0..3).
[30889]1533 * @param ppPd Where to return the pointer to the mapping. This is
1534 * always set.
[18125]1535 */
[30889]1536int pgmGstLazyMapPaePD(PVMCPU pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
[18125]1537{
[30889]1538 PVM pVM = pVCpu->CTX_SUFF(pVM);
[18125]1539 pgmLock(pVM);
1540
[30889]1541 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
[18125]1542 Assert(pGuestPDPT);
1543 Assert(pGuestPDPT->a[iPdpt].n.u1Present);
[32009]1544 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
[30889]1545 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
[18125]1546
[30889]1547 PPGMPAGE pPage;
[36891]1548 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
[30889]1549 if (RT_SUCCESS(rc))
[18125]1550 {
1551 RTRCPTR RCPtr = NIL_RTRCPTR;
1552 RTHCPTR HCPtr = NIL_RTHCPTR;
1553#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
[38953]1554 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, &HCPtr);
[18125]1555 AssertRC(rc);
1556#endif
1557 if (RT_SUCCESS(rc) && fChanged)
1558 {
[18927]1559 RCPtr = (RTRCPTR)(RTRCUINTPTR)(pVM->pgm.s.GCPtrCR3Mapping + (1 + iPdpt) * PAGE_SIZE);
[18125]1560 rc = PGMMap(pVM, (RTRCUINTPTR)RCPtr, PGM_PAGE_GET_HCPHYS(pPage), PAGE_SIZE, 0);
1561 }
1562 if (RT_SUCCESS(rc))
1563 {
[30889]1564 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = (R3PTRTYPE(PX86PDPAE))HCPtr;
[18125]1565# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
[30889]1566 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = (R0PTRTYPE(PX86PDPAE))HCPtr;
[18131]1567# endif
[18125]1568 if (fChanged)
[18131]1569 {
[30889]1570 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
1571 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = (RCPTRTYPE(PX86PDPAE))RCPtr;
[18125]1572 }
1573
[30889]1574 *ppPd = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
[18125]1575 pgmUnlock(pVM);
[30889]1576 return VINF_SUCCESS;
[18125]1577 }
1578 }
1579
1580 /* Invalid page or some failure, invalidate the entry. */
[30889]1581 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
1582 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = 0;
[18125]1583# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
[30889]1584 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = 0;
[18131]1585# endif
[30889]1586 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = 0;
[18125]1587
1588 pgmUnlock(pVM);
[30889]1589 return rc;
[18125]1590}
[30889]1591
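/*
 * Illustrative sketch (not from the original source): how a caller typically
 * consumes the lazy PAE PD mapping above - check the cached pointer first and
 * only fall back to pgmGstLazyMapPaePD when nothing is mapped yet.  The helper
 * name pgmExampleGetPaePD is an assumption for illustration only.
 */
#if 0 /* example only */
static int pgmExampleGetPaePD(PVMCPU pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
{
    /* Fast path: the PD for this PDPT entry is already mapped. */
    *ppPd = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
    if (RT_LIKELY(*ppPd))
        return VINF_SUCCESS;
    /* Slow path: do the lazy mapping / remapping now. */
    return pgmGstLazyMapPaePD(pVCpu, iPdpt, ppPd);
}
#endif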
[18131]1592#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
[30889]1593#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
[18125]1594/**
[18131]1595 * Performs the lazy mapping of the AMD64 guest PML4 table.
1596 *
[30889]1597 * @returns VBox status code.
1598 * @param pVCpu The current CPU.
1599 * @param ppPml4 Where to return the pointer to the mapping. This will
1600 * always be set.
[18125]1601 */
[30889]1602int pgmGstLazyMapPml4(PVMCPU pVCpu, PX86PML4 *ppPml4)
[18125]1603{
[30889]1604 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
1605 PVM pVM = pVCpu->CTX_SUFF(pVM);
[18125]1606 pgmLock(pVM);
1607
[30889]1608 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
1609 PPGMPAGE pPage;
[36891]1610 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
[30889]1611 if (RT_SUCCESS(rc))
1612 {
1613 RTHCPTR HCPtrGuestCR3;
[38953]1614 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
[30889]1615 if (RT_SUCCESS(rc))
1616 {
1617 pVCpu->pgm.s.pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
1618# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1619 pVCpu->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
1620# endif
1621 *ppPml4 = (PX86PML4)HCPtrGuestCR3;
[18125]1622
[30889]1623 pgmUnlock(pVM);
1624 return VINF_SUCCESS;
1625 }
1626 }
[18125]1627
1628 pgmUnlock(pVM);
[30889]1629 *ppPml4 = NULL;
1630 return rc;
[18125]1631}
[30824]1632#endif
[18125]1633
[38707]1634
[18125]1635/**
[38707]1636 * Gets the PAE PDPE values cached by the CPU.
[13198]1637 *
[38707]1638 * @returns VBox status code.
1639 * @param pVCpu The virtual CPU.
1640 * @param paPdpes Where to return the four PDPEs. The array
1641 * pointed to must have 4 entries.
[13198]1642 */
[38707]1643VMM_INT_DECL(int) PGMGstGetPaePdpes(PVMCPU pVCpu, PX86PDPE paPdpes)
[13198]1644{
[38707]1645 Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1646
1647 paPdpes[0] = pVCpu->pgm.s.aGstPaePdpeRegs[0];
1648 paPdpes[1] = pVCpu->pgm.s.aGstPaePdpeRegs[1];
1649 paPdpes[2] = pVCpu->pgm.s.aGstPaePdpeRegs[2];
1650 paPdpes[3] = pVCpu->pgm.s.aGstPaePdpeRegs[3];
1651 return VINF_SUCCESS;
[13198]1652}
[1]1653
[13198]1654
[1]1655/**
[38707]1656 * Sets the PAE PDPE values cached by the CPU.
1657 *
1658 * @remarks This must be called *AFTER* PGMUpdateCR3.
1659 *
1660 * @returns VBox status code.
1661 * @param pVCpu The virtual CPU.
1662 * @param paPdpes The four PDPE values. The array pointed to
1663 * must have exactly 4 entries.
1664 */
1665VMM_INT_DECL(int) PGMGstUpdatePaePdpes(PVMCPU pVCpu, PCX86PDPE paPdpes)
1666{
1667 Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1668
1669 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGstPaePdpeRegs); i++)
1670 {
1671 if (pVCpu->pgm.s.aGstPaePdpeRegs[i].u != paPdpes[i].u)
1672 {
1673 pVCpu->pgm.s.aGstPaePdpeRegs[i] = paPdpes[i];
1674
1675 /* Force lazy remapping if it changed in any way. */
1676 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
1677# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1678 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
1679# endif
1680 pVCpu->pgm.s.apGstPaePDsRC[i] = 0;
1681 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
1682 }
1683 }
1684 return VINF_SUCCESS;
1685}
1686
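/*
 * Illustrative sketch (not from the original source): how a hardware-assisted
 * (EPT) execution path might use the two PDPE APIs above when the guest runs
 * in PAE mode - update PGM's cached values after a guest CR3 change and read
 * them back when loading the virtual-CPU state.  The helper name and flow are
 * assumptions for illustration only.
 */
#if 0 /* example only */
static int pgmExampleSyncPaePdpes(PVMCPU pVCpu, PCX86PDPE paNewPdpes)
{
    /* Must be called *after* PGMUpdateCR3, see the remark on PGMGstUpdatePaePdpes. */
    int rc = PGMGstUpdatePaePdpes(pVCpu, paNewPdpes);
    if (RT_SUCCESS(rc))
    {
        X86PDPE aPdpes[4];
        rc = PGMGstGetPaePdpes(pVCpu, &aPdpes[0]);
        /* ... load aPdpes[0..3] into the virtual CPU state (e.g. the VMCS) ... */
    }
    return rc;
}
#endif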
1687
1688/**
[1]1689 * Gets the current CR3 register value for the shadow memory context.
1690 * @returns CR3 value.
[41802]1691 * @param pVCpu Pointer to the VMCPU.
[1]1692 */
[18927]1693VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
[1]1694{
[18927]1695 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
[17658]1696 AssertPtrReturn(pPoolPage, 0);
1697 return pPoolPage->Core.Key;
[1]1698}
1699
[13232]1700
[9026]1701/**
1702 * Gets the current CR3 register value for the nested memory context.
1703 * @returns CR3 value.
[41802]1704 * @param pVCpu Pointer to the VMCPU.
 * @param enmShadowMode The shadow paging mode (currently unused).
[9026]1705 */
[18927]1706VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVMCPU pVCpu, PGMMODE enmShadowMode)
[9026]1707{
[39078]1708 NOREF(enmShadowMode);
[18927]1709 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1710 return pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
[9026]1711}
1712
[13232]1713
[13113]1714/**
[1]1715 * Gets the current CR3 register value for the HC intermediate memory context.
1716 * @returns CR3 value.
[41783]1717 * @param pVM Pointer to the VM.
[1]1718 */
[12989]1719VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
[1]1720{
1721 switch (pVM->pgm.s.enmHostMode)
1722 {
1723 case SUPPAGINGMODE_32_BIT:
1724 case SUPPAGINGMODE_32_BIT_GLOBAL:
1725 return pVM->pgm.s.HCPhysInterPD;
1726
1727 case SUPPAGINGMODE_PAE:
1728 case SUPPAGINGMODE_PAE_GLOBAL:
1729 case SUPPAGINGMODE_PAE_NX:
1730 case SUPPAGINGMODE_PAE_GLOBAL_NX:
[7715]1731 return pVM->pgm.s.HCPhysInterPaePDPT;
[1]1732
1733 case SUPPAGINGMODE_AMD64:
1734 case SUPPAGINGMODE_AMD64_GLOBAL:
1735 case SUPPAGINGMODE_AMD64_NX:
1736 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
[7715]1737 return pVM->pgm.s.HCPhysInterPaePDPT;
[1]1738
1739 default:
1740 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
[39038]1741 return NIL_RTHCPHYS;
[1]1742 }
1743}
1744
1745
1746/**
[13232]1747 * Gets the current CR3 register value for the RC intermediate memory context.
[1]1748 * @returns CR3 value.
[41783]1749 * @param pVM Pointer to the VM.
[41802]1750 * @param pVCpu Pointer to the VMCPU.
[1]1751 */
[18927]1752VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM, PVMCPU pVCpu)
[1]1753{
[18927]1754 switch (pVCpu->pgm.s.enmShadowMode)
[1]1755 {
1756 case PGMMODE_32_BIT:
1757 return pVM->pgm.s.HCPhysInterPD;
1758
1759 case PGMMODE_PAE:
1760 case PGMMODE_PAE_NX:
[7715]1761 return pVM->pgm.s.HCPhysInterPaePDPT;
[1]1762
1763 case PGMMODE_AMD64:
1764 case PGMMODE_AMD64_NX:
1765 return pVM->pgm.s.HCPhysInterPaePML4;
1766
[10822]1767 case PGMMODE_EPT:
[9032]1768 case PGMMODE_NESTED:
1769 return 0; /* not relevant */
1770
[1]1771 default:
[18927]1772 AssertMsgFailed(("enmShadowMode=%d\n", pVCpu->pgm.s.enmShadowMode));
[39038]1773 return NIL_RTHCPHYS;
[1]1774 }
1775}
1776
1777
1778/**
1779 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1780 * @returns CR3 value.
[41783]1781 * @param pVM Pointer to the VM.
[1]1782 */
[12989]1783VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
[1]1784{
1785 return pVM->pgm.s.HCPhysInterPD;
1786}
1787
1788
1789/**
1790 * Gets the CR3 register value for the PAE intermediate memory context.
1791 * @returns CR3 value.
[41783]1792 * @param pVM Pointer to the VM.
[1]1793 */
[12989]1794VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
[1]1795{
[7715]1796 return pVM->pgm.s.HCPhysInterPaePDPT;
[1]1797}
1798
1799
1800/**
1801 * Gets the CR3 register value for the AMD64 intermediate memory context.
1802 * @returns CR3 value.
[41783]1803 * @param pVM Pointer to the VM.
[1]1804 */
[12989]1805VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
[1]1806{
1807 return pVM->pgm.s.HCPhysInterPaePML4;
1808}
1809
1810
1811/**
1812 * Performs and schedules necessary updates following a CR3 load or reload.
1813 *
[7715]1814 * This will normally involve mapping the guest PD or nPDPT.
[1]1815 *
1816 * @returns VBox status code.
1817 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1818 * safely be ignored and overridden since the FF will be set too then.
[41802]1819 * @param pVCpu Pointer to the VMCPU.
[1]1820 * @param cr3 The new cr3.
1821 * @param fGlobal Indicates whether this is a global flush or not.
1822 */
[18992]1823VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
[1]1824{
[37354]1825 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
[18992]1826 PVM pVM = pVCpu->CTX_SUFF(pVM);
1827
[37354]1828 VMCPU_ASSERT_EMT(pVCpu);
[7883]1829
[1]1830 /*
[3956]1831 * Always flag the necessary updates; necessary for hardware acceleration
1832 */
[15410]1833 /** @todo optimize this, it shouldn't always be necessary. */
[19141]1834 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
[3956]1835 if (fGlobal)
[19141]1836 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
[18927]1837 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
[1]1838
1839 /*
1840 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1841 */
1842 int rc = VINF_SUCCESS;
1843 RTGCPHYS GCPhysCR3;
[18927]1844 switch (pVCpu->pgm.s.enmGuestMode)
[17259]1845 {
[30889]1846 case PGMMODE_PAE:
1847 case PGMMODE_PAE_NX:
1848 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1849 break;
1850 case PGMMODE_AMD64:
1851 case PGMMODE_AMD64_NX:
1852 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1853 break;
1854 default:
1855 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1856 break;
[17259]1857 }
[41391]1858 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
[17259]1859
[18927]1860 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
[1]1861 {
[18927]1862 RTGCPHYS GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
1863 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
[18992]1864 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
[15410]1865 if (RT_LIKELY(rc == VINF_SUCCESS))
[1]1866 {
[36891]1867 if (pgmMapAreMappingsFloating(pVM))
[18927]1868 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
[1]1869 }
[15410]1870 else
1871 {
1872 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
[19141]1873 Assert(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
[18927]1874 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
1875 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
[36891]1876 if (pgmMapAreMappingsFloating(pVM))
[18927]1877 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
[15410]1878 }
1879
[1]1880 if (fGlobal)
[31123]1881 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3Global));
[1]1882 else
[31123]1883 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3));
[1]1884 }
1885 else
1886 {
[22544]1887# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
[22545]1888 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1889 if (pPool->cDirtyPages)
1890 {
1891 pgmLock(pVM);
1892 pgmPoolResetDirtyPages(pVM);
1893 pgmUnlock(pVM);
1894 }
[22473]1895# endif
[1]1896 /*
1897 * Check if we have a pending update of the CR3 monitoring.
1898 */
[18927]1899 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
[1]1900 {
[18927]1901 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
[25935]1902 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
[1]1903 }
1904 if (fGlobal)
[31123]1905 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3Global));
[1]1906 else
[31123]1907 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3));
[1]1908 }
1909
[31123]1910 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
[1]1911 return rc;
1912}
1913
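/*
 * Illustrative sketch (not from the original source): handling a guest MOV CR3
 * with PGMFlushTLB.  As documented above, VINF_PGM_SYNC_CR3 may safely be
 * ignored because the corresponding force-action flag is set as well.  The
 * helper name is an assumption for illustration only.
 */
#if 0 /* example only */
static int pgmExampleEmulateMovCr3(PVMCPU pVCpu, uint64_t uNewCr3, bool fGlobalFlush)
{
    int rc = PGMFlushTLB(pVCpu, uNewCr3, fGlobalFlush);
    if (rc == VINF_PGM_SYNC_CR3)
        rc = VINF_SUCCESS;  /* The sync is picked up later via VMCPU_FF_PGM_SYNC_CR3. */
    return rc;
}
#endif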
[13232]1914
[9064]1915/**
[13232]1916 * Performs and schedules necessary updates following a CR3 load or reload when
1917 * using nested or extended paging.
[9064]1918 *
[33540]1919 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
[13232]1920 * TLB and triggering a SyncCR3.
1921 *
[9064]1922 * This will normally involve mapping the guest PD or nPDPT.
1923 *
1924 * @returns VBox status code.
[15410]1925 * @retval VINF_SUCCESS.
1926 * @retval (If applied when not in nested mode: VINF_PGM_SYNC_CR3 if monitoring
1927 * requires a CR3 sync. This can safely be ignored and overridden since
1928 * the FF will be set too then.)
[41802]1929 * @param pVCpu Pointer to the VMCPU.
[9064]1930 * @param cr3 The new cr3.
1931 */
[18992]1932VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
[9064]1933{
[37354]1934 VMCPU_ASSERT_EMT(pVCpu);
[18927]1935 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
[1]1936
[9064]1937 /* We assume we're only called in nested paging mode. */
[39034]1938 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1939 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsDisabled);
[18927]1940 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
[9064]1941
1942 /*
1943 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1944 */
1945 int rc = VINF_SUCCESS;
1946 RTGCPHYS GCPhysCR3;
[18927]1947 switch (pVCpu->pgm.s.enmGuestMode)
[17259]1948 {
[37354]1949 case PGMMODE_PAE:
1950 case PGMMODE_PAE_NX:
1951 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1952 break;
1953 case PGMMODE_AMD64:
1954 case PGMMODE_AMD64_NX:
1955 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1956 break;
1957 default:
1958 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1959 break;
[17259]1960 }
[41391]1961 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
1962
[18927]1963 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
[9064]1964 {
[18927]1965 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
[18992]1966 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
[17505]1967 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
[9064]1968 }
1969 return rc;
1970}
1971
[13232]1972
[1]1973/**
1974 * Synchronize the paging structures.
1975 *
1976 * This function is called in response to the VMCPU_FF_PGM_SYNC_CR3 and
1977 * VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL flags. Those two force action flags are set
1978 * in several places, most importantly whenever the CR3 is loaded.
1979 *
1980 * @returns VBox status code.
[41802]1981 * @param pVCpu Pointer to the VMCPU.
[1]1982 * @param cr0 Guest context CR0 register
1983 * @param cr3 Guest context CR3 register
1984 * @param cr4 Guest context CR4 register
1985 * @param fGlobal Including global page directories or not
1986 */
[18992]1987VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
[1]1988{
[15410]1989 int rc;
1990
[37354]1991 VMCPU_ASSERT_EMT(pVCpu);
1992
[1]1993 /*
[19516]1994 * The pool may have pending stuff and even require a return to ring-3 to
1995 * clear the whole thing.
1996 */
[20151]1997 rc = pgmPoolSyncCR3(pVCpu);
[19516]1998 if (rc != VINF_SUCCESS)
1999 return rc;
2000
2001 /*
[1]2002 * We might be called when we shouldn't.
2003 *
[41462]2004 * The mode switching will ensure that the PD is resynced after every mode
2005 * switch. So, if we find ourselves here when in protected or real mode
2006 * we can safely clear the FF and return immediately.
[1]2007 */
[18927]2008 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
[1]2009 {
2010 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
[27816]2011 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
[19141]2012 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2013 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
[1]2014 return VINF_SUCCESS;
2015 }
2016
[15410]2017 /* If global pages are not supported, then all flushes are global. */
[1]2018 if (!(cr4 & X86_CR4_PGE))
2019 fGlobal = true;
[13825]2020 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
[19141]2021 VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
[1]2022
2023 /*
[15410]2024 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2025 * This should be done before SyncCR3.
2026 */
[18927]2027 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
[15410]2028 {
[18927]2029 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
[15410]2030
[39034]2031 RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3; NOREF(GCPhysCR3Old);
[15410]2032 RTGCPHYS GCPhysCR3;
[18927]2033 switch (pVCpu->pgm.s.enmGuestMode)
[17259]2034 {
[37354]2035 case PGMMODE_PAE:
2036 case PGMMODE_PAE_NX:
2037 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
2038 break;
2039 case PGMMODE_AMD64:
2040 case PGMMODE_AMD64_NX:
2041 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
2042 break;
2043 default:
2044 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2045 break;
[17259]2046 }
[41391]2047 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
[16317]2048
[18927]2049 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
[16317]2050 {
[18927]2051 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
[18992]2052 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
[16317]2053 }
[37354]2054
[27369]2055 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
[27390]2056 if ( rc == VINF_PGM_SYNC_CR3
2057 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
[27369]2058 {
[27370]2059 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
[15410]2060#ifdef IN_RING3
[20151]2061 rc = pgmPoolSyncCR3(pVCpu);
[15410]2062#else
[27371]2063 if (rc == VINF_PGM_SYNC_CR3)
2064 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
[27369]2065 return VINF_PGM_SYNC_CR3;
2066#endif
[15410]2067 }
[27813]2068 AssertRCReturn(rc, rc);
[39402]2069 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
[15410]2070 }
2071
2072 /*
[1]2073 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2074 */
[31123]2075 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
[18992]2076 rc = PGM_BTH_PFN(SyncCR3, pVCpu)(pVCpu, cr0, cr3, cr4, fGlobal);
[31123]2077 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
[13821]2078 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
[1]2079 if (rc == VINF_SUCCESS)
2080 {
[27816]2081 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2082 {
2083 /* Go back to ring 3 if a pgm pool sync is again pending. */
2084 return VINF_PGM_SYNC_CR3;
2085 }
2086
[18927]2087 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
[1]2088 {
[27816]2089 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
[19141]2090 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2091 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
[1]2092 }
2093
2094 /*
2095 * Check if we have a pending update of the CR3 monitoring.
2096 */
[18927]2097 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
[1]2098 {
[18927]2099 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
[39034]2100 Assert(!pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsFixed);
2101 Assert(!pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsDisabled);
[1]2102 }
2103 }
2104
2105 /*
2106 * Now flush the CR3 (guest context).
2107 */
2108 if (rc == VINF_SUCCESS)
[19833]2109 PGM_INVL_VCPU_TLBS(pVCpu);
[1]2110 return rc;
2111}
2112
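/*
 * Illustrative sketch (not from the original source): how an execution loop
 * might service the two sync force-action flags by calling PGMSyncCR3 with the
 * current control register values.  The helper name and the way the CR values
 * are passed in are assumptions for illustration only.
 */
#if 0 /* example only */
static int pgmExampleServiceSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4)
{
    if (!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
        return VINF_SUCCESS;
    /* A global sync is requested by VMCPU_FF_PGM_SYNC_CR3; otherwise non-global suffices. */
    bool const fGlobal = VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    return PGMSyncCR3(pVCpu, cr0, cr3, cr4, fGlobal);
}
#endif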
2113
2114/**
[31080]2115 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
[1]2116 *
[18651]2117 * @returns VBox status code, with the following informational code for
2118 * VM scheduling.
[1]2119 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
[18651]2120 * @retval VINF_PGM_CHANGE_MODE if we're in RC or R0 and the mode changes.
2121 * (I.e. not in R3.)
2122 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
2123 *
[41802]2124 * @param pVCpu Pointer to the VMCPU.
[1]2125 * @param cr0 The new cr0.
2126 * @param cr4 The new cr4.
2127 * @param efer The new extended feature enable register.
2128 */
[18992]2129VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
[1]2130{
2131 PGMMODE enmGuestMode;
2132
[37354]2133 VMCPU_ASSERT_EMT(pVCpu);
2134
[1]2135 /*
2136 * Calc the new guest mode.
2137 */
2138 if (!(cr0 & X86_CR0_PE))
2139 enmGuestMode = PGMMODE_REAL;
2140 else if (!(cr0 & X86_CR0_PG))
2141 enmGuestMode = PGMMODE_PROTECTED;
2142 else if (!(cr4 & X86_CR4_PAE))
[31080]2143 {
2144 bool const fPse = !!(cr4 & X86_CR4_PSE);
2145 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
2146 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
2147 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
[1]2148 enmGuestMode = PGMMODE_32_BIT;
[31080]2149 }
[1]2150 else if (!(efer & MSR_K6_EFER_LME))
2151 {
2152 if (!(efer & MSR_K6_EFER_NXE))
2153 enmGuestMode = PGMMODE_PAE;
2154 else
2155 enmGuestMode = PGMMODE_PAE_NX;
2156 }
2157 else
2158 {
2159 if (!(efer & MSR_K6_EFER_NXE))
2160 enmGuestMode = PGMMODE_AMD64;
2161 else
2162 enmGuestMode = PGMMODE_AMD64_NX;
2163 }
2164
2165 /*
2166 * Did it change?
2167 */
[18927]2168 if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
[1]2169 return VINF_SUCCESS;
[10299]2170
2171 /* Flush the TLB */
[19833]2172 PGM_INVL_VCPU_TLBS(pVCpu);
[10299]2173
[1]2174#ifdef IN_RING3
[39034]2175 return PGMR3ChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode);
[1]2176#else
[13418]2177 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
[1]2178 return VINF_PGM_CHANGE_MODE;
2179#endif
2180}
2181
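/*
 * Illustrative sketch (not from the original source): a caller reacting to a
 * guest CR0/CR4/EFER update.  In ring-3 PGMChangeMode performs the mode switch
 * itself; in RC/R0 the VINF_PGM_CHANGE_MODE status must be propagated so the
 * switch can be completed in ring-3.  The helper name is an assumption.
 */
#if 0 /* example only */
static int pgmExampleOnControlRegChange(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
{
    int rc = PGMChangeMode(pVCpu, cr0, cr4, efer);
#ifndef IN_RING3
    AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_CHANGE_MODE, ("%Rrc\n", rc));
#endif
    return rc; /* VINF_PGM_CHANGE_MODE propagates up to the ring-3 loop. */
}
#endif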
2182
2183/**
2184 * Gets the current guest paging mode.
2185 *
[4207]2186 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
2187 *
[1]2188 * @returns The current paging mode.
[41802]2189 * @param pVCpu Pointer to the VMCPU.
[1]2190 */
[18927]2191VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
[1]2192{
[18927]2193 return pVCpu->pgm.s.enmGuestMode;
[1]2194}
2195
[1310]2196
[1251]2197/**
2198 * Gets the current shadow paging mode.
2199 *
2200 * @returns The current paging mode.
[41802]2201 * @param pVCpu Pointer to the VMCPU.
[1251]2202 */
[18927]2203VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
[1251]2204{
[18927]2205 return pVCpu->pgm.s.enmShadowMode;
[1251]2206}
[1]2207
[37354]2208
[8108]2209/**
2210 * Gets the current host paging mode.
2211 *
2212 * @returns The current paging mode.
[41783]2213 * @param pVM Pointer to the VM.
[8108]2214 */
[12989]2215VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
[8108]2216{
2217 switch (pVM->pgm.s.enmHostMode)
2218 {
2219 case SUPPAGINGMODE_32_BIT:
2220 case SUPPAGINGMODE_32_BIT_GLOBAL:
2221 return PGMMODE_32_BIT;
[1310]2222
[8108]2223 case SUPPAGINGMODE_PAE:
2224 case SUPPAGINGMODE_PAE_GLOBAL:
2225 return PGMMODE_PAE;
2226
2227 case SUPPAGINGMODE_PAE_NX:
2228 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2229 return PGMMODE_PAE_NX;
2230
2231 case SUPPAGINGMODE_AMD64:
2232 case SUPPAGINGMODE_AMD64_GLOBAL:
2233 return PGMMODE_AMD64;
2234
2235 case SUPPAGINGMODE_AMD64_NX:
2236 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2237 return PGMMODE_AMD64_NX;
2238
2239 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
2240 }
2241
2242 return PGMMODE_INVALID;
2243}
2244
2245
[1]2246/**
2247 * Get mode name.
2248 *
2249 * @returns read-only name string.
2250 * @param enmMode The mode which name is desired.
2251 */
[12989]2252VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
[1]2253{
2254 switch (enmMode)
2255 {
[10824]2256 case PGMMODE_REAL: return "Real";
2257 case PGMMODE_PROTECTED: return "Protected";
[1]2258 case PGMMODE_32_BIT: return "32-bit";
2259 case PGMMODE_PAE: return "PAE";
2260 case PGMMODE_PAE_NX: return "PAE+NX";
2261 case PGMMODE_AMD64: return "AMD64";
2262 case PGMMODE_AMD64_NX: return "AMD64+NX";
[10705]2263 case PGMMODE_NESTED: return "Nested";
[10822]2264 case PGMMODE_EPT: return "EPT";
[1]2265 default: return "unknown mode value";
2266 }
2267}
2268
2269
[30861]2270
[1]2271/**
[30861]2272 * Notification from CPUM that the EFER.NXE bit has changed.
2273 *
2274 * @param pVCpu The virtual CPU for which EFER changed.
2275 * @param fNxe The new NXE state.
2276 */
2277VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
2278{
[37452]2279/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
[30889]2280 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
[37354]2281
[31054]2282 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
[30889]2283 if (fNxe)
2284 {
2285 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2286 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
2287 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
2288 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2289 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
2290 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
2291 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
2292 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2293 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
2294 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
2295 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
[31849]2296
2297 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
2298 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
2299 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
2300 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
[31997]2301 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
2302 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
[30889]2303 }
2304 else
2305 {
2306 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2307 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
2308 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
2309 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
[31849]2310 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask -N/A */
[30889]2311 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
2312 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
2313 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
2314 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
2315 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
2316 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
[31849]2317
2318 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
2319 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
2320 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
2321 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
[31997]2322 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
2323 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
[30889]2324 }
[30861]2325}
2326
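/*
 * Illustrative sketch (not from the original source): how the EFER.NXE
 * notification above could be driven from an MSR write path.  Only invoke it
 * when the bit actually changes; the helper name is an assumption for
 * illustration only.
 */
#if 0 /* example only */
static void pgmExampleOnEferWrite(PVMCPU pVCpu, uint64_t uOldEfer, uint64_t uNewEfer)
{
    if ((uOldEfer ^ uNewEfer) & MSR_K6_EFER_NXE)
        PGMNotifyNxeChanged(pVCpu, RT_BOOL(uNewEfer & MSR_K6_EFER_NXE));
}
#endif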
2327
2328/**
[24197]2329 * Check if any pgm pool pages are marked dirty (not monitored)
2330 *
2331 * @returns bool locked/not locked
[41783]2332 * @param pVM Pointer to the VM.
[24197]2333 */
2334VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
2335{
[24201]2336 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
[24197]2337}
2338
[19471]2339
2340/**
[19474]2341 * Check if this VCPU currently owns the PGM lock.
2342 *
2343 * @returns bool owner/not owner
[41783]2344 * @param pVM Pointer to the VM.
[19474]2345 */
2346VMMDECL(bool) PGMIsLockOwner(PVM pVM)
2347{
[38953]2348 return PDMCritSectIsOwner(&pVM->pgm.s.CritSectX);
[19474]2349}
2350
2351
2352/**
[26685]2353 * Enable or disable large page usage.
2354 *
[37354]2355 * @returns VBox status code.
[41783]2356 * @param pVM Pointer to the VM.
[26685]2357 * @param fUseLargePages Whether to use large pages.
2358 */
[37354]2359VMMDECL(int) PGMSetLargePageUsage(PVM pVM, bool fUseLargePages)
[26685]2360{
[37354]2361 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2362
[36009]2363 pVM->fUseLargePages = fUseLargePages;
[37354]2364 return VINF_SUCCESS;
[26685]2365}
2366
[37354]2367
[26685]2368/**
[1]2369 * Acquire the PGM lock.
2370 *
2371 * @returns VBox status code
[41783]2372 * @param pVM Pointer to the VM.
[1]2373 */
2374int pgmLock(PVM pVM)
2375{
[38953]2376 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSectX, VERR_SEM_BUSY);
[20873]2377#if defined(IN_RC) || defined(IN_RING0)
[1]2378 if (rc == VERR_SEM_BUSY)
[20874]2379 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_LOCK, 0);
[1]2380#endif
[17509]2381 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
[1]2382 return rc;
2383}
2384
2385
2386/**
2387 * Release the PGM lock.
2388 *
2389 * @returns VBox status code
[41783]2390 * @param pVM Pointer to the VM.
[1]2391 */
2392void pgmUnlock(PVM pVM)
2393{
[38953]2394 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
2395 pVM->pgm.s.cDeprecatedPageLocks = 0;
2396 int rc = PDMCritSectLeave(&pVM->pgm.s.CritSectX);
2397 if (rc == VINF_SEM_NESTED)
2398 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
[1]2399}
2400
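/*
 * Illustrative sketch (not from the original source): the usual PGM lock
 * pattern - take the lock, touch the shared state, release it, and assert
 * ownership in helpers that require the caller to already hold the lock.  The
 * function names are assumptions for illustration only.
 */
#if 0 /* example only */
static void pgmExampleLockedHelper(PVM pVM)
{
    Assert(PGMIsLockOwner(pVM));    /* Caller must hold the PGM lock. */
    /* ... access pVM->pgm.s state here ... */
}

static int pgmExampleDoLockedWork(PVM pVM)
{
    int rc = pgmLock(pVM);
    AssertRCReturn(rc, rc);
    pgmExampleLockedHelper(pVM);
    pgmUnlock(pVM);
    return VINF_SUCCESS;
}
#endif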
[13832]2401#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
[1]2402
[31402]2403/**
2404 * Common worker for pgmRZDynMapGCPageOffInlined and pgmRZDynMapGCPageV2Inlined.
2405 *
2406 * @returns VBox status code.
[41783]2407 * @param pVM Pointer to the VM.
[31402]2408 * @param pVCpu The current CPU.
2409 * @param GCPhys The guest physical address of the page to map. The
2410 * offset bits are not ignored.
2411 * @param ppv Where to return the address corresponding to @a GCPhys.
2412 */
2413int pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
[30326]2414{
2415 pgmLock(pVM);
2416
2417 /*
[31402]2418 * Convert it to a writable page and pass it on to the dynamic mapper.
[30326]2419 */
2420 int rc;
[36891]2421 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
[30326]2422 if (RT_LIKELY(pPage))
2423 {
2424 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
2425 if (RT_SUCCESS(rc))
2426 {
[31402]2427 void *pv;
2428 rc = pgmRZDynMapHCPageInlined(pVCpu, PGM_PAGE_GET_HCPHYS(pPage), &pv RTLOG_COMMA_SRC_POS_ARGS);
2429 if (RT_SUCCESS(rc))
2430 *ppv = (void *)((uintptr_t)pv | ((uintptr_t)GCPhys & PAGE_OFFSET_MASK));
[30326]2431 }
2432 else
2433 AssertRC(rc);
2434 }
2435 else
2436 {
2437 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2438 rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2439 }
2440
2441 pgmUnlock(pVM);
2442 return rc;
2443}
2444
[17305]2445#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
[17433]2446#if !defined(IN_R0) || defined(LOG_ENABLED)
[17305]2447
2448/** Format handler for PGMPAGE.
2449 * @copydoc FNRTSTRFORMATTYPE */
2450static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
[30326]2451 const char *pszType, void const *pvValue,
2452 int cchWidth, int cchPrecision, unsigned fFlags,
2453 void *pvUser)
[17305]2454{
2455 size_t cch;
2456 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
[39078]2457 if (RT_VALID_PTR(pPage))
[17305]2458 {
[17316]2459 char szTmp[64+80];
2460
2461 cch = 0;
2462
2463 /* The single char state stuff. */
2464 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
[37354]2465 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
[17316]2466
2467#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
2468 if (IS_PART_INCLUDED(5))
[17305]2469 {
[17316]2470 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
2471 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
2472 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)];
[17305]2473 }
[17316]2474
2475 /* The type. */
2476 if (IS_PART_INCLUDED(4))
2477 {
2478 szTmp[cch++] = ':';
[17507]2479 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
[37354]2480 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
2481 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
2482 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
[17316]2483 }
2484
2485 /* The numbers. */
2486 if (IS_PART_INCLUDED(3))
2487 {
2488 szTmp[cch++] = ':';
[37354]2489 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
[17316]2490 }
2491
2492 if (IS_PART_INCLUDED(2))
2493 {
2494 szTmp[cch++] = ':';
2495 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
2496 }
2497
2498 if (IS_PART_INCLUDED(6))
2499 {
2500 szTmp[cch++] = ':';
2501 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
[37354]2502 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
2503 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
[17316]2504 }
2505#undef IS_PART_INCLUDED
2506
[17305]2507 cch = pfnOutput(pvArgOutput, szTmp, cch);
2508 }
2509 else
2510 cch = pfnOutput(pvArgOutput, "<bad-pgmpage-ptr>", sizeof("<bad-pgmpage-ptr>") - 1);
[39078]2511 NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
[17305]2512 return cch;
2513}
2514
2515
2516/** Format handler for PGMRAMRANGE.
2517 * @copydoc FNRTSTRFORMATTYPE */
2518static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2519 const char *pszType, void const *pvValue,
2520 int cchWidth, int cchPrecision, unsigned fFlags,
2521 void *pvUser)
2522{
2523 size_t cch;
2524 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
2525 if (VALID_PTR(pRam))
2526 {
2527 char szTmp[80];
2528 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
2529 cch = pfnOutput(pvArgOutput, szTmp, cch);
2530 }
2531 else
2532 cch = pfnOutput(pvArgOutput, "<bad-pgmramrange-ptr>", sizeof("<bad-pgmramrange-ptr>") - 1);
[39078]2533 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
[17305]2534 return cch;
2535}
2536
2537 /** Format type handlers to be registered/deregistered. */
2538static const struct
2539{
2540 char szType[24];
2541 PFNRTSTRFORMATTYPE pfnHandler;
2542} g_aPgmFormatTypes[] =
2543{
2544 { "pgmpage", pgmFormatTypeHandlerPage },
2545 { "pgmramrange", pgmFormatTypeHandlerRamRange }
2546};
2547
[17433]2548#endif /* !IN_R0 || LOG_ENABLED */
[17305]2549
2550/**
2551 * Registers the global string format types.
2552 *
2553 * This should be called at module load time or in some other manner that ensures
2554 * that it's called exactly one time.
2555 *
2556 * @returns IPRT status code on RTStrFormatTypeRegister failure.
2557 */
2558VMMDECL(int) PGMRegisterStringFormatTypes(void)
2559{
[17432]2560#if !defined(IN_R0) || defined(LOG_ENABLED)
[17305]2561 int rc = VINF_SUCCESS;
2562 unsigned i;
2563 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2564 {
2565 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2566# ifdef IN_RING0
2567 if (rc == VERR_ALREADY_EXISTS)
2568 {
2569 /* in case of cleanup failure in ring-0 */
2570 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2571 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2572 }
2573# endif
2574 }
2575 if (RT_FAILURE(rc))
2576 while (i-- > 0)
2577 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2578
2579 return rc;
2580#else
2581 return VINF_SUCCESS;
2582#endif
2583}
2584
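/*
 * Illustrative sketch (not from the original source): once the format types
 * above are registered, PGMPAGE and PGMRAMRANGE pointers can be logged with
 * the %R[pgmpage] and %R[pgmramrange] specifiers.  The helper name is an
 * assumption for illustration only.
 */
#if 0 /* example only */
static void pgmExampleLogPage(PPGMPAGE pPage, PPGMRAMRANGE pRam)
{
    Log(("pgmExampleLogPage: %R[pgmpage] in range %R[pgmramrange]\n", pPage, pRam));
}
#endif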
2585
2586/**
2587 * Deregisters the global string format types.
2588 *
2589 * This should be called at module unload time or in some other manner that
2590 * ensures that it's called exactly one time.
2591 */
2592VMMDECL(void) PGMDeregisterStringFormatTypes(void)
2593{
[17432]2594#if !defined(IN_R0) || defined(LOG_ENABLED)
[17305]2595 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2596 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2597#endif
2598}
2599
[1]2600#ifdef VBOX_STRICT
2601
2602/**
2603 * Asserts that there are no mapping conflicts.
2604 *
2605 * @returns Number of conflicts.
[41783]2606 * @param pVM Pointer to the VM.
[1]2607 */
[12989]2608VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
[1]2609{
2610 unsigned cErrors = 0;
2611
[18927]2612 /* Only applies to raw mode -> 1 VCPU */
[22890]2613 Assert(pVM->cCpus == 1);
[18927]2614 PVMCPU pVCpu = &pVM->aCpus[0];
2615
[1]2616 /*
2617 * Check for mapping conflicts.
2618 */
[13019]2619 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
[1]2620 pMapping;
[13019]2621 pMapping = pMapping->CTX_SUFF(pNext))
[1]2622 {
2623 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
[13937]2624 for (RTGCPTR GCPtr = pMapping->GCPtr;
[13936]2625 GCPtr <= pMapping->GCPtrLast;
[1]2626 GCPtr += PAGE_SIZE)
2627 {
[18988]2628 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, NULL, NULL);
[1]2629 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
2630 {
[13823]2631 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
[1]2632 cErrors++;
2633 break;
2634 }
2635 }
2636 }
2637
2638 return cErrors;
2639}
2640
2641
2642/**
2643 * Asserts that everything related to the guest CR3 is correctly shadowed.
2644 *
2645 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
2646 * and assert the correctness of the guest CR3 mapping before asserting that the
2647 * shadow page tables are in sync with the guest page tables.
2648 *
2649 * @returns Number of conflicts.
[41783]2650 * @param pVM Pointer to the VM.
[41802]2651 * @param pVCpu Pointer to the VMCPU.
[1]2652 * @param cr3 The current guest CR3 register value.
2653 * @param cr4 The current guest CR4 register value.
2654 */
[18927]2655VMMDECL(unsigned) PGMAssertCR3(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4)
[1]2656{
[31123]2657 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
[20767]2658 pgmLock(pVM);
[18992]2659 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVCpu)(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
[20767]2660 pgmUnlock(pVM);
[31123]2661 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
[1]2662 return cErrors;
2663}
2664
2665#endif /* VBOX_STRICT */