VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMShw.h@ 28800

Last change on this file since 28800 was 28800, checked in by vboxsync, 14 years ago

Automated rebranding to Oracle copyright/license strings via filemuncher

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 9.8 KB
Line 
1/* $Id: PGMShw.h 28800 2010-04-27 08:22:32Z vboxsync $ */
2/** @file
3 * VBox - Page Manager / Monitor, Shadow Paging Template.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/* Undefine any mappings left over from a previous instantiation of this
   template, so the definitions below always start from a clean slate. */
#undef SHWPT
#undef PSHWPT
#undef SHWPTE
#undef PSHWPTE
#undef SHWPD
#undef PSHWPD
#undef SHWPDE
#undef PSHWPDE
#undef SHW_PDE_PG_MASK
#undef SHW_PD_SHIFT
#undef SHW_PD_MASK
#undef SHW_PTE_PG_MASK
#undef SHW_PT_SHIFT
#undef SHW_PT_MASK
#undef SHW_TOTAL_PD_ENTRIES
#undef SHW_PDPT_SHIFT
#undef SHW_PDPT_MASK
#undef SHW_PDPE_PG_MASK
#undef SHW_POOL_ROOT_IDX

#if PGM_SHW_TYPE == PGM_TYPE_32BIT
/* Legacy 32-bit (non-PAE) shadow paging: map the SHW* names onto the plain
   x86 page table / page directory types and constants. */
# define SHWPT                  X86PT
# define PSHWPT                 PX86PT
# define SHWPTE                 X86PTE
# define PSHWPTE                PX86PTE
# define SHWPD                  X86PD
# define PSHWPD                 PX86PD
# define SHWPDE                 X86PDE
# define PSHWPDE                PX86PDE
# define SHW_PDE_PG_MASK        X86_PDE_PG_MASK
# define SHW_PD_SHIFT           X86_PD_SHIFT
# define SHW_PD_MASK            X86_PD_MASK
# define SHW_TOTAL_PD_ENTRIES   X86_PG_ENTRIES
# define SHW_PTE_PG_MASK        X86_PTE_PG_MASK
# define SHW_PT_SHIFT           X86_PT_SHIFT
# define SHW_PT_MASK            X86_PT_MASK
# define SHW_POOL_ROOT_IDX      PGMPOOL_IDX_PD

#elif PGM_SHW_TYPE == PGM_TYPE_EPT
/* VT-x nested paging: map the SHW* names onto the EPT structure types and
   constants. */
# define SHWPT                  EPTPT
# define PSHWPT                 PEPTPT
# define SHWPTE                 EPTPTE
# define PSHWPTE                PEPTPTE
# define SHWPD                  EPTPD
# define PSHWPD                 PEPTPD
# define SHWPDE                 EPTPDE
# define PSHWPDE                PEPTPDE
# define SHW_PDE_PG_MASK        EPT_PDE_PG_MASK
# define SHW_PD_SHIFT           EPT_PD_SHIFT
# define SHW_PD_MASK            EPT_PD_MASK
# define SHW_PTE_PG_MASK        EPT_PTE_PG_MASK
# define SHW_PT_SHIFT           EPT_PT_SHIFT
# define SHW_PT_MASK            EPT_PT_MASK
# define SHW_PDPT_SHIFT         EPT_PDPT_SHIFT
# define SHW_PDPT_MASK          EPT_PDPT_MASK
# define SHW_PDPE_PG_MASK       EPT_PDPE_PG_MASK
# define SHW_TOTAL_PD_ENTRIES   (EPT_PG_AMD64_ENTRIES*EPT_PG_AMD64_PDPE_ENTRIES)
# define SHW_POOL_ROOT_IDX      PGMPOOL_IDX_NESTED_ROOT     /* do not use! exception is real mode & protected mode without paging. */

#else
/* PAE and AMD64 (long mode) shadow paging share the PAE entry layout; the
   levels above the page directory differ and are selected further down. */
# define SHWPT                  X86PTPAE
# define PSHWPT                 PX86PTPAE
# define SHWPTE                 X86PTEPAE
# define PSHWPTE                PX86PTEPAE
# define SHWPD                  X86PDPAE
# define PSHWPD                 PX86PDPAE
# define SHWPDE                 X86PDEPAE
# define PSHWPDE                PX86PDEPAE
# define SHW_PDE_PG_MASK        X86_PDE_PAE_PG_MASK
# define SHW_PD_SHIFT           X86_PD_PAE_SHIFT
# define SHW_PD_MASK            X86_PD_PAE_MASK
# define SHW_PTE_PG_MASK        X86_PTE_PAE_PG_MASK
# define SHW_PT_SHIFT           X86_PT_PAE_SHIFT
# define SHW_PT_MASK            X86_PT_PAE_MASK

# if PGM_SHW_TYPE == PGM_TYPE_AMD64
/* AMD64: 4-level hierarchy, PDPT indexed with the long-mode mask. */
#  define SHW_PDPT_SHIFT        X86_PDPT_SHIFT
#  define SHW_PDPT_MASK         X86_PDPT_MASK_AMD64
#  define SHW_PDPE_PG_MASK      X86_PDPE_PG_MASK
#  define SHW_TOTAL_PD_ENTRIES  (X86_PG_AMD64_ENTRIES*X86_PG_AMD64_PDPE_ENTRIES)
#  define SHW_POOL_ROOT_IDX     PGMPOOL_IDX_AMD64_CR3

# else /* 32 bits PAE mode */
/* 32-bit PAE: 3-level hierarchy with the 4-entry PDPT. */
#  define SHW_PDPT_SHIFT        X86_PDPT_SHIFT
#  define SHW_PDPT_MASK         X86_PDPT_MASK_PAE
#  define SHW_PDPE_PG_MASK      X86_PDPE_PG_MASK
#  define SHW_TOTAL_PD_ENTRIES  (X86_PG_PAE_ENTRIES*X86_PG_PAE_PDPE_ENTRIES)
#  define SHW_POOL_ROOT_IDX     PGMPOOL_IDX_PDPT
# endif
#endif
111
112
/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
RT_C_DECLS_BEGIN
/* r3 - ring-3 only entry points (mode data init and mode switching). */
PGM_SHW_DECL(int, InitData)(PVM pVM, PPGMMODEDATA pModeData, bool fResolveGCAndR0);
PGM_SHW_DECL(int, Enter)(PVMCPU pVCpu, bool fIs64BitsPagingMode);
PGM_SHW_DECL(int, Relocate)(PVMCPU pVCpu, RTGCPTR offDelta);
PGM_SHW_DECL(int, Exit)(PVMCPU pVCpu);

/* all - available in every context (R3, R0 and RC). */
PGM_SHW_DECL(int, GetPage)(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys);
PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
RT_C_DECLS_END
127
128
/**
 * Initializes the shadow bit of the paging mode data.
 *
 * Fills in the shadow-mode function pointers (Relocate, Exit, GetPage,
 * ModifyPage) for ring-3 and, when requested, resolves the ring-0 and
 * raw-mode context symbols as well.
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   pModeData       The paging mode data to initialize; its uShwType
 *                          must match this template instantiation (or be
 *                          PGM_TYPE_NESTED).
 * @param   fResolveGCAndR0 Indicate whether or not GC and Ring-0 symbols can be resolved now.
 *                          This is used early in the init process to avoid trouble with PDM
 *                          not being initialized yet.
 */
PGM_SHW_DECL(int, InitData)(PVM pVM, PPGMMODEDATA pModeData, bool fResolveGCAndR0)
{
    Assert(pModeData->uShwType == PGM_SHW_TYPE || pModeData->uShwType == PGM_TYPE_NESTED);

    /* Ring-3 */
    pModeData->pfnR3ShwRelocate     = PGM_SHW_NAME(Relocate);
    pModeData->pfnR3ShwExit         = PGM_SHW_NAME(Exit);
    pModeData->pfnR3ShwGetPage      = PGM_SHW_NAME(GetPage);
    pModeData->pfnR3ShwModifyPage   = PGM_SHW_NAME(ModifyPage);

    if (fResolveGCAndR0)
    {
        int rc;

#if PGM_SHW_TYPE != PGM_TYPE_AMD64 && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT /* No AMD64 for traditional virtualization, only VT-x and AMD-V. */
        /* GC */
        rc = PDMR3LdrGetSymbolRC(pVM, NULL,       PGM_SHW_NAME_RC_STR(GetPage),    &pModeData->pfnRCShwGetPage);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_SHW_NAME_RC_STR(GetPage),  rc), rc);
        rc = PDMR3LdrGetSymbolRC(pVM, NULL,       PGM_SHW_NAME_RC_STR(ModifyPage), &pModeData->pfnRCShwModifyPage);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_SHW_NAME_RC_STR(ModifyPage), rc), rc);
#endif /* Not AMD64 shadow paging. */

        /* Ring-0 */
        rc = PDMR3LdrGetSymbolR0(pVM, NULL,       PGM_SHW_NAME_R0_STR(GetPage),    &pModeData->pfnR0ShwGetPage);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_SHW_NAME_R0_STR(GetPage),  rc), rc);
        rc = PDMR3LdrGetSymbolR0(pVM, NULL,       PGM_SHW_NAME_R0_STR(ModifyPage), &pModeData->pfnR0ShwModifyPage);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_SHW_NAME_R0_STR(ModifyPage), rc), rc);
    }
    return VINF_SUCCESS;
}
168
169/**
170 * Enters the shadow mode.
171 *
172 * @returns VBox status code.
173 * @param pVCpu The VMCPU to operate on.
174 * @param fIs64BitsPagingMode New shadow paging mode is for 64 bits? (only relevant for 64 bits guests on a 32 bits AMD-V nested paging host)
175 */
176PGM_SHW_DECL(int, Enter)(PVMCPU pVCpu, bool fIs64BitsPagingMode)
177{
178#if PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT
179
180# if PGM_SHW_TYPE == PGM_TYPE_NESTED && HC_ARCH_BITS == 32
181 /* Must distinguish between 32 and 64 bits guest paging modes as we'll use a different shadow paging root/mode in both cases. */
182 RTGCPHYS GCPhysCR3 = (fIs64BitsPagingMode) ? RT_BIT_64(63) : RT_BIT_64(62);
183# else
184 RTGCPHYS GCPhysCR3 = RT_BIT_64(63);
185# endif
186 PPGMPOOLPAGE pNewShwPageCR3;
187 PVM pVM = pVCpu->pVMR3;
188 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
189
190 Assert(HWACCMIsNestedPagingActive(pVM));
191 Assert(!pVCpu->pgm.s.pShwPageCR3R3);
192
193 pgmLock(pVM);
194
195 int rc = pgmPoolAlloc(pVM, GCPhysCR3, PGMPOOLKIND_ROOT_NESTED, PGMPOOL_IDX_NESTED_ROOT, GCPhysCR3 >> PAGE_SHIFT, &pNewShwPageCR3, true /* lock page */);
196 AssertFatalRC(rc);
197
198 pVCpu->pgm.s.iShwUser = PGMPOOL_IDX_NESTED_ROOT;
199 pVCpu->pgm.s.iShwUserTable = GCPhysCR3 >> PAGE_SHIFT;
200 pVCpu->pgm.s.pShwPageCR3R3 = pNewShwPageCR3;
201
202 pVCpu->pgm.s.pShwPageCR3RC = MMHyperCCToRC(pVM, pVCpu->pgm.s.pShwPageCR3R3);
203 pVCpu->pgm.s.pShwPageCR3R0 = MMHyperCCToR0(pVM, pVCpu->pgm.s.pShwPageCR3R3);
204
205 pgmUnlock(pVM);
206
207 Log(("Enter nested shadow paging mode: root %RHv phys %RHp\n", pVCpu->pgm.s.pShwPageCR3R3, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key));
208#endif
209 return VINF_SUCCESS;
210}
211
212
/**
 * Relocate any GC pointers related to shadow mode paging.
 *
 * @returns VBox status code.
 * @param   pVCpu       The VMCPU to operate on.
 * @param   offDelta    The relocation offset.
 */
PGM_SHW_DECL(int, Relocate)(PVMCPU pVCpu, RTGCPTR offDelta)
{
    /* Only the raw-mode context (RC) pointer lives in relocatable hypervisor
       memory; the R3 and R0 pointers are unaffected by a relocation. */
    pVCpu->pgm.s.pShwPageCR3RC += offDelta;
    return VINF_SUCCESS;
}
225
226
/**
 * Exits the shadow mode.
 *
 * Undoes the work of Enter for the nested (AMD-V) and EPT (VT-x) shadow
 * modes: unlocks and frees the shadow root page and clears all cached
 * references to it.  A no-op for every other shadow mode.
 *
 * @returns VBox status code.
 * @param   pVCpu       The VMCPU to operate on.
 */
PGM_SHW_DECL(int, Exit)(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->pVMR3;

    /* Only nested/EPT modes allocated a root page in Enter (and only if one
       is actually installed); otherwise there is nothing to tear down. */
    if (    (   pVCpu->pgm.s.enmShadowMode == PGMMODE_NESTED
             || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT)
        &&  pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
    {
        PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

        Assert(pVCpu->pgm.s.iShwUser == PGMPOOL_IDX_NESTED_ROOT);

        /* All pool manipulation must happen under the PGM lock. */
        pgmLock(pVM);

        /* Mark the page as unlocked; allow flushing again. */
        pgmPoolUnlockPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));

        /* Free the root page back to the pool (must follow the unlock above),
           then drop every cached reference: the R3/R0/RC pointers and the
           pool user bookkeeping set up by Enter. */
        pgmPoolFreeByPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3), pVCpu->pgm.s.iShwUser, pVCpu->pgm.s.iShwUserTable);
        pVCpu->pgm.s.pShwPageCR3R3 = 0;
        pVCpu->pgm.s.pShwPageCR3R0 = 0;
        pVCpu->pgm.s.pShwPageCR3RC = 0;
        pVCpu->pgm.s.iShwUser      = 0;
        pVCpu->pgm.s.iShwUserTable = 0;

        pgmUnlock(pVM);

        Log(("Leave nested shadow paging mode\n"));
    }
    return VINF_SUCCESS;
}
263
Note: See TracBrowser for help on using the repository browser.

© 2023 Oracle
ContactPrivacy policyTerms of Use