VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllGstSlatEpt.cpp.h@ 96860

Last change on this file since 96860 was 96737, checked in by vboxsync, 20 months ago

VMM/PGM: Nested VMX: bugref:10092 Naming nits and PGM_SLAT_TYPE rather than PGM_GST_TYPE compile time check.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 16.6 KB
/* $Id: PGMAllGstSlatEpt.cpp.h 96737 2022-09-14 12:05:19Z vboxsync $ */
/** @file
 * VBox - Page Manager, Guest EPT SLAT - All context code.
 */

/*
 * Copyright (C) 2021-2022 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

#if PGM_SLAT_TYPE != PGM_SLAT_TYPE_EPT
# error "Unsupported SLAT type."
#endif

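/**
 * Checks if the EPT permission bits in a paging-structure entry are valid.
 *
 * An entry that is not readable must not be writable or executable either, since
 * execute-only translations are neither supported nor advertised here (see the
 * assertions below).
 *
 * @returns @c true if the permissions are valid, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   uEntry  The EPT paging-structure entry to check.
 */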
DECLINLINE(bool) PGM_GST_SLAT_NAME_EPT(WalkIsPermValid)(PCVMCPUCC pVCpu, uint64_t uEntry)
{
    if (!(uEntry & EPT_E_READ))
    {
        Assert(!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmxModeBasedExecuteEpt);
        Assert(!RT_BF_GET(pVCpu->pgm.s.uEptVpidCapMsr, VMX_BF_EPT_VPID_CAP_EXEC_ONLY));
        NOREF(pVCpu);
        if (uEntry & (EPT_E_WRITE | EPT_E_EXECUTE))
            return false;
    }
    return true;
}


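/**
 * Checks if the EPT memory type of a leaf paging-structure entry is valid.
 *
 * @returns @c true if the memory type is one of WB, UC, WP, WT or WC, @c false
 *          otherwise.
 * @param   uEntry  The EPT paging-structure entry to check.
 * @param   uLevel  The page level of the entry (1 for PTE, 2 for PDE, 3 for PDPTE).
 */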
DECLINLINE(bool) PGM_GST_SLAT_NAME_EPT(WalkIsMemTypeValid)(uint64_t uEntry, uint8_t uLevel)
{
    Assert(uLevel <= 3 && uLevel >= 1); NOREF(uLevel);
    uint8_t const fEptMemTypeMask = uEntry & VMX_BF_EPT_PT_MEMTYPE_MASK;
    switch (fEptMemTypeMask)
    {
        case EPT_E_MEMTYPE_WB:
        case EPT_E_MEMTYPE_UC:
        case EPT_E_MEMTYPE_WP:
        case EPT_E_MEMTYPE_WT:
        case EPT_E_MEMTYPE_WC:
            return true;
    }
    return false;
}


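/**
 * Fills in the page-walk info for a not-present EPT paging-structure entry.
 *
 * The failure is recorded as an EPT violation and marked as convertible to a #VE
 * exception when #VE is supported by the guest, the walk failed at the final level
 * or the entry has the leaf bit set, and the suppress-#VE bit is clear.
 *
 * @returns VERR_PAGE_TABLE_NOT_PRESENT.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pWalk   The page walk info to update.
 * @param   uEntry  The not-present EPT paging-structure entry.
 * @param   uLevel  The page level at which the walk failed.
 */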
DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(PCVMCPUCC pVCpu, PPGMPTWALK pWalk, uint64_t uEntry, uint8_t uLevel)
{
    static PGMWALKFAIL const s_afEptViolations[] = { PGM_WALKFAIL_EPT_VIOLATION, PGM_WALKFAIL_EPT_VIOLATION_CONVERTIBLE };
    uint8_t const fEptVeSupported = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmxEptXcptVe;
    uint8_t const fConvertible = RT_BOOL(uLevel == 1 || (uEntry & EPT_E_BIT_LEAF));
    uint8_t const idxViolationType = fEptVeSupported & fConvertible & !RT_BF_GET(uEntry, VMX_BF_EPT_PT_SUPPRESS_VE);

    pWalk->fNotPresent = true;
    pWalk->uLevel = uLevel;
    pWalk->fFailed = s_afEptViolations[idxViolationType];
    return VERR_PAGE_TABLE_NOT_PRESENT;
}


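/**
 * Fills in the page-walk info when the physical address of a paging structure
 * could not be mapped.
 *
 * @returns VERR_PAGE_TABLE_NOT_PRESENT.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pWalk   The page walk info to update.
 * @param   uLevel  The page level at which the walk failed.
 * @param   rc      The status code of the failed mapping (expected to be
 *                  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS).
 */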
DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(PCVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel, int rc)
{
    AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc); NOREF(pVCpu);
    pWalk->fBadPhysAddr = true;
    pWalk->uLevel = uLevel;
    pWalk->fFailed = PGM_WALKFAIL_EPT_VIOLATION;
    return VERR_PAGE_TABLE_NOT_PRESENT;
}


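/**
 * Fills in the page-walk info for an EPT misconfiguration (reserved bits, invalid
 * permissions or an invalid memory type).
 *
 * @returns VERR_PAGE_TABLE_NOT_PRESENT.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pWalk   The page walk info to update.
 * @param   uLevel  The page level at which the walk failed.
 */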
DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel)
{
    NOREF(pVCpu);
    pWalk->fRsvdError = true;
    pWalk->uLevel = uLevel;
    pWalk->fFailed = PGM_WALKFAIL_EPT_MISCONFIG;
    return VERR_PAGE_TABLE_NOT_PRESENT;
}


/**
 * Walks the guest's EPT page table (second-level address translation).
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
 * @param   GCPhysNested        The nested-guest physical address to walk.
 * @param   fIsLinearAddrValid  Whether the linear address in @c GCPtrNested caused
 *                              this page walk.
 * @param   GCPtrNested         The nested-guest linear address that caused this
 *                              page walk. If @c fIsLinearAddrValid is false, pass 0.
 * @param   pWalk               The page walk info.
 * @param   pSlatWalk           The SLAT mode specific page walk info.
 */
DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(Walk)(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
                                            PPGMPTWALK pWalk, PSLATPTWALK pSlatWalk)
{
    Assert(fIsLinearAddrValid || GCPtrNested == 0);

    /*
     * Init walk structures.
     */
    RT_ZERO(*pWalk);
    RT_ZERO(*pSlatWalk);

    pWalk->GCPtr = GCPtrNested;
    pWalk->GCPhysNested = GCPhysNested;
    pWalk->fIsLinearAddrValid = fIsLinearAddrValid;
    pWalk->fIsSlat = true;
    /*
     * Figure out EPT attributes that are cumulative (logical-AND) across page walks.
     *   - R, W, X_SUPER are unconditionally cumulative.
     *     See Intel spec. Table 26-7 "Exit Qualification for EPT Violations".
     *
     *   - X_USER is cumulative but only relevant when mode-based execute control for EPT
     *     is used, which we currently don't support (asserted below).
     *
     *   - MEMTYPE is not cumulative and only applicable to the final paging entry.
     *
     *   - A, D EPT bits map to the regular page-table bit positions. Thus, they're not
     *     included in the mask below and handled separately. Accessed bits are
     *     cumulative but dirty bits are not cumulative as they're only applicable to
     *     the final paging entry.
     */
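    /*
     * For example, a PML4E that grants read+write combined with a PDPTE that grants
     * read-only yields read-only effective access: the write bit is cleared when the
     * lower-level attributes are AND-ed into fEffective below.
     */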
    Assert(!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmxModeBasedExecuteEpt);
    uint64_t const fEptAndMask = (  PGM_PTATTRS_EPT_R_MASK
                                  | PGM_PTATTRS_EPT_W_MASK
                                  | PGM_PTATTRS_EPT_X_SUPER_MASK) & PGM_PTATTRS_EPT_MASK;

    /*
     * Do the walk.
     */
    uint64_t fEffective;
    {
        /*
         * EPTP.
         *
         * We currently only support 4-level EPT paging.
         * EPT 5-level paging was documented at some point (bit 7 of MSR_IA32_VMX_EPT_VPID_CAP)
         * but for some reason seems to have been removed from subsequent specs.
         */
        int const rc = pgmGstGetEptPML4PtrEx(pVCpu, &pSlatWalk->pPml4);
        if (RT_SUCCESS(rc))
        { /* likely */ }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc);
    }
    {
        /*
         * PML4E.
         */
        PEPTPML4E pPml4e;
        pSlatWalk->pPml4e = pPml4e = &pSlatWalk->pPml4->a[(GCPhysNested >> SLAT_PML4_SHIFT) & SLAT_PML4_MASK];
        EPTPML4E Pml4e;
        pSlatWalk->Pml4e.u = Pml4e.u = pPml4e->u;

        if (SLAT_IS_PGENTRY_PRESENT(pVCpu, Pml4e)) { /* probable */ }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(pVCpu, pWalk, Pml4e.u, 4);

        if (RT_LIKELY(   SLAT_IS_PML4E_VALID(pVCpu, Pml4e)
                      && PGM_GST_SLAT_NAME_EPT(WalkIsPermValid)(pVCpu, Pml4e.u)))
        { /* likely */ }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(pVCpu, pWalk, 4);

        uint64_t const fEptAttrs = Pml4e.u & EPT_PML4E_ATTR_MASK;
        uint8_t const fRead = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_READ);
        uint8_t const fWrite = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
        uint8_t const fExecute = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
        uint8_t const fAccessed = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
        uint64_t const fEptAndBits = (fEptAttrs << PGM_PTATTRS_EPT_SHIFT) & fEptAndMask;
        fEffective = RT_BF_MAKE(PGM_PTATTRS_R, fRead)
                   | RT_BF_MAKE(PGM_PTATTRS_W, fWrite)
                   | RT_BF_MAKE(PGM_PTATTRS_NX, !fExecute)
                   | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed)
                   | fEptAndBits;
        pWalk->fEffective = fEffective;

        int const rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pml4e.u & EPT_PML4E_PG_MASK, &pSlatWalk->pPdpt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);
    }
    {
        /*
         * PDPTE.
         */
        PEPTPDPTE pPdpte;
        pSlatWalk->pPdpte = pPdpte = &pSlatWalk->pPdpt->a[(GCPhysNested >> SLAT_PDPT_SHIFT) & SLAT_PDPT_MASK];
        EPTPDPTE Pdpte;
        pSlatWalk->Pdpte.u = Pdpte.u = pPdpte->u;

        if (SLAT_IS_PGENTRY_PRESENT(pVCpu, Pdpte)) { /* probable */ }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(pVCpu, pWalk, Pdpte.u, 3);

        /* The order of the following "if" and "else if" statements matters. */
        if (   SLAT_IS_PDPE_VALID(pVCpu, Pdpte)
            && PGM_GST_SLAT_NAME_EPT(WalkIsPermValid)(pVCpu, Pdpte.u))
        {
            uint64_t const fEptAttrs = Pdpte.u & EPT_PDPTE_ATTR_MASK;
            uint8_t const fRead = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_READ);
            uint8_t const fWrite = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
            uint8_t const fExecute = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
            uint8_t const fAccessed = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
            uint64_t const fEptAndBits = (fEptAttrs << PGM_PTATTRS_EPT_SHIFT) & fEptAndMask;
            fEffective &= RT_BF_MAKE(PGM_PTATTRS_R, fRead)
                        | RT_BF_MAKE(PGM_PTATTRS_W, fWrite)
                        | RT_BF_MAKE(PGM_PTATTRS_NX, !fExecute)
                        | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed)
                        | fEptAndBits;
            pWalk->fEffective = fEffective;
        }
        else if (   SLAT_IS_BIG_PDPE_VALID(pVCpu, Pdpte)
                 && PGM_GST_SLAT_NAME_EPT(WalkIsPermValid)(pVCpu, Pdpte.u)
                 && PGM_GST_SLAT_NAME_EPT(WalkIsMemTypeValid)(Pdpte.u, 3))
        {
            uint64_t const fEptAttrs = Pdpte.u & EPT_PDPTE1G_ATTR_MASK;
            uint8_t const fRead = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_READ);
            uint8_t const fWrite = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
            uint8_t const fExecute = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
            uint8_t const fAccessed = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
            uint8_t const fDirty = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY);
            uint8_t const fMemType = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_MEMTYPE);
            uint64_t const fEptAndBits = (fEptAttrs << PGM_PTATTRS_EPT_SHIFT) & fEptAndMask;
            fEffective &= RT_BF_MAKE(PGM_PTATTRS_R, fRead)
                        | RT_BF_MAKE(PGM_PTATTRS_W, fWrite)
                        | RT_BF_MAKE(PGM_PTATTRS_NX, !fExecute)
                        | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed)
                        | fEptAndBits;
            fEffective |= RT_BF_MAKE(PGM_PTATTRS_D, fDirty)
                        | RT_BF_MAKE(PGM_PTATTRS_EPT_MEMTYPE, fMemType);
            pWalk->fEffective = fEffective;

            pWalk->fGigantPage = true;
            pWalk->fSucceeded = true;
            pWalk->GCPhys = SLAT_GET_PDPE1G_GCPHYS(pVCpu, Pdpte)
                          | (GCPhysNested & SLAT_PAGE_1G_OFFSET_MASK);
            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->GCPhys);
            return VINF_SUCCESS;
        }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(pVCpu, pWalk, 3);

        int const rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pdpte.u & EPT_PDPTE_PG_MASK, &pSlatWalk->pPd);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);
    }
    {
        /*
         * PDE.
         */
        PSLATPDE pPde;
        pSlatWalk->pPde = pPde = &pSlatWalk->pPd->a[(GCPhysNested >> SLAT_PD_SHIFT) & SLAT_PD_MASK];
        SLATPDE Pde;
        pSlatWalk->Pde.u = Pde.u = pPde->u;

        if (SLAT_IS_PGENTRY_PRESENT(pVCpu, Pde)) { /* probable */ }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(pVCpu, pWalk, Pde.u, 2);

        /* The order of the following "if" and "else if" statements matters. */
        if (   SLAT_IS_PDE_VALID(pVCpu, Pde)
            && PGM_GST_SLAT_NAME_EPT(WalkIsPermValid)(pVCpu, Pde.u))
        {
            uint64_t const fEptAttrs = Pde.u & EPT_PDE_ATTR_MASK;
            uint8_t const fRead = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_READ);
            uint8_t const fWrite = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
            uint8_t const fExecute = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
            uint8_t const fAccessed = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
            uint64_t const fEptAndBits = (fEptAttrs << PGM_PTATTRS_EPT_SHIFT) & fEptAndMask;
            fEffective &= RT_BF_MAKE(PGM_PTATTRS_R, fRead)
                        | RT_BF_MAKE(PGM_PTATTRS_W, fWrite)
                        | RT_BF_MAKE(PGM_PTATTRS_NX, !fExecute)
                        | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed)
                        | fEptAndBits;
            pWalk->fEffective = fEffective;
        }
        else if (   SLAT_IS_BIG_PDE_VALID(pVCpu, Pde)
                 && PGM_GST_SLAT_NAME_EPT(WalkIsPermValid)(pVCpu, Pde.u)
                 && PGM_GST_SLAT_NAME_EPT(WalkIsMemTypeValid)(Pde.u, 2))
        {
            uint64_t const fEptAttrs = Pde.u & EPT_PDE2M_ATTR_MASK;
            uint8_t const fRead = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_READ);
            uint8_t const fWrite = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
            uint8_t const fExecute = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
            uint8_t const fAccessed = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
            uint8_t const fDirty = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY);
            uint8_t const fMemType = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_MEMTYPE);
            uint64_t const fEptAndBits = (fEptAttrs << PGM_PTATTRS_EPT_SHIFT) & fEptAndMask;
            fEffective &= RT_BF_MAKE(PGM_PTATTRS_R, fRead)
                        | RT_BF_MAKE(PGM_PTATTRS_W, fWrite)
                        | RT_BF_MAKE(PGM_PTATTRS_NX, !fExecute)
                        | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed)
                        | fEptAndBits;
            fEffective |= RT_BF_MAKE(PGM_PTATTRS_D, fDirty)
                        | RT_BF_MAKE(PGM_PTATTRS_EPT_MEMTYPE, fMemType);
            pWalk->fEffective = fEffective;

            pWalk->fBigPage = true;
            pWalk->fSucceeded = true;
            pWalk->GCPhys = SLAT_GET_PDE2M_GCPHYS(pVCpu, Pde)
                          | (GCPhysNested & SLAT_PAGE_2M_OFFSET_MASK);
            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->GCPhys);
            return VINF_SUCCESS;
        }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(pVCpu, pWalk, 2);

        int const rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pde.u & EPT_PDE_PG_MASK, &pSlatWalk->pPt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 1, rc);
    }
    {
        /*
         * PTE.
         */
        PSLATPTE pPte;
        pSlatWalk->pPte = pPte = &pSlatWalk->pPt->a[(GCPhysNested >> SLAT_PT_SHIFT) & SLAT_PT_MASK];
        SLATPTE Pte;
        pSlatWalk->Pte.u = Pte.u = pPte->u;

        if (SLAT_IS_PGENTRY_PRESENT(pVCpu, Pte)) { /* probable */ }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(pVCpu, pWalk, Pte.u, 1);

        if (   SLAT_IS_PTE_VALID(pVCpu, Pte)
            && PGM_GST_SLAT_NAME_EPT(WalkIsPermValid)(pVCpu, Pte.u)
            && PGM_GST_SLAT_NAME_EPT(WalkIsMemTypeValid)(Pte.u, 1))
        { /* likely */ }
        else
            return PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(pVCpu, pWalk, 1);

        uint64_t const fEptAttrs = Pte.u & EPT_PTE_ATTR_MASK;
        uint8_t const fRead = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_READ);
        uint8_t const fWrite = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
        uint8_t const fExecute = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
        uint8_t const fAccessed = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
        uint8_t const fDirty = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY);
        uint8_t const fMemType = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_MEMTYPE);
        uint64_t const fEptAndBits = (fEptAttrs << PGM_PTATTRS_EPT_SHIFT) & fEptAndMask;
        fEffective &= RT_BF_MAKE(PGM_PTATTRS_R, fRead)
                    | RT_BF_MAKE(PGM_PTATTRS_W, fWrite)
                    | RT_BF_MAKE(PGM_PTATTRS_NX, !fExecute)
                    | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed)
                    | fEptAndBits;
        fEffective |= RT_BF_MAKE(PGM_PTATTRS_D, fDirty)
                    | RT_BF_MAKE(PGM_PTATTRS_EPT_MEMTYPE, fMemType);
        pWalk->fEffective = fEffective;

        pWalk->fSucceeded = true;
        pWalk->GCPhys = SLAT_GET_PTE_GCPHYS(pVCpu, Pte) | (GCPhysNested & GUEST_PAGE_OFFSET_MASK);
        return VINF_SUCCESS;
    }
}