VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllGstSlatEpt.cpp.h@ 96860

Last change on this file since 96860 was 96737, checked in by vboxsync, 21 months ago

VMM/PGM: Nested VMX: bugref:10092 Naming nits and PGM_SLAT_TYPE rather than PGM_GST_TYPE compile time check.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 16.6 KB
RevLine 
[92186]1/* $Id: PGMAllGstSlatEpt.cpp.h 96737 2022-09-14 12:05:19Z vboxsync $ */
2/** @file
3 * VBox - Page Manager, Guest EPT SLAT - All context code.
4 */
5
6/*
[96407]7 * Copyright (C) 2021-2022 Oracle and/or its affiliates.
[92186]8 *
[96407]9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
[92186]26 */
27
[96737]28#if PGM_SLAT_TYPE != PGM_SLAT_TYPE_EPT
29# error "Unsupported SLAT type."
30#endif
31
[92480]32DECLINLINE(bool) PGM_GST_SLAT_NAME_EPT(WalkIsPermValid)(PCVMCPUCC pVCpu, uint64_t uEntry)
33{
[93459]34 if (!(uEntry & EPT_E_READ))
[92480]35 {
36 Assert(!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmxModeBasedExecuteEpt);
[93922]37 Assert(!RT_BF_GET(pVCpu->pgm.s.uEptVpidCapMsr, VMX_BF_EPT_VPID_CAP_EXEC_ONLY));
38 NOREF(pVCpu);
39 if (uEntry & (EPT_E_WRITE | EPT_E_EXECUTE))
[92480]40 return false;
41 }
42 return true;
43}
44
45
46DECLINLINE(bool) PGM_GST_SLAT_NAME_EPT(WalkIsMemTypeValid)(uint64_t uEntry, uint8_t uLevel)
47{
[93539]48 Assert(uLevel <= 3 && uLevel >= 1); NOREF(uLevel);
[93922]49 uint8_t const fEptMemTypeMask = uEntry & VMX_BF_EPT_PT_MEMTYPE_MASK;
50 switch (fEptMemTypeMask)
51 {
52 case EPT_E_MEMTYPE_WB:
53 case EPT_E_MEMTYPE_UC:
54 case EPT_E_MEMTYPE_WP:
55 case EPT_E_MEMTYPE_WT:
56 case EPT_E_MEMTYPE_WC:
57 return true;
58 }
59 return false;
[92480]60}
61
62
[92476]63DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(PCVMCPUCC pVCpu, PPGMPTWALK pWalk, uint64_t uEntry, uint8_t uLevel)
[92186]64{
[92685]65 static PGMWALKFAIL const s_afEptViolations[] = { PGM_WALKFAIL_EPT_VIOLATION, PGM_WALKFAIL_EPT_VIOLATION_CONVERTIBLE };
[92472]66 uint8_t const fEptVeSupported = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmxEptXcptVe;
[93459]67 uint8_t const fConvertible = RT_BOOL(uLevel == 1 || (uEntry & EPT_E_BIT_LEAF));
68 uint8_t const idxViolationType = fEptVeSupported & fConvertible & !RT_BF_GET(uEntry, VMX_BF_EPT_PT_SUPPRESS_VE);
[92476]69
70 pWalk->fNotPresent = true;
71 pWalk->uLevel = uLevel;
[93459]72 pWalk->fFailed = s_afEptViolations[idxViolationType];
[92186]73 return VERR_PAGE_TABLE_NOT_PRESENT;
74}
75
76
[92476]77DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(PCVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel, int rc)
[92186]78{
79 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc); NOREF(pVCpu);
[92472]80 pWalk->fBadPhysAddr = true;
81 pWalk->uLevel = uLevel;
[93459]82 pWalk->fFailed = PGM_WALKFAIL_EPT_VIOLATION;
[92186]83 return VERR_PAGE_TABLE_NOT_PRESENT;
84}
85
86
[93459]87DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel)
[92186]88{
[93459]89 NOREF(pVCpu);
90 pWalk->fRsvdError = true;
91 pWalk->uLevel = uLevel;
92 pWalk->fFailed = PGM_WALKFAIL_EPT_MISCONFIG;
[92186]93 return VERR_PAGE_TABLE_NOT_PRESENT;
94}
95
96
/**
 * Walks the guest's EPT page table (second-level address translation).
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
 * @param   GCPhysNested        The nested-guest physical address to walk.
 * @param   fIsLinearAddrValid  Whether the linear-address in @c GCPtrNested caused
 *                              this page walk.
 * @param   GCPtrNested         The nested-guest linear address that caused this
 *                              page walk. If @c fIsLinearAddrValid is false, pass
 *                              0.
 * @param   pWalk               The page walk info (output, zeroed on entry).
 * @param   pSlatWalk           The SLAT mode specific page walk info (output,
 *                              zeroed on entry).
 */
DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(Walk)(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
                                            PPGMPTWALK pWalk, PSLATPTWALK pSlatWalk)
{
    Assert(fIsLinearAddrValid || GCPtrNested == 0);

    /*
     * Init walk structures.
     */
    RT_ZERO(*pWalk);
    RT_ZERO(*pSlatWalk);

    pWalk->GCPtr              = GCPtrNested;
    pWalk->GCPhysNested       = GCPhysNested;
    pWalk->fIsLinearAddrValid = fIsLinearAddrValid;
    pWalk->fIsSlat            = true;

    /*
     * Figure out EPT attributes that are cumulative (logical-AND) across page walks.
     *   - R, W, X_SUPER are unconditionally cumulative.
     *     See Intel spec. Table 26-7 "Exit Qualification for EPT Violations".
     *
     *   - X_USER is cumulative but relevant only when mode-based execute control for EPT
     *     which we currently don't support it (asserted below).
     *
     *   - MEMTYPE is not cumulative and only applicable to the final paging entry.
     *
     *   - A, D EPT bits map to the regular page-table bit positions. Thus, they're not
     *     included in the mask below and handled separately. Accessed bits are
     *     cumulative but dirty bits are not cumulative as they're only applicable to
     *     the final paging entry.
     */
    Assert(!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmxModeBasedExecuteEpt);
    uint64_t const fEptAndMask = (  PGM_PTATTRS_EPT_R_MASK
                                  | PGM_PTATTRS_EPT_W_MASK
                                  | PGM_PTATTRS_EPT_X_SUPER_MASK) & PGM_PTATTRS_EPT_MASK;

    /*
     * Do the walk.
     */
    uint64_t fEffective;
    {
        /*
         * EPTP.
         *
         * We currently only support 4-level EPT paging.
         * EPT 5-level paging was documented at some point (bit 7 of MSR_IA32_VMX_EPT_VPID_CAP)
         * but for some reason seems to have been removed from subsequent specs.
         */
        int const rc = pgmGstGetEptPML4PtrEx(pVCpu, &pSlatWalk->pPml4);
        if (RT_SUCCESS(rc))
        { /* likely */ }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc);
    }
    {
        /*
         * PML4E.
         */
        PEPTPML4E pPml4e;
        pSlatWalk->pPml4e = pPml4e = &pSlatWalk->pPml4->a[(GCPhysNested >> SLAT_PML4_SHIFT) & SLAT_PML4_MASK];
        EPTPML4E  Pml4e;
        pSlatWalk->Pml4e.u = Pml4e.u = pPml4e->u;

        if (SLAT_IS_PGENTRY_PRESENT(pVCpu, Pml4e)) { /* probable */ }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(pVCpu, pWalk, Pml4e.u, 4);

        if (RT_LIKELY(   SLAT_IS_PML4E_VALID(pVCpu, Pml4e)
                      && PGM_GST_SLAT_NAME_EPT(WalkIsPermValid)(pVCpu, Pml4e.u)))
        { /* likely */ }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(pVCpu, pWalk, 4);

        /* Seed the effective attributes from the PML4E; the EPT copies of R/W/X
           are folded in through fEptAndBits (restricted by fEptAndMask above). */
        uint64_t const fEptAttrs   = Pml4e.u & EPT_PML4E_ATTR_MASK;
        uint8_t const  fRead       = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_READ);
        uint8_t const  fWrite      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
        uint8_t const  fExecute    = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
        uint8_t const  fAccessed   = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
        uint64_t const fEptAndBits = (fEptAttrs << PGM_PTATTRS_EPT_SHIFT) & fEptAndMask;
        fEffective = RT_BF_MAKE(PGM_PTATTRS_R,  fRead)
                   | RT_BF_MAKE(PGM_PTATTRS_W,  fWrite)
                   | RT_BF_MAKE(PGM_PTATTRS_NX, !fExecute)
                   | RT_BF_MAKE(PGM_PTATTRS_A,  fAccessed)
                   | fEptAndBits;
        pWalk->fEffective = fEffective;

        int const rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pml4e.u & EPT_PML4E_PG_MASK, &pSlatWalk->pPdpt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);
    }
    {
        /*
         * PDPTE.
         */
        PEPTPDPTE pPdpte;
        pSlatWalk->pPdpte = pPdpte = &pSlatWalk->pPdpt->a[(GCPhysNested >> SLAT_PDPT_SHIFT) & SLAT_PDPT_MASK];
        EPTPDPTE  Pdpte;
        pSlatWalk->Pdpte.u = Pdpte.u = pPdpte->u;

        if (SLAT_IS_PGENTRY_PRESENT(pVCpu, Pdpte)) { /* probable */ }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(pVCpu, pWalk, Pdpte.u, 3);

        /* The order of the following "if" and "else if" statements matter. */
        if (   SLAT_IS_PDPE_VALID(pVCpu, Pdpte)
            && PGM_GST_SLAT_NAME_EPT(WalkIsPermValid)(pVCpu, Pdpte.u))
        {
            /* Regular PDPTE referencing a PD: accumulate (AND) the cumulative bits. */
            uint64_t const fEptAttrs   = Pdpte.u & EPT_PDPTE_ATTR_MASK;
            uint8_t const  fRead       = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_READ);
            uint8_t const  fWrite      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
            uint8_t const  fExecute    = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
            uint8_t const  fAccessed   = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
            uint64_t const fEptAndBits = (fEptAttrs << PGM_PTATTRS_EPT_SHIFT) & fEptAndMask;
            fEffective &= RT_BF_MAKE(PGM_PTATTRS_R,  fRead)
                        | RT_BF_MAKE(PGM_PTATTRS_W,  fWrite)
                        | RT_BF_MAKE(PGM_PTATTRS_NX, !fExecute)
                        | RT_BF_MAKE(PGM_PTATTRS_A,  fAccessed)
                        | fEptAndBits;
            pWalk->fEffective = fEffective;
        }
        else if (   SLAT_IS_BIG_PDPE_VALID(pVCpu, Pdpte)
                 && PGM_GST_SLAT_NAME_EPT(WalkIsPermValid)(pVCpu, Pdpte.u)
                 && PGM_GST_SLAT_NAME_EPT(WalkIsMemTypeValid)(Pdpte.u, 3))
        {
            /* 1GB page: final entry, so dirty bit and memory type are ORed in
               after ANDing the cumulative bits; the walk completes here. */
            uint64_t const fEptAttrs   = Pdpte.u & EPT_PDPTE1G_ATTR_MASK;
            uint8_t const  fRead       = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_READ);
            uint8_t const  fWrite      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
            uint8_t const  fExecute    = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
            uint8_t const  fAccessed   = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
            uint8_t const  fDirty      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY);
            uint8_t const  fMemType    = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_MEMTYPE);
            uint64_t const fEptAndBits = (fEptAttrs << PGM_PTATTRS_EPT_SHIFT) & fEptAndMask;
            fEffective &= RT_BF_MAKE(PGM_PTATTRS_R,  fRead)
                        | RT_BF_MAKE(PGM_PTATTRS_W,  fWrite)
                        | RT_BF_MAKE(PGM_PTATTRS_NX, !fExecute)
                        | RT_BF_MAKE(PGM_PTATTRS_A,  fAccessed)
                        | fEptAndBits;
            fEffective |= RT_BF_MAKE(PGM_PTATTRS_D,           fDirty)
                        | RT_BF_MAKE(PGM_PTATTRS_EPT_MEMTYPE, fMemType);
            pWalk->fEffective = fEffective;

            pWalk->fGigantPage = true;
            pWalk->fSucceeded  = true;
            pWalk->GCPhys      = SLAT_GET_PDPE1G_GCPHYS(pVCpu, Pdpte)
                               | (GCPhysNested & SLAT_PAGE_1G_OFFSET_MASK);
            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->GCPhys);
            return VINF_SUCCESS;
        }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(pVCpu, pWalk, 3);

        int const rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pdpte.u & EPT_PDPTE_PG_MASK, &pSlatWalk->pPd);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);
    }
    {
        /*
         * PDE.
         */
        PSLATPDE pPde;
        pSlatWalk->pPde = pPde = &pSlatWalk->pPd->a[(GCPhysNested >> SLAT_PD_SHIFT) & SLAT_PD_MASK];
        SLATPDE  Pde;
        pSlatWalk->Pde.u = Pde.u = pPde->u;

        if (SLAT_IS_PGENTRY_PRESENT(pVCpu, Pde)) { /* probable */ }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(pVCpu, pWalk, Pde.u, 2);

        /* The order of the following "if" and "else if" statements matter. */
        if (   SLAT_IS_PDE_VALID(pVCpu, Pde)
            && PGM_GST_SLAT_NAME_EPT(WalkIsPermValid)(pVCpu, Pde.u))
        {
            /* Regular PDE referencing a PT: accumulate (AND) the cumulative bits. */
            uint64_t const fEptAttrs   = Pde.u & EPT_PDE_ATTR_MASK;
            uint8_t const  fRead       = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_READ);
            uint8_t const  fWrite      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
            uint8_t const  fExecute    = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
            uint8_t const  fAccessed   = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
            uint64_t const fEptAndBits = (fEptAttrs << PGM_PTATTRS_EPT_SHIFT) & fEptAndMask;
            fEffective &= RT_BF_MAKE(PGM_PTATTRS_R,  fRead)
                        | RT_BF_MAKE(PGM_PTATTRS_W,  fWrite)
                        | RT_BF_MAKE(PGM_PTATTRS_NX, !fExecute)
                        | RT_BF_MAKE(PGM_PTATTRS_A,  fAccessed)
                        | fEptAndBits;
            pWalk->fEffective = fEffective;
        }
        else if (   SLAT_IS_BIG_PDE_VALID(pVCpu, Pde)
                 && PGM_GST_SLAT_NAME_EPT(WalkIsPermValid)(pVCpu, Pde.u)
                 && PGM_GST_SLAT_NAME_EPT(WalkIsMemTypeValid)(Pde.u, 2))
        {
            /* 2MB page: final entry, dirty bit and memory type ORed in; done. */
            uint64_t const fEptAttrs   = Pde.u & EPT_PDE2M_ATTR_MASK;
            uint8_t const  fRead       = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_READ);
            uint8_t const  fWrite      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
            uint8_t const  fExecute    = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
            uint8_t const  fAccessed   = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
            uint8_t const  fDirty      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY);
            uint8_t const  fMemType    = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_MEMTYPE);
            uint64_t const fEptAndBits = (fEptAttrs << PGM_PTATTRS_EPT_SHIFT) & fEptAndMask;
            fEffective &= RT_BF_MAKE(PGM_PTATTRS_R,  fRead)
                        | RT_BF_MAKE(PGM_PTATTRS_W,  fWrite)
                        | RT_BF_MAKE(PGM_PTATTRS_NX, !fExecute)
                        | RT_BF_MAKE(PGM_PTATTRS_A,  fAccessed)
                        | fEptAndBits;
            fEffective |= RT_BF_MAKE(PGM_PTATTRS_D,           fDirty)
                        | RT_BF_MAKE(PGM_PTATTRS_EPT_MEMTYPE, fMemType);
            pWalk->fEffective = fEffective;

            pWalk->fBigPage   = true;
            pWalk->fSucceeded = true;
            pWalk->GCPhys     = SLAT_GET_PDE2M_GCPHYS(pVCpu, Pde)
                              | (GCPhysNested & SLAT_PAGE_2M_OFFSET_MASK);
            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->GCPhys);
            return VINF_SUCCESS;
        }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(pVCpu, pWalk, 2);

        int const rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pde.u & EPT_PDE_PG_MASK, &pSlatWalk->pPt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 1, rc);
    }
    {
        /*
         * PTE.
         */
        PSLATPTE pPte;
        pSlatWalk->pPte = pPte = &pSlatWalk->pPt->a[(GCPhysNested >> SLAT_PT_SHIFT) & SLAT_PT_MASK];
        SLATPTE  Pte;
        pSlatWalk->Pte.u = Pte.u = pPte->u;

        if (SLAT_IS_PGENTRY_PRESENT(pVCpu, Pte)) { /* probable */ }
        else return PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(pVCpu, pWalk, Pte.u, 1);

        if (   SLAT_IS_PTE_VALID(pVCpu, Pte)
            && PGM_GST_SLAT_NAME_EPT(WalkIsPermValid)(pVCpu, Pte.u)
            && PGM_GST_SLAT_NAME_EPT(WalkIsMemTypeValid)(Pte.u, 1))
        { /* likely */ }
        else
            return PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(pVCpu, pWalk, 1);

        /* 4KB page: final entry, dirty bit and memory type ORed in; done. */
        uint64_t const fEptAttrs   = Pte.u & EPT_PTE_ATTR_MASK;
        uint8_t const  fRead       = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_READ);
        uint8_t const  fWrite      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
        uint8_t const  fExecute    = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
        uint8_t const  fAccessed   = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
        uint8_t const  fDirty      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY);
        uint8_t const  fMemType    = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_MEMTYPE);
        uint64_t const fEptAndBits = (fEptAttrs << PGM_PTATTRS_EPT_SHIFT) & fEptAndMask;
        fEffective &= RT_BF_MAKE(PGM_PTATTRS_R,  fRead)
                    | RT_BF_MAKE(PGM_PTATTRS_W,  fWrite)
                    | RT_BF_MAKE(PGM_PTATTRS_NX, !fExecute)
                    | RT_BF_MAKE(PGM_PTATTRS_A,  fAccessed)
                    | fEptAndBits;
        fEffective |= RT_BF_MAKE(PGM_PTATTRS_D,           fDirty)
                    | RT_BF_MAKE(PGM_PTATTRS_EPT_MEMTYPE, fMemType);
        pWalk->fEffective = fEffective;

        pWalk->fSucceeded = true;
        pWalk->GCPhys     = SLAT_GET_PTE_GCPHYS(pVCpu, Pte) | (GCPhysNested & GUEST_PAGE_OFFSET_MASK);
        return VINF_SUCCESS;
    }
}
368
Note: See TracBrowser for help on using the repository browser.

© 2023 Oracle
Contact · Privacy policy · Terms of Use