/* $Id: PGMAllShw.h 96407 2022-08-22 17:43:14Z vboxsync $ */
/** @file
 * VBox - Page Manager, Shadow Paging Template - All context code.
 */

/*
 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#undef SHWUINT
#undef SHWPT
#undef PSHWPT
#undef SHWPTE
#undef PSHWPTE
#undef SHWPD
#undef PSHWPD
#undef SHWPDE
#undef PSHWPDE
#undef SHW_PDE_PG_MASK
#undef SHW_PD_SHIFT
#undef SHW_PD_MASK
#undef SHW_PDE_ATOMIC_SET
#undef SHW_PDE_ATOMIC_SET2
#undef SHW_PDE_IS_P
#undef SHW_PDE_IS_A
#undef SHW_PDE_IS_BIG
#undef SHW_PTE_PG_MASK
#undef SHW_PTE_IS_P
#undef SHW_PTE_IS_RW
#undef SHW_PTE_IS_US
#undef SHW_PTE_IS_A
#undef SHW_PTE_IS_D
#undef SHW_PTE_IS_P_RW
#undef SHW_PTE_IS_TRACK_DIRTY
#undef SHW_PTE_GET_HCPHYS
#undef SHW_PTE_GET_U
#undef SHW_PTE_LOG64
#undef SHW_PTE_SET
#undef SHW_PTE_ATOMIC_SET
#undef SHW_PTE_ATOMIC_SET2
#undef SHW_PTE_SET_RO
#undef SHW_PTE_SET_RW
#undef SHW_PT_SHIFT
#undef SHW_PT_MASK
#undef SHW_TOTAL_PD_ENTRIES
#undef SHW_PDPT_SHIFT
#undef SHW_PDPT_MASK
#undef SHW_PDPE_PG_MASK

#if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_NESTED_32BIT
# define SHWUINT                        uint32_t
# define SHWPT                          X86PT
# define PSHWPT                         PX86PT
# define SHWPTE                         X86PTE
# define PSHWPTE                        PX86PTE
# define SHWPD                          X86PD
# define PSHWPD                         PX86PD
# define SHWPDE                         X86PDE
# define PSHWPDE                        PX86PDE
# define SHW_PDE_PG_MASK                X86_PDE_PG_MASK
# define SHW_PD_SHIFT                   X86_PD_SHIFT
# define SHW_PD_MASK                    X86_PD_MASK
# define SHW_TOTAL_PD_ENTRIES           X86_PG_ENTRIES
# define SHW_PDE_IS_P(Pde)              ( (Pde).u & X86_PDE_P )
# define SHW_PDE_IS_A(Pde)              ( (Pde).u & X86_PDE_A )
# define SHW_PDE_IS_BIG(Pde)            ( (Pde).u & X86_PDE_PS )
# define SHW_PDE_ATOMIC_SET(Pde, uNew)  do { ASMAtomicWriteU32(&(Pde).u, (uNew)); } while (0)
# define SHW_PDE_ATOMIC_SET2(Pde, Pde2) do { ASMAtomicWriteU32(&(Pde).u, (Pde2).u); } while (0)
# define SHW_PTE_PG_MASK                X86_PTE_PG_MASK
# define SHW_PTE_IS_P(Pte)              ( (Pte).u & X86_PTE_P )
# define SHW_PTE_IS_RW(Pte)             ( (Pte).u & X86_PTE_RW )
# define SHW_PTE_IS_US(Pte)             ( (Pte).u & X86_PTE_US )
# define SHW_PTE_IS_A(Pte)              ( (Pte).u & X86_PTE_A )
# define SHW_PTE_IS_D(Pte)              ( (Pte).u & X86_PTE_D )
# define SHW_PTE_IS_P_RW(Pte)           ( ((Pte).u & (X86_PTE_P | X86_PTE_RW)) == (X86_PTE_P | X86_PTE_RW) )
# define SHW_PTE_IS_TRACK_DIRTY(Pte)    ( !!((Pte).u & PGM_PTFLAGS_TRACK_DIRTY) )
# define SHW_PTE_GET_HCPHYS(Pte)        ( (Pte).u & X86_PTE_PG_MASK )
# define SHW_PTE_LOG64(Pte)             ( (uint64_t)(Pte).u )
# define SHW_PTE_GET_U(Pte)             ( (Pte).u ) /**< Use with care. */
# define SHW_PTE_SET(Pte, uNew)         do { (Pte).u = (uNew); } while (0)
# define SHW_PTE_ATOMIC_SET(Pte, uNew)  do { ASMAtomicWriteU32(&(Pte).u, (uNew)); } while (0)
# define SHW_PTE_ATOMIC_SET2(Pte, Pte2) do { ASMAtomicWriteU32(&(Pte).u, (Pte2).u); } while (0)
# define SHW_PTE_SET_RO(Pte)            do { (Pte).u &= ~(X86PGUINT)X86_PTE_RW; } while (0)
# define SHW_PTE_SET_RW(Pte)            do { (Pte).u |= X86_PTE_RW; } while (0)
# define SHW_PT_SHIFT                   X86_PT_SHIFT
# define SHW_PT_MASK                    X86_PT_MASK

#elif PGM_SHW_TYPE == PGM_TYPE_EPT
# define SHWUINT                        uint64_t
# define SHWPT                          EPTPT
# define PSHWPT                         PEPTPT
# define SHWPTE                         EPTPTE
# define PSHWPTE                        PEPTPTE
# define SHWPD                          EPTPD
# define PSHWPD                         PEPTPD
# define SHWPDE                         EPTPDE
# define PSHWPDE                        PEPTPDE
# define SHW_PDE_PG_MASK                EPT_PDE_PG_MASK
# define SHW_PD_SHIFT                   EPT_PD_SHIFT
# define SHW_PD_MASK                    EPT_PD_MASK
# define SHW_PDE_IS_P(Pde)              ( (Pde).u & EPT_E_READ /* always set */ )
# define SHW_PDE_IS_A(Pde)              ( 1 ) /* We don't use EPT_E_ACCESSED, use with care! */
# define SHW_PDE_IS_BIG(Pde)            ( (Pde).u & EPT_E_LEAF )
# define SHW_PDE_ATOMIC_SET(Pde, uNew)  do { ASMAtomicWriteU64(&(Pde).u, (uNew)); } while (0)
# define SHW_PDE_ATOMIC_SET2(Pde, Pde2) do { ASMAtomicWriteU64(&(Pde).u, (Pde2).u); } while (0)
# define SHW_PTE_PG_MASK                EPT_PTE_PG_MASK
# define SHW_PTE_IS_P(Pte)              ( (Pte).u & EPT_E_READ ) /* Approximation, works for us. */
# define SHW_PTE_IS_RW(Pte)             ( (Pte).u & EPT_E_WRITE )
# define SHW_PTE_IS_US(Pte)             ( true )
# define SHW_PTE_IS_A(Pte)              ( true )
# define SHW_PTE_IS_D(Pte)              ( true )
# define SHW_PTE_IS_P_RW(Pte)           ( ((Pte).u & (EPT_E_READ | EPT_E_WRITE)) == (EPT_E_READ | EPT_E_WRITE) )
# define SHW_PTE_IS_TRACK_DIRTY(Pte)    ( false )
# define SHW_PTE_GET_HCPHYS(Pte)        ( (Pte).u & EPT_PTE_PG_MASK )
# define SHW_PTE_LOG64(Pte)             ( (Pte).u )
# define SHW_PTE_GET_U(Pte)             ( (Pte).u ) /**< Use with care. */
# define SHW_PTE_SET(Pte, uNew)         do { (Pte).u = (uNew); } while (0)
# define SHW_PTE_ATOMIC_SET(Pte, uNew)  do { ASMAtomicWriteU64(&(Pte).u, (uNew)); } while (0)
# define SHW_PTE_ATOMIC_SET2(Pte, Pte2) do { ASMAtomicWriteU64(&(Pte).u, (Pte2).u); } while (0)
# define SHW_PTE_SET_RO(Pte)            do { (Pte).u &= ~(uint64_t)EPT_E_WRITE; } while (0)
# define SHW_PTE_SET_RW(Pte)            do { (Pte).u |= EPT_E_WRITE; } while (0)
# define SHW_PT_SHIFT                   EPT_PT_SHIFT
# define SHW_PT_MASK                    EPT_PT_MASK
# define SHW_PDPT_SHIFT                 EPT_PDPT_SHIFT
# define SHW_PDPT_MASK                  EPT_PDPT_MASK
# define SHW_PDPE_PG_MASK               EPT_PDPE_PG_MASK
# define SHW_TOTAL_PD_ENTRIES           (EPT_PG_AMD64_ENTRIES * EPT_PG_AMD64_PDPE_ENTRIES)

#else
# define SHWUINT                        uint64_t
# define SHWPT                          PGMSHWPTPAE
# define PSHWPT                         PPGMSHWPTPAE
# define SHWPTE                         PGMSHWPTEPAE
# define PSHWPTE                        PPGMSHWPTEPAE
# define SHWPD                          X86PDPAE
# define PSHWPD                         PX86PDPAE
# define SHWPDE                         X86PDEPAE
# define PSHWPDE                        PX86PDEPAE
# define SHW_PDE_PG_MASK                X86_PDE_PAE_PG_MASK
# define SHW_PD_SHIFT                   X86_PD_PAE_SHIFT
# define SHW_PD_MASK                    X86_PD_PAE_MASK
# define SHW_PDE_IS_P(Pde)              ( (Pde).u & X86_PDE_P )
# define SHW_PDE_IS_A(Pde)              ( (Pde).u & X86_PDE_A )
# define SHW_PDE_IS_BIG(Pde)            ( (Pde).u & X86_PDE_PS )
# define SHW_PDE_ATOMIC_SET(Pde, uNew)  do { ASMAtomicWriteU64(&(Pde).u, (uNew)); } while (0)
# define SHW_PDE_ATOMIC_SET2(Pde, Pde2) do { ASMAtomicWriteU64(&(Pde).u, (Pde2).u); } while (0)
# define SHW_PTE_PG_MASK                X86_PTE_PAE_PG_MASK
# define SHW_PTE_IS_P(Pte)              PGMSHWPTEPAE_IS_P(Pte)
# define SHW_PTE_IS_RW(Pte)             PGMSHWPTEPAE_IS_RW(Pte)
# define SHW_PTE_IS_US(Pte)             PGMSHWPTEPAE_IS_US(Pte)
# define SHW_PTE_IS_A(Pte)              PGMSHWPTEPAE_IS_A(Pte)
# define SHW_PTE_IS_D(Pte)              PGMSHWPTEPAE_IS_D(Pte)
# define SHW_PTE_IS_P_RW(Pte)           PGMSHWPTEPAE_IS_P_RW(Pte)
# define SHW_PTE_IS_TRACK_DIRTY(Pte)    PGMSHWPTEPAE_IS_TRACK_DIRTY(Pte)
# define SHW_PTE_GET_HCPHYS(Pte)        PGMSHWPTEPAE_GET_HCPHYS(Pte)
# define SHW_PTE_LOG64(Pte)             PGMSHWPTEPAE_GET_LOG(Pte)
# define SHW_PTE_GET_U(Pte)             PGMSHWPTEPAE_GET_U(Pte) /**< Use with care. */
# define SHW_PTE_SET(Pte, uNew)         PGMSHWPTEPAE_SET(Pte, uNew)
# define SHW_PTE_ATOMIC_SET(Pte, uNew)  PGMSHWPTEPAE_ATOMIC_SET(Pte, uNew)
# define SHW_PTE_ATOMIC_SET2(Pte, Pte2) PGMSHWPTEPAE_ATOMIC_SET2(Pte, Pte2)
# define SHW_PTE_SET_RO(Pte)            PGMSHWPTEPAE_SET_RO(Pte)
# define SHW_PTE_SET_RW(Pte)            PGMSHWPTEPAE_SET_RW(Pte)
# define SHW_PT_SHIFT                   X86_PT_PAE_SHIFT
# define SHW_PT_MASK                    X86_PT_PAE_MASK

# if PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_NESTED_AMD64 || /* whatever: */ PGM_SHW_TYPE == PGM_TYPE_NONE
#  define SHW_PDPT_SHIFT                X86_PDPT_SHIFT
#  define SHW_PDPT_MASK                 X86_PDPT_MASK_AMD64
#  define SHW_PDPE_PG_MASK              X86_PDPE_PG_MASK
#  define SHW_TOTAL_PD_ENTRIES          (X86_PG_AMD64_ENTRIES * X86_PG_AMD64_PDPE_ENTRIES)

# elif PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED_PAE
#  define SHW_PDPT_SHIFT                X86_PDPT_SHIFT
#  define SHW_PDPT_MASK                 X86_PDPT_MASK_PAE
#  define SHW_PDPE_PG_MASK              X86_PDPE_PG_MASK
#  define SHW_TOTAL_PD_ENTRIES          (X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPE_ENTRIES)

# else
#  error "Misconfigured PGM_SHW_TYPE or something..."
# endif
#endif
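
/*
 * Illustrative sketch: the SHW_* mappings above let the template code below
 * walk shadow page tables without caring about the concrete format.  For any
 * PGM_SHW_TYPE instantiation, looking up one PT entry follows the same shape
 * (mirroring the GetPage code further down):
 *
 *     const unsigned iPt    = (GCPtr >> SHW_PT_SHIFT) & SHW_PT_MASK;
 *     SHWPTE         Pte    = pPT->a[iPt];
 *     RTHCPHYS       HCPhys = SHW_PTE_IS_P(Pte)  // 'present' (32-bit/PAE) or 'readable' (EPT)
 *                           ? SHW_PTE_GET_HCPHYS(Pte) : NIL_RTHCPHYS;
 */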

#if PGM_SHW_TYPE == PGM_TYPE_NONE && PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE)
# error "PGM_TYPE_IS_NESTED_OR_EPT is true for PGM_TYPE_NONE!"
#endif


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
RT_C_DECLS_BEGIN
PGM_SHW_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys);
PGM_SHW_DECL(int, ModifyPage)(PVMCPUCC pVCpu, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags);
PGM_SHW_DECL(int, Enter)(PVMCPUCC pVCpu, bool fIs64BitsPagingMode);
PGM_SHW_DECL(int, Exit)(PVMCPUCC pVCpu);
#ifdef IN_RING3
PGM_SHW_DECL(int, Relocate)(PVMCPUCC pVCpu, RTGCPTR offDelta);
#endif
RT_C_DECLS_END
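
/*
 * A minimal sketch of how this template is instantiated (assuming the include
 * pattern used by PGMAll.cpp): the header is included once per shadow paging
 * mode with PGM_SHW_TYPE and the name-mangling macros defined up front,
 * roughly like:
 *
 *     #define PGM_SHW_TYPE       PGM_TYPE_32BIT
 *     #define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
 *     #include "PGMAllShw.h"
 *     #undef  PGM_SHW_NAME
 *     #undef  PGM_SHW_TYPE
 *
 * so each PGM_SHW_DECL() function above gets a mode-specific external name.
 */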


/**
 * Enters the shadow mode.
 *
 * @returns VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   fIs64BitsPagingMode Whether the new shadow paging mode is for
 *                              64-bit guests (only relevant for 64-bit guests
 *                              on a 32-bit AMD-V nested paging host).
 */
PGM_SHW_DECL(int, Enter)(PVMCPUCC pVCpu, bool fIs64BitsPagingMode)
{
#if PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE)

# if PGM_TYPE_IS_NESTED(PGM_SHW_TYPE) && HC_ARCH_BITS == 32
    /* Must distinguish between 32-bit and 64-bit guest paging modes as we'll use
       a different shadow paging root/mode in both cases. */
    RTGCPHYS     GCPhysCR3 = (fIs64BitsPagingMode) ? RT_BIT_64(63) : RT_BIT_64(62);
# else
    RTGCPHYS     GCPhysCR3 = RT_BIT_64(63); NOREF(fIs64BitsPagingMode);
# endif
    PPGMPOOLPAGE pNewShwPageCR3;
    PVMCC        pVM = pVCpu->CTX_SUFF(pVM);

    Assert(HMIsNestedPagingActive(pVM));
    Assert(pVM->pgm.s.fNestedPaging);
    Assert(!pVCpu->pgm.s.pShwPageCR3R3);

    PGM_LOCK_VOID(pVM);

    int rc = pgmPoolAlloc(pVM, GCPhysCR3, PGMPOOLKIND_ROOT_NESTED, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
                          NIL_PGMPOOL_IDX, UINT32_MAX, true /*fLockPage*/,
                          &pNewShwPageCR3);
    AssertLogRelRCReturnStmt(rc, PGM_UNLOCK(pVM), rc);

    pVCpu->pgm.s.pShwPageCR3R3 = pgmPoolConvertPageToR3(pVM->pgm.s.CTX_SUFF(pPool), pNewShwPageCR3);
    pVCpu->pgm.s.pShwPageCR3R0 = pgmPoolConvertPageToR0(pVM->pgm.s.CTX_SUFF(pPool), pNewShwPageCR3);

    PGM_UNLOCK(pVM);

    Log(("Enter nested shadow paging mode: root %RHv phys %RHp\n", pVCpu->pgm.s.pShwPageCR3R3, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key));
#else
    NOREF(pVCpu); NOREF(fIs64BitsPagingMode);
#endif
    return VINF_SUCCESS;
}


/**
 * Exits the shadow mode.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
PGM_SHW_DECL(int, Exit)(PVMCPUCC pVCpu)
{
#if PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE)
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    if (pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
    {
        PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

        PGM_LOCK_VOID(pVM);

        /* Do *not* unlock this page as we have two of them floating around in the 32-bit host & 64-bit guest case.
         * We currently assert when one of them is freed; properly supporting this isn't worth the trouble.
         *
         * Note that this is at most two nested paging root pages. This isn't a leak; they are reused.
         */
        /* pgmPoolUnlockPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)); */

        pgmPoolFreeByPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3), NIL_PGMPOOL_IDX, UINT32_MAX);
        pVCpu->pgm.s.pShwPageCR3R3 = 0;
        pVCpu->pgm.s.pShwPageCR3R0 = 0;

        PGM_UNLOCK(pVM);

        Log(("Leave nested shadow paging mode\n"));
    }
#else
    RT_NOREF_PV(pVCpu);
#endif
    return VINF_SUCCESS;
}


/**
 * Gets effective page information (from the VMM page directory).
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*.
 * @param   pHCPhys     Where to store the HC physical address of the page.
 *                      This is page aligned.
 * @remark  You should use PGMMapGetPage() for pages in a mapping.
 */
PGM_SHW_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
{
#if PGM_SHW_TYPE == PGM_TYPE_NONE
    RT_NOREF(pVCpu, GCPtr);
    AssertFailed();
    *pfFlags = 0;
    *pHCPhys = NIL_RTHCPHYS;
    return VERR_PGM_SHW_NONE_IPE;

#else /* PGM_SHW_TYPE != PGM_TYPE_NONE */
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);

    PGM_LOCK_ASSERT_OWNER(pVM);

    /*
     * Get the PDE.
     */
# if PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_NESTED_AMD64
    X86PDEPAE Pde;

    /* PML4 */
    X86PML4E Pml4e = pgmShwGetLongModePML4E(pVCpu, GCPtr);
    if (!(Pml4e.u & X86_PML4E_P))
        return VERR_PAGE_TABLE_NOT_PRESENT;

    /* PDPT */
    PX86PDPT pPDPT;
    int rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pml4e.u & X86_PML4E_PG_MASK, &pPDPT);
    if (RT_FAILURE(rc))
        return rc;
    const unsigned iPDPT = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
    X86PDPE Pdpe = pPDPT->a[iPDPT];
    if (!(Pdpe.u & X86_PDPE_P))
        return VERR_PAGE_TABLE_NOT_PRESENT;

    /* PD */
    PX86PDPAE pPd;
    rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pdpe.u & X86_PDPE_PG_MASK, &pPd);
    if (RT_FAILURE(rc))
        return rc;
    const unsigned iPd = (GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK;
    Pde = pPd->a[iPd];

    /* Merge accessed, write, user and no-execute bits into the PDE: A, RW and
       US are only effective when set at every level, while NX takes effect
       when set at any level. */
    AssertCompile(X86_PML4E_A  == X86_PDPE_A     && X86_PML4E_A  == X86_PDE_A);
    AssertCompile(X86_PML4E_RW == X86_PDPE_RW    && X86_PML4E_RW == X86_PDE_RW);
    AssertCompile(X86_PML4E_US == X86_PDPE_US    && X86_PML4E_US == X86_PDE_US);
    AssertCompile(X86_PML4E_NX == X86_PDPE_LM_NX && X86_PML4E_NX == X86_PDE_PAE_NX);
    Pde.u &= (Pml4e.u & Pdpe.u) | ~(X86PGPAEUINT)(X86_PML4E_A | X86_PML4E_RW | X86_PML4E_US);
    Pde.u |= (Pml4e.u | Pdpe.u) & X86_PML4E_NX;

# elif PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED_PAE
    X86PDEPAE Pde = pgmShwGetPaePDE(pVCpu, GCPtr);

# elif PGM_SHW_TYPE == PGM_TYPE_EPT
    PEPTPD pPDDst;
    int rc = pgmShwGetEPTPDPtr(pVCpu, GCPtr, NULL, &pPDDst);
    if (rc == VINF_SUCCESS) /** @todo this function isn't expected to return informational status codes. Check callers / fix. */
    { /* likely */ }
    else
    {
        AssertRC(rc);
        return rc;
    }
    Assert(pPDDst);

    const unsigned iPd = ((GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK);
    EPTPDE Pde = pPDDst->a[iPd];

# elif PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_NESTED_32BIT
    X86PDE Pde = pgmShwGet32BitPDE(pVCpu, GCPtr);

# else
#  error "Misconfigured PGM_SHW_TYPE or something..."
# endif
    if (!SHW_PDE_IS_P(Pde))
        return VERR_PAGE_TABLE_NOT_PRESENT;

    /* Deal with large pages. */
    if (SHW_PDE_IS_BIG(Pde))
    {
        /*
         * Store the results.
         * RW and US flags depend on the entire page translation hierarchy - except for
         * legacy PAE which has a simplified PDPE.
         */
        if (pfFlags)
        {
            *pfFlags = (Pde.u & ~SHW_PDE_PG_MASK);
# if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE) || PGM_SHW_TYPE == PGM_TYPE_NESTED_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED_AMD64
            if (   (Pde.u & X86_PTE_PAE_NX)
#  if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE)
                && CPUMIsGuestNXEnabled(pVCpu) /** @todo why do we have to check the guest state here? */
#  endif
               )
                *pfFlags |= X86_PTE_PAE_NX;
# endif
        }

        /* Return the address of the exact 4K page within the large page. */
        if (pHCPhys)
            *pHCPhys = (Pde.u & SHW_PDE_PG_MASK) + (GCPtr & (RT_BIT(SHW_PD_SHIFT) - 1) & X86_PAGE_4K_BASE_MASK);

        return VINF_SUCCESS;
    }

    /*
     * Get PT entry.
     */
    PSHWPT pPT;
    int rc2 = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pde.u & SHW_PDE_PG_MASK, &pPT);
    if (RT_FAILURE(rc2))
        return rc2;
    const unsigned iPt = (GCPtr >> SHW_PT_SHIFT) & SHW_PT_MASK;
    SHWPTE Pte = pPT->a[iPt];
    if (!SHW_PTE_IS_P(Pte))
        return VERR_PAGE_NOT_PRESENT;

    /*
     * Store the results.
     * RW and US flags depend on the entire page translation hierarchy - except for
     * legacy PAE which has a simplified PDPE.
     */
    if (pfFlags)
    {
        *pfFlags = (SHW_PTE_GET_U(Pte) & ~SHW_PTE_PG_MASK)
                 & ((Pde.u & (X86_PTE_RW | X86_PTE_US)) | ~(uint64_t)(X86_PTE_RW | X86_PTE_US));

# if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE) || PGM_SHW_TYPE == PGM_TYPE_NESTED_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED_AMD64
        /* The NX bit is determined by a bitwise OR between the PT and PD entries. */
        if (   ((SHW_PTE_GET_U(Pte) | Pde.u) & X86_PTE_PAE_NX)
#  if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE)
            && CPUMIsGuestNXEnabled(pVCpu) /** @todo why do we have to check the guest state here? */
#  endif
           )
            *pfFlags |= X86_PTE_PAE_NX;
# endif
    }

    if (pHCPhys)
        *pHCPhys = SHW_PTE_GET_HCPHYS(Pte);

    return VINF_SUCCESS;
#endif /* PGM_SHW_TYPE != PGM_TYPE_NONE */
}
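
/*
 * Usage sketch (illustrative; assumes the PGM_SHW_PFN dispatch macro from
 * PGMInternal.h and that the caller owns the PGM lock):
 *
 *     uint64_t fFlags;
 *     RTHCPHYS HCPhys;
 *     int rc = PGM_SHW_PFN(GetPage, pVCpu)(pVCpu, GCPtr, &fFlags, &HCPhys);
 *     if (RT_SUCCESS(rc) && (fFlags & X86_PTE_RW))
 *         ... the page is mapped writable in the shadow tables ...
 */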


/**
 * Modify page flags for a range of pages in the shadow context.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range. Page aligned!
 * @param   cb          Size (in bytes) of the range to apply the modification to. Page aligned!
 * @param   fFlags      The OR mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 *                      Be extremely CAREFUL with ~'ing values because they can be 32-bit!
 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
 * @remark  You must use PGMMapModifyPage() for pages in a mapping.
 */
PGM_SHW_DECL(int, ModifyPage)(PVMCPUCC pVCpu, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
{
#if PGM_SHW_TYPE == PGM_TYPE_NONE
    RT_NOREF(pVCpu, GCPtr, cb, fFlags, fMask, fOpFlags);
    AssertFailed();
    return VERR_PGM_SHW_NONE_IPE;

#else /* PGM_SHW_TYPE != PGM_TYPE_NONE */
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PGM_LOCK_ASSERT_OWNER(pVM);

    /*
     * Walk page tables and pages till we're done.
     */
    int rc;
    for (;;)
    {
        /*
         * Get the PDE.
         */
# if PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_NESTED_AMD64
        X86PDEPAE Pde;
        /* PML4 */
        X86PML4E Pml4e = pgmShwGetLongModePML4E(pVCpu, GCPtr);
        if (!(Pml4e.u & X86_PML4E_P))
            return VERR_PAGE_TABLE_NOT_PRESENT;

        /* PDPT */
        PX86PDPT pPDPT;
        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pml4e.u & X86_PML4E_PG_MASK, &pPDPT);
        if (RT_FAILURE(rc))
            return rc;
        const unsigned iPDPT = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
        X86PDPE Pdpe = pPDPT->a[iPDPT];
        if (!(Pdpe.u & X86_PDPE_P))
            return VERR_PAGE_TABLE_NOT_PRESENT;

        /* PD */
        PX86PDPAE pPd;
        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pdpe.u & X86_PDPE_PG_MASK, &pPd);
        if (RT_FAILURE(rc))
            return rc;
        const unsigned iPd = (GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK;
        Pde = pPd->a[iPd];

# elif PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED_PAE
        X86PDEPAE Pde = pgmShwGetPaePDE(pVCpu, GCPtr);

# elif PGM_SHW_TYPE == PGM_TYPE_EPT
        const unsigned iPd = ((GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK);
        PEPTPD pPDDst;
        EPTPDE Pde;

        rc = pgmShwGetEPTPDPtr(pVCpu, GCPtr, NULL, &pPDDst);
        if (rc != VINF_SUCCESS)
        {
            AssertRC(rc);
            return rc;
        }
        Assert(pPDDst);
        Pde = pPDDst->a[iPd];

# else /* PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_NESTED_32BIT */
        X86PDE Pde = pgmShwGet32BitPDE(pVCpu, GCPtr);
# endif
        if (!SHW_PDE_IS_P(Pde))
            return VERR_PAGE_TABLE_NOT_PRESENT;

        AssertFatal(!SHW_PDE_IS_BIG(Pde));

        /*
         * Map the page table.
         */
        PSHWPT pPT;
        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pde.u & SHW_PDE_PG_MASK, &pPT);
        if (RT_FAILURE(rc))
            return rc;

        unsigned iPTE = (GCPtr >> SHW_PT_SHIFT) & SHW_PT_MASK;
        while (iPTE < RT_ELEMENTS(pPT->a))
        {
            if (SHW_PTE_IS_P(pPT->a[iPTE]))
            {
                SHWPTE const OrgPte = pPT->a[iPTE];
                SHWPTE       NewPte;

                SHW_PTE_SET(NewPte, (SHW_PTE_GET_U(OrgPte) & (fMask | SHW_PTE_PG_MASK)) | (fFlags & ~SHW_PTE_PG_MASK));
                if (!SHW_PTE_IS_P(NewPte))
                {
                    /** @todo Some CSAM code path might end up here and upset
                     *  the page pool. */
                    AssertFailed();
                }
                else if (   SHW_PTE_IS_RW(NewPte)
                         && !SHW_PTE_IS_RW(OrgPte)
                         && !(fOpFlags & PGM_MK_PG_IS_MMIO2) )
                {
                    /** @todo Optimize \#PF handling by caching data.  We can
                     *        then use this when PGM_MK_PG_IS_WRITE_FAULT is
                     *        set instead of resolving the guest physical
                     *        address yet again. */
                    PGMPTWALK GstWalk;
                    rc = PGMGstGetPage(pVCpu, GCPtr, &GstWalk);
                    AssertRC(rc);
                    if (RT_SUCCESS(rc))
                    {
                        Assert((GstWalk.fEffective & X86_PTE_RW) || !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP /* allow netware hack */));
                        PPGMPAGE pPage = pgmPhysGetPage(pVM, GstWalk.GCPhys);
                        Assert(pPage);
                        if (pPage)
                        {
                            rc = pgmPhysPageMakeWritable(pVM, pPage, GstWalk.GCPhys);
                            AssertRCReturn(rc, rc);
                            Log(("%s: pgmPhysPageMakeWritable on %RGv / %RGp %R[pgmpage]\n", __PRETTY_FUNCTION__, GCPtr, GstWalk.GCPhys, pPage));
                        }
                    }
                }

                SHW_PTE_ATOMIC_SET2(pPT->a[iPTE], NewPte);
# if PGM_SHW_TYPE == PGM_TYPE_EPT
                HMInvalidatePhysPage(pVM, (RTGCPHYS)GCPtr);
# else
                PGM_INVL_PG_ALL_VCPU(pVM, GCPtr);
# endif
            }

            /* next page */
            cb -= HOST_PAGE_SIZE;
            if (!cb)
                return VINF_SUCCESS;
            GCPtr += HOST_PAGE_SIZE;
            iPTE++;
        }
    }
#endif /* PGM_SHW_TYPE != PGM_TYPE_NONE */
}
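
/*
 * Usage sketch (illustrative; assumes the PGM_SHW_PFN dispatch macro): since
 * each entry is rewritten as (old AND fMask) OR fFlags, write-protecting a
 * single page means clearing X86_PTE_RW via the mask and ORing in nothing.
 * Note the 64-bit cast before ~'ing, as the doc comment above warns:
 *
 *     rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, HOST_PAGE_SIZE,
 *                                         0,                      // fFlags: no bits to set
 *                                         ~(uint64_t)X86_PTE_RW,  // fMask:  clear RW only
 *                                         0);                     // fOpFlags
 */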


#ifdef IN_RING3
/**
 * Relocate any GC pointers related to shadow mode paging.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   offDelta    The relocation offset.
 */
PGM_SHW_DECL(int, Relocate)(PVMCPUCC pVCpu, RTGCPTR offDelta)
{
    RT_NOREF(pVCpu, offDelta);
    return VINF_SUCCESS;
}
#endif
