VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllShw.h

Last change on this file was 103720, checked in by vboxsync, 3 months ago

VMM/PGM: Nested VMX: bugref:10607 Fixed getting shadow page when the guest paging mode (SLAT) is EPT.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 31.5 KB
/* $Id: PGMAllShw.h 103720 2024-03-07 09:36:08Z vboxsync $ */
/** @file
 * VBox - Page Manager, Shadow Paging Template - All context code.
 */

/*
 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
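/*
 * Note: this template is compiled several times, once per shadow paging mode,
 * with PGM_SHW_TYPE (and the function name decoration) defined by the including
 * code.  That is why every SHW_* macro is #undef'd below before being redefined
 * for the mode currently being instantiated.  An instantiation site might look
 * roughly like this (illustrative sketch only; the exact decoration macro names
 * are assumed from the surrounding PGM code, not taken from this file):
 *
 *     #define PGM_SHW_TYPE        PGM_TYPE_EPT
 *     #define PGM_SHW_NAME(name)  PGM_SHW_NAME_EPT(name)
 *     #include "PGMAllShw.h"
 *     #undef  PGM_SHW_TYPE
 *     #undef  PGM_SHW_NAME
 */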
#undef SHWUINT
#undef SHWPT
#undef PSHWPT
#undef SHWPTE
#undef PSHWPTE
#undef SHWPD
#undef PSHWPD
#undef SHWPDE
#undef PSHWPDE
#undef SHW_PDE_PG_MASK
#undef SHW_PD_SHIFT
#undef SHW_PD_MASK
#undef SHW_PDE_ATOMIC_SET
#undef SHW_PDE_ATOMIC_SET2
#undef SHW_PDE_IS_P
#undef SHW_PDE_IS_A
#undef SHW_PDE_IS_BIG
#undef SHW_PTE_PG_MASK
#undef SHW_PTE_IS_P
#undef SHW_PTE_IS_RW
#undef SHW_PTE_IS_US
#undef SHW_PTE_IS_A
#undef SHW_PTE_IS_D
#undef SHW_PTE_IS_P_RW
#undef SHW_PTE_IS_TRACK_DIRTY
#undef SHW_PTE_GET_HCPHYS
#undef SHW_PTE_GET_U
#undef SHW_PTE_LOG64
#undef SHW_PTE_SET
#undef SHW_PTE_ATOMIC_SET
#undef SHW_PTE_ATOMIC_SET2
#undef SHW_PTE_SET_RO
#undef SHW_PTE_SET_RW
#undef SHW_PT_SHIFT
#undef SHW_PT_MASK
#undef SHW_TOTAL_PD_ENTRIES
#undef SHW_PDPT_SHIFT
#undef SHW_PDPT_MASK
#undef SHW_PDPE_PG_MASK

#if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_NESTED_32BIT
# define SHWUINT uint32_t
# define SHWPT X86PT
# define PSHWPT PX86PT
# define SHWPTE X86PTE
# define PSHWPTE PX86PTE
# define SHWPD X86PD
# define PSHWPD PX86PD
# define SHWPDE X86PDE
# define PSHWPDE PX86PDE
# define SHW_PDE_PG_MASK X86_PDE_PG_MASK
# define SHW_PD_SHIFT X86_PD_SHIFT
# define SHW_PD_MASK X86_PD_MASK
# define SHW_TOTAL_PD_ENTRIES X86_PG_ENTRIES
# define SHW_PDE_IS_P(Pde) ( (Pde).u & X86_PDE_P )
# define SHW_PDE_IS_A(Pde) ( (Pde).u & X86_PDE_A )
# define SHW_PDE_IS_BIG(Pde) ( (Pde).u & X86_PDE_PS )
# define SHW_PDE_ATOMIC_SET(Pde, uNew) do { ASMAtomicWriteU32(&(Pde).u, (uNew)); } while (0)
# define SHW_PDE_ATOMIC_SET2(Pde, Pde2) do { ASMAtomicWriteU32(&(Pde).u, (Pde2).u); } while (0)
# define SHW_PTE_PG_MASK X86_PTE_PG_MASK
# define SHW_PTE_IS_P(Pte) ( (Pte).u & X86_PTE_P )
# define SHW_PTE_IS_RW(Pte) ( (Pte).u & X86_PTE_RW )
# define SHW_PTE_IS_US(Pte) ( (Pte).u & X86_PTE_US )
# define SHW_PTE_IS_A(Pte) ( (Pte).u & X86_PTE_A )
# define SHW_PTE_IS_D(Pte) ( (Pte).u & X86_PTE_D )
# define SHW_PTE_IS_P_RW(Pte) ( ((Pte).u & (X86_PTE_P | X86_PTE_RW)) == (X86_PTE_P | X86_PTE_RW) )
# define SHW_PTE_IS_TRACK_DIRTY(Pte) ( !!((Pte).u & PGM_PTFLAGS_TRACK_DIRTY) )
# define SHW_PTE_GET_HCPHYS(Pte) ( (Pte).u & X86_PTE_PG_MASK )
# define SHW_PTE_LOG64(Pte) ( (uint64_t)(Pte).u )
# define SHW_PTE_GET_U(Pte) ( (Pte).u ) /**< Use with care. */
# define SHW_PTE_SET(Pte, uNew) do { (Pte).u = (uNew); } while (0)
# define SHW_PTE_ATOMIC_SET(Pte, uNew) do { ASMAtomicWriteU32(&(Pte).u, (uNew)); } while (0)
# define SHW_PTE_ATOMIC_SET2(Pte, Pte2) do { ASMAtomicWriteU32(&(Pte).u, (Pte2).u); } while (0)
# define SHW_PTE_SET_RO(Pte) do { (Pte).u &= ~(X86PGUINT)X86_PTE_RW; } while (0)
# define SHW_PTE_SET_RW(Pte) do { (Pte).u |= X86_PTE_RW; } while (0)
# define SHW_PT_SHIFT X86_PT_SHIFT
# define SHW_PT_MASK X86_PT_MASK

#elif PGM_SHW_TYPE == PGM_TYPE_EPT
# define SHWUINT uint64_t
# define SHWPT EPTPT
# define PSHWPT PEPTPT
# define SHWPTE EPTPTE
# define PSHWPTE PEPTPTE
# define SHWPD EPTPD
# define PSHWPD PEPTPD
# define SHWPDE EPTPDE
# define PSHWPDE PEPTPDE
# define SHW_PDE_PG_MASK EPT_PDE_PG_MASK
# define SHW_PD_SHIFT EPT_PD_SHIFT
# define SHW_PD_MASK EPT_PD_MASK
# define SHW_PDE_IS_P(Pde) ( (Pde).u & EPT_E_READ /* always set */ )
# define SHW_PDE_IS_A(Pde) ( 1 ) /* We don't use EPT_E_ACCESSED, use with care! */
# define SHW_PDE_IS_BIG(Pde) ( (Pde).u & EPT_E_LEAF )
# define SHW_PDE_ATOMIC_SET(Pde, uNew) do { ASMAtomicWriteU64(&(Pde).u, (uNew)); } while (0)
# define SHW_PDE_ATOMIC_SET2(Pde, Pde2) do { ASMAtomicWriteU64(&(Pde).u, (Pde2).u); } while (0)
# define SHW_PTE_PG_MASK EPT_PTE_PG_MASK
# define SHW_PTE_IS_P(Pte) ( (Pte).u & EPT_E_READ ) /* Approximation, works for us. */
# define SHW_PTE_IS_RW(Pte) ( (Pte).u & EPT_E_WRITE )
# define SHW_PTE_IS_US(Pte) ( true )
# define SHW_PTE_IS_A(Pte) ( true )
# define SHW_PTE_IS_D(Pte) ( true )
# define SHW_PTE_IS_P_RW(Pte) ( ((Pte).u & (EPT_E_READ | EPT_E_WRITE)) == (EPT_E_READ | EPT_E_WRITE) )
# define SHW_PTE_IS_TRACK_DIRTY(Pte) ( false )
# define SHW_PTE_GET_HCPHYS(Pte) ( (Pte).u & EPT_PTE_PG_MASK )
# define SHW_PTE_LOG64(Pte) ( (Pte).u )
# define SHW_PTE_GET_U(Pte) ( (Pte).u ) /**< Use with care. */
# define SHW_PTE_SET(Pte, uNew) do { (Pte).u = (uNew); } while (0)
# define SHW_PTE_ATOMIC_SET(Pte, uNew) do { ASMAtomicWriteU64(&(Pte).u, (uNew)); } while (0)
# define SHW_PTE_ATOMIC_SET2(Pte, Pte2) do { ASMAtomicWriteU64(&(Pte).u, (Pte2).u); } while (0)
# define SHW_PTE_SET_RO(Pte) do { (Pte).u &= ~(uint64_t)EPT_E_WRITE; } while (0)
# define SHW_PTE_SET_RW(Pte) do { (Pte).u |= EPT_E_WRITE; } while (0)
# define SHW_PT_SHIFT EPT_PT_SHIFT
# define SHW_PT_MASK EPT_PT_MASK
# define SHW_PDPT_SHIFT EPT_PDPT_SHIFT
# define SHW_PDPT_MASK EPT_PDPT_MASK
# define SHW_PDPE_PG_MASK EPT_PDPE_PG_MASK
# define SHW_TOTAL_PD_ENTRIES (EPT_PG_AMD64_ENTRIES * EPT_PG_AMD64_PDPE_ENTRIES)

#else
# define SHWUINT uint64_t
# define SHWPT PGMSHWPTPAE
# define PSHWPT PPGMSHWPTPAE
# define SHWPTE PGMSHWPTEPAE
# define PSHWPTE PPGMSHWPTEPAE
# define SHWPD X86PDPAE
# define PSHWPD PX86PDPAE
# define SHWPDE X86PDEPAE
# define PSHWPDE PX86PDEPAE
# define SHW_PDE_PG_MASK X86_PDE_PAE_PG_MASK
# define SHW_PD_SHIFT X86_PD_PAE_SHIFT
# define SHW_PD_MASK X86_PD_PAE_MASK
# define SHW_PDE_IS_P(Pde) ( (Pde).u & X86_PDE_P )
# define SHW_PDE_IS_A(Pde) ( (Pde).u & X86_PDE_A )
# define SHW_PDE_IS_BIG(Pde) ( (Pde).u & X86_PDE_PS )
# define SHW_PDE_ATOMIC_SET(Pde, uNew) do { ASMAtomicWriteU64(&(Pde).u, (uNew)); } while (0)
# define SHW_PDE_ATOMIC_SET2(Pde, Pde2) do { ASMAtomicWriteU64(&(Pde).u, (Pde2).u); } while (0)
# define SHW_PTE_PG_MASK X86_PTE_PAE_PG_MASK
# define SHW_PTE_IS_P(Pte) PGMSHWPTEPAE_IS_P(Pte)
# define SHW_PTE_IS_RW(Pte) PGMSHWPTEPAE_IS_RW(Pte)
# define SHW_PTE_IS_US(Pte) PGMSHWPTEPAE_IS_US(Pte)
# define SHW_PTE_IS_A(Pte) PGMSHWPTEPAE_IS_A(Pte)
# define SHW_PTE_IS_D(Pte) PGMSHWPTEPAE_IS_D(Pte)
# define SHW_PTE_IS_P_RW(Pte) PGMSHWPTEPAE_IS_P_RW(Pte)
# define SHW_PTE_IS_TRACK_DIRTY(Pte) PGMSHWPTEPAE_IS_TRACK_DIRTY(Pte)
# define SHW_PTE_GET_HCPHYS(Pte) PGMSHWPTEPAE_GET_HCPHYS(Pte)
# define SHW_PTE_LOG64(Pte) PGMSHWPTEPAE_GET_LOG(Pte)
# define SHW_PTE_GET_U(Pte) PGMSHWPTEPAE_GET_U(Pte) /**< Use with care. */
# define SHW_PTE_SET(Pte, uNew) PGMSHWPTEPAE_SET(Pte, uNew)
# define SHW_PTE_ATOMIC_SET(Pte, uNew) PGMSHWPTEPAE_ATOMIC_SET(Pte, uNew)
# define SHW_PTE_ATOMIC_SET2(Pte, Pte2) PGMSHWPTEPAE_ATOMIC_SET2(Pte, Pte2)
# define SHW_PTE_SET_RO(Pte) PGMSHWPTEPAE_SET_RO(Pte)
# define SHW_PTE_SET_RW(Pte) PGMSHWPTEPAE_SET_RW(Pte)
# define SHW_PT_SHIFT X86_PT_PAE_SHIFT
# define SHW_PT_MASK X86_PT_PAE_MASK

# if PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_NESTED_AMD64 || /* whatever: */ PGM_SHW_TYPE == PGM_TYPE_NONE
#  define SHW_PDPT_SHIFT X86_PDPT_SHIFT
#  define SHW_PDPT_MASK X86_PDPT_MASK_AMD64
#  define SHW_PDPE_PG_MASK X86_PDPE_PG_MASK
#  define SHW_TOTAL_PD_ENTRIES (X86_PG_AMD64_ENTRIES * X86_PG_AMD64_PDPE_ENTRIES)

# elif PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED_PAE
#  define SHW_PDPT_SHIFT X86_PDPT_SHIFT
#  define SHW_PDPT_MASK X86_PDPT_MASK_PAE
#  define SHW_PDPE_PG_MASK X86_PDPE_PG_MASK
#  define SHW_TOTAL_PD_ENTRIES (X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPE_ENTRIES)

# else
#  error "Misconfigured PGM_SHW_TYPE or something..."
# endif
#endif

#if PGM_SHW_TYPE == PGM_TYPE_NONE && PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE)
# error "PGM_TYPE_IS_NESTED_OR_EPT is true for PGM_TYPE_NONE!"
#endif
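
/*
 * Illustrative sketch (not part of the original file): code in this template is
 * written against the SHW_* accessors above so that the same logic works for the
 * 32-bit, PAE/AMD64 and EPT page table layouts.  A hypothetical helper counting
 * the present entries of a shadow page table would be format agnostic:
 *
 *     static unsigned shwCountPresentPtes(PSHWPT pPT)
 *     {
 *         unsigned cPresent = 0;
 *         for (unsigned i = 0; i < RT_ELEMENTS(pPT->a); i++)
 *             if (SHW_PTE_IS_P(pPT->a[i]))    // expands per the current PGM_SHW_TYPE
 *                 cPresent++;
 *         return cPresent;
 *     }
 */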


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
RT_C_DECLS_BEGIN
PGM_SHW_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys);
PGM_SHW_DECL(int, ModifyPage)(PVMCPUCC pVCpu, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags);
PGM_SHW_DECL(int, Exit)(PVMCPUCC pVCpu);
#ifdef IN_RING3
PGM_SHW_DECL(int, Relocate)(PVMCPUCC pVCpu, RTGCPTR offDelta);
#endif
RT_C_DECLS_END


/**
 * Enters the shadow mode.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
PGM_SHW_DECL(int, Enter)(PVMCPUCC pVCpu)
{
#if PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE)

# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    RTGCPHYS GCPhysCR3;
    PGMPOOLKIND enmKind;
    if (pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_EPT)
    {
        GCPhysCR3 = RT_BIT_64(63);
        enmKind = PGMPOOLKIND_ROOT_NESTED;
    }
    else
    {
        GCPhysCR3 = pVCpu->pgm.s.uEptPtr & EPT_EPTP_PG_MASK;
        enmKind = PGMPOOLKIND_EPT_PML4_FOR_EPT_PML4;
    }
# else
    RTGCPHYS const GCPhysCR3 = RT_BIT_64(63);
    PGMPOOLKIND const enmKind = PGMPOOLKIND_ROOT_NESTED;
# endif
    PVMCC const pVM = pVCpu->CTX_SUFF(pVM);

    Assert(HMIsNestedPagingActive(pVM));
    Assert(pVM->pgm.s.fNestedPaging);
    Assert(!pVCpu->pgm.s.pShwPageCR3R3);

    PGM_LOCK_VOID(pVM);

    PPGMPOOLPAGE pNewShwPageCR3;
    int rc = pgmPoolAlloc(pVM, GCPhysCR3, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
                          NIL_PGMPOOL_IDX, UINT32_MAX, true /*fLockPage*/,
                          &pNewShwPageCR3);
    AssertLogRelRCReturnStmt(rc, PGM_UNLOCK(pVM), rc);

    pVCpu->pgm.s.pShwPageCR3R3 = pgmPoolConvertPageToR3(pVM->pgm.s.CTX_SUFF(pPool), pNewShwPageCR3);
    pVCpu->pgm.s.pShwPageCR3R0 = pgmPoolConvertPageToR0(pVM->pgm.s.CTX_SUFF(pPool), pNewShwPageCR3);

    PGM_UNLOCK(pVM);

    Log(("Enter nested shadow paging mode: root %RHv phys %RHp\n", pVCpu->pgm.s.pShwPageCR3R3, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key));
#else
    NOREF(pVCpu);
#endif
    return VINF_SUCCESS;
}


/**
 * Exits the shadow mode.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
PGM_SHW_DECL(int, Exit)(PVMCPUCC pVCpu)
{
#if PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE)
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    if (pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
    {
        PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

        PGM_LOCK_VOID(pVM);

# if defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) && PGM_SHW_TYPE == PGM_TYPE_EPT
        if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
            pgmPoolUnlockPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
# endif

        /* Do *not* unlock this page as we have two of them floating around in the 32-bit host & 64-bit guest case.
         * We currently assert when you try to free one of them; don't bother to really allow this.
         *
         * Note that this is two nested paging root pages max. This isn't a leak. They are reused.
         */
        /* pgmPoolUnlockPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)); */

        pgmPoolFreeByPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3), NIL_PGMPOOL_IDX, UINT32_MAX);
        pVCpu->pgm.s.pShwPageCR3R3 = 0;
        pVCpu->pgm.s.pShwPageCR3R0 = 0;

        PGM_UNLOCK(pVM);

        Log(("Leave nested shadow paging mode\n"));
    }
#else
    RT_NOREF_PV(pVCpu);
#endif
    return VINF_SUCCESS;
}


#if 0
PGM_SHW_DECL(int, NestedGetPage)(PVMCPUCC pVCpu, PEPTPD pEptPd, PPGMPTWALK pWalk, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
{
#if PGM_SHW_TYPE == PGM_TYPE_EPT
    RTGCPHYS const GCPhysNested = pWalk->GCPhysNested;
    unsigned const iEptPd = ((GCPhysNested >> SHW_PD_SHIFT) & SHW_PD_MASK);
    Assert(iEptPd < EPT_PG_ENTRIES);
    SHWPDE EptPde = pEptPd->a[iEptPd];
    if (!SHW_PDE_IS_P(EptPde))
    {
        *pfFlags = 0;
        *pHCPhys = NIL_RTHCPHYS;
        return VERR_PAGE_TABLE_NOT_PRESENT;
    }

    if (SHW_PDE_IS_BIG(EptPde))
    {
        Assert(pWalk->fBigPage);
        if (pfFlags)
            *pfFlags = (EptPde.u & ~SHW_PDE_PG_MASK);
        if (pHCPhys)
            *pHCPhys = (EptPde.u & EPT_PDE2M_PG_MASK) + (pWalk->GCPhys & (RT_BIT(EPT_PD_SHIFT) - 1) & X86_PAGE_4K_BASE_MASK);
        return VINF_SUCCESS;
    }

    PSHWPT pEptPt;
    int const rc = PGM_HCPHYS_2_PTR(pVCpu->CTX_SUFF(pVM), pVCpu, EptPde.u & EPT_PDE_PG_MASK, &pEptPt);
    if (RT_FAILURE(rc))
    {
        *pfFlags = 0;
        *pHCPhys = NIL_RTHCPHYS;
        return rc;
    }

    unsigned const iEptPt = (GCPhysNested >> SHW_PT_SHIFT) & SHW_PT_MASK;
    Assert(iEptPt < EPT_PG_ENTRIES);
    SHWPTE EptPte = pEptPt->a[iEptPt];
    if (!SHW_PTE_IS_P(EptPte))
    {
        *pfFlags = 0;
        *pHCPhys = NIL_RTHCPHYS;
        return VERR_PAGE_NOT_PRESENT;
    }

    if (pfFlags)
    {
        /* Read, Write and Execute bits (Present mask) are cumulative. */
        *pfFlags = (SHW_PTE_GET_U(EptPte) & ~SHW_PTE_PG_MASK)
                 & ((EptPde.u & EPT_PRESENT_MASK) | ~(uint64_t)EPT_PRESENT_MASK);
    }
    if (pHCPhys)
        *pHCPhys = SHW_PTE_GET_HCPHYS(EptPte);
    return VINF_SUCCESS;

#else /* PGM_SHW_TYPE != PGM_TYPE_EPT */
    RT_NOREF(pVCpu, pEptPd, pWalk, *pfFlags, pHCPhys);
    AssertFailed();
    return VERR_PGM_SHW_NONE_IPE;
#endif /* PGM_SHW_TYPE != PGM_TYPE_EPT */
}
#endif


/**
 * Gets effective page information (from the VMM page directory).
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*.
 * @param   pHCPhys     Where to store the HC physical address of the page.
 *                      This is page aligned.
 * @remark  You should use PGMMapGetPage() for pages in a mapping.
 */
PGM_SHW_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
{
#if PGM_SHW_TYPE == PGM_TYPE_NONE
    RT_NOREF(pVCpu, GCPtr);
    AssertFailed();
    *pfFlags = 0;
    *pHCPhys = NIL_RTHCPHYS;
    return VERR_PGM_SHW_NONE_IPE;

#else /* PGM_SHW_TYPE != PGM_TYPE_NONE */
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);

    PGM_LOCK_ASSERT_OWNER(pVM);

    /*
     * Get the PDE.
     */
# if PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_NESTED_AMD64
    X86PDEPAE Pde;

    /* PML4 */
    X86PML4E Pml4e = pgmShwGetLongModePML4E(pVCpu, GCPtr);
    if (!(Pml4e.u & X86_PML4E_P))
        return VERR_PAGE_TABLE_NOT_PRESENT;

    /* PDPT */
    PX86PDPT pPDPT;
    int rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pml4e.u & X86_PML4E_PG_MASK, &pPDPT);
    if (RT_FAILURE(rc))
        return rc;
    const unsigned iPDPT = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
    X86PDPE Pdpe = pPDPT->a[iPDPT];
    if (!(Pdpe.u & X86_PDPE_P))
        return VERR_PAGE_TABLE_NOT_PRESENT;

    /* PD */
    PX86PDPAE pPd;
    rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pdpe.u & X86_PDPE_PG_MASK, &pPd);
    if (RT_FAILURE(rc))
        return rc;
    const unsigned iPd = (GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK;
    Pde = pPd->a[iPd];

    /* Merge accessed, write, user and no-execute bits into the PDE. */
    AssertCompile(X86_PML4E_A == X86_PDPE_A && X86_PML4E_A == X86_PDE_A);
    AssertCompile(X86_PML4E_RW == X86_PDPE_RW && X86_PML4E_RW == X86_PDE_RW);
    AssertCompile(X86_PML4E_US == X86_PDPE_US && X86_PML4E_US == X86_PDE_US);
    AssertCompile(X86_PML4E_NX == X86_PDPE_LM_NX && X86_PML4E_NX == X86_PDE_PAE_NX);
    Pde.u &= (Pml4e.u & Pdpe.u) | ~(X86PGPAEUINT)(X86_PML4E_A | X86_PML4E_RW | X86_PML4E_US);
    Pde.u |= (Pml4e.u | Pdpe.u) & X86_PML4E_NX;

# elif PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED_PAE
    X86PDEPAE Pde = pgmShwGetPaePDE(pVCpu, GCPtr);

# elif PGM_SHW_TYPE == PGM_TYPE_EPT
    EPTPDE Pde;
    const unsigned iPd = ((GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK);

    if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_DIRECT)
    {
        PEPTPD pPDDst;
        int rc = pgmShwGetEPTPDPtr(pVCpu, GCPtr, NULL, &pPDDst);
        if (rc == VINF_SUCCESS) /** @todo this function isn't expected to return informational status codes. Check callers / fix. */
        { /* likely */ }
        else
        {
            AssertRC(rc);
            return rc;
        }
        Assert(pPDDst);
        Pde = pPDDst->a[iPd];
    }
    else
    {
#  ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT);
        Assert(!(GCPtr & GUEST_PAGE_OFFSET_MASK));
        PGMPTWALK Walk;
        PGMPTWALKGST GstWalkAll;
        RTGCPHYS const GCPhysNestedPage = GCPtr;
        int rc = pgmGstSlatWalk(pVCpu, GCPhysNestedPage, false /*fIsLinearAddrValid*/, 0 /*GCPtrNestedFault*/, &Walk,
                                &GstWalkAll);
        if (RT_SUCCESS(rc))
        {
#   ifdef DEBUG_ramshankar
            /* Paranoia. */
            Assert(GstWalkAll.enmType == PGMPTWALKGSTTYPE_EPT);
            Assert(Walk.fSucceeded);
            Assert(Walk.fEffective & (PGM_PTATTRS_EPT_R_MASK | PGM_PTATTRS_EPT_W_MASK | PGM_PTATTRS_EPT_X_SUPER_MASK));
            Assert(Walk.fIsSlat);
            Assert(RT_BOOL(Walk.fEffective & PGM_PTATTRS_R_MASK) == RT_BOOL(Walk.fEffective & PGM_PTATTRS_EPT_R_MASK));
            Assert(RT_BOOL(Walk.fEffective & PGM_PTATTRS_W_MASK) == RT_BOOL(Walk.fEffective & PGM_PTATTRS_EPT_W_MASK));
            Assert(RT_BOOL(Walk.fEffective & PGM_PTATTRS_NX_MASK) == !RT_BOOL(Walk.fEffective & PGM_PTATTRS_EPT_X_SUPER_MASK));
#   endif
            PGM_A20_ASSERT_MASKED(pVCpu, Walk.GCPhys);

            /* Update the nested-guest physical address with the translated guest-physical address. */
            GCPtr = Walk.GCPhys;

            /* Get the PD. */
            PSHWPD pEptPd;
            rc = pgmShwGetNestedEPTPDPtr(pVCpu, GCPhysNestedPage, NULL /*ppPdpt*/, &pEptPd, &GstWalkAll);
            AssertRCReturn(rc, rc);
            Assert(pEptPd);

            Assert(iPd < EPT_PG_ENTRIES);
            Pde = pEptPd->a[iPd];
        }
        else
        {
            Log(("Failed to translate nested-guest physical address %#RGp rc=%Rrc\n", GCPhysNestedPage, rc));
            return rc;
        }

#  else  /* !VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
        AssertFailed();
        return VERR_PGM_SHW_NONE_IPE;
#  endif /* !VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
    }

# elif PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_NESTED_32BIT
    X86PDE Pde = pgmShwGet32BitPDE(pVCpu, GCPtr);

# else
#  error "Misconfigured PGM_SHW_TYPE or something..."
# endif
    if (!SHW_PDE_IS_P(Pde))
        return VERR_PAGE_TABLE_NOT_PRESENT;

    /* Deal with large pages. */
    if (SHW_PDE_IS_BIG(Pde))
    {
        /*
         * Store the results.
         * RW and US flags depend on the entire page translation hierarchy - except for
         * legacy PAE which has a simplified PDPE.
         */
        if (pfFlags)
        {
            *pfFlags = (Pde.u & ~SHW_PDE_PG_MASK);
# if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE) || PGM_SHW_TYPE == PGM_TYPE_NESTED_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED_AMD64
            if (   (Pde.u & X86_PTE_PAE_NX)
#  if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE)
                && CPUMIsGuestNXEnabled(pVCpu) /** @todo why do we have to check the guest state here? */
#  endif
               )
                *pfFlags |= X86_PTE_PAE_NX;
# endif
        }

        if (pHCPhys)
            *pHCPhys = (Pde.u & SHW_PDE_PG_MASK) + (GCPtr & (RT_BIT(SHW_PD_SHIFT) - 1) & X86_PAGE_4K_BASE_MASK);

        return VINF_SUCCESS;
    }
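
    /* Illustrative worked example for the large-page branch above (hypothetical
       numbers, not from the original source): with 2 MB large pages SHW_PD_SHIFT
       is 21, so for a PDE base of 0x80000000 and GCPtr == 0x40323456 the offset
       term is 0x40323456 & 0x1fffff & ~0xfff = 0x123000, giving a returned
       *pHCPhys of 0x80123000. */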

    /*
     * Get PT entry.
     */
    PSHWPT pPT;
    int rc2 = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pde.u & SHW_PDE_PG_MASK, &pPT);
    if (RT_FAILURE(rc2))
        return rc2;
    const unsigned iPt = (GCPtr >> SHW_PT_SHIFT) & SHW_PT_MASK;
    SHWPTE Pte = pPT->a[iPt];
    if (!SHW_PTE_IS_P(Pte))
        return VERR_PAGE_NOT_PRESENT;

    /*
     * Store the results.
     * RW and US flags depend on the entire page translation hierarchy - except for
     * legacy PAE which has a simplified PDPE.
     */
    if (pfFlags)
    {
        *pfFlags = (SHW_PTE_GET_U(Pte) & ~SHW_PTE_PG_MASK)
                 & ((Pde.u & (X86_PTE_RW | X86_PTE_US)) | ~(uint64_t)(X86_PTE_RW | X86_PTE_US));

# if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE) || PGM_SHW_TYPE == PGM_TYPE_NESTED_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED_AMD64
        /* The NX bit is determined by a bitwise OR between the PT and PD */
        if (   ((SHW_PTE_GET_U(Pte) | Pde.u) & X86_PTE_PAE_NX)
#  if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE)
            && CPUMIsGuestNXEnabled(pVCpu) /** @todo why do we have to check the guest state here? */
#  endif
           )
            *pfFlags |= X86_PTE_PAE_NX;
# endif
    }

    if (pHCPhys)
        *pHCPhys = SHW_PTE_GET_HCPHYS(Pte);

    return VINF_SUCCESS;
#endif /* PGM_SHW_TYPE != PGM_TYPE_NONE */
}


/**
 * Modify page flags for a range of pages in the shadow context.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range. Page aligned!
 * @param   cb          Size (in bytes) of the range to apply the modification to. Page aligned!
 * @param   fFlags      The OR mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 *                      Be extremely CAREFUL with ~'ing values because they can be 32-bit!
 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
 * @remark  You must use PGMMapModifyPage() for pages in a mapping.
 */
PGM_SHW_DECL(int, ModifyPage)(PVMCPUCC pVCpu, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
{
#if PGM_SHW_TYPE == PGM_TYPE_NONE
    RT_NOREF(pVCpu, GCPtr, cb, fFlags, fMask, fOpFlags);
    AssertFailed();
    return VERR_PGM_SHW_NONE_IPE;

#else /* PGM_SHW_TYPE != PGM_TYPE_NONE */
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PGM_LOCK_ASSERT_OWNER(pVM);

    /*
     * Walk page tables and pages till we're done.
     */
    int rc;
    for (;;)
    {
        /*
         * Get the PDE.
         */
# if PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_NESTED_AMD64
        X86PDEPAE Pde;
        /* PML4 */
        X86PML4E Pml4e = pgmShwGetLongModePML4E(pVCpu, GCPtr);
        if (!(Pml4e.u & X86_PML4E_P))
            return VERR_PAGE_TABLE_NOT_PRESENT;

        /* PDPT */
        PX86PDPT pPDPT;
        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pml4e.u & X86_PML4E_PG_MASK, &pPDPT);
        if (RT_FAILURE(rc))
            return rc;
        const unsigned iPDPT = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
        X86PDPE Pdpe = pPDPT->a[iPDPT];
        if (!(Pdpe.u & X86_PDPE_P))
            return VERR_PAGE_TABLE_NOT_PRESENT;

        /* PD */
        PX86PDPAE pPd;
        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pdpe.u & X86_PDPE_PG_MASK, &pPd);
        if (RT_FAILURE(rc))
            return rc;
        const unsigned iPd = (GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK;
        Pde = pPd->a[iPd];

# elif PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED_PAE
        X86PDEPAE Pde = pgmShwGetPaePDE(pVCpu, GCPtr);

# elif PGM_SHW_TYPE == PGM_TYPE_EPT
        EPTPDE Pde;
        const unsigned iPd = ((GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK);
        if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_DIRECT)
        {
            PEPTPD pPDDst;
            rc = pgmShwGetEPTPDPtr(pVCpu, GCPtr, NULL, &pPDDst);
            if (rc != VINF_SUCCESS)
            {
                AssertRC(rc);
                return rc;
            }
            Assert(pPDDst);
            Pde = pPDDst->a[iPd];
        }
        else
        {
#  ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
            Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT);
            Assert(!(GCPtr & GUEST_PAGE_OFFSET_MASK));
            PGMPTWALK Walk;
            PGMPTWALKGST GstWalkAll;
            RTGCPHYS const GCPhysNestedPage = GCPtr;
            rc = pgmGstSlatWalk(pVCpu, GCPhysNestedPage, false /*fIsLinearAddrValid*/, 0 /*GCPtrNestedFault*/, &Walk,
                                &GstWalkAll);
            if (RT_SUCCESS(rc))
            {
#   ifdef DEBUG_ramshankar
                /* Paranoia. */
                Assert(GstWalkAll.enmType == PGMPTWALKGSTTYPE_EPT);
                Assert(Walk.fSucceeded);
                Assert(Walk.fEffective & (PGM_PTATTRS_EPT_R_MASK | PGM_PTATTRS_EPT_W_MASK | PGM_PTATTRS_EPT_X_SUPER_MASK));
                Assert(Walk.fIsSlat);
                Assert(RT_BOOL(Walk.fEffective & PGM_PTATTRS_R_MASK) == RT_BOOL(Walk.fEffective & PGM_PTATTRS_EPT_R_MASK));
                Assert(RT_BOOL(Walk.fEffective & PGM_PTATTRS_W_MASK) == RT_BOOL(Walk.fEffective & PGM_PTATTRS_EPT_W_MASK));
                Assert(RT_BOOL(Walk.fEffective & PGM_PTATTRS_NX_MASK) == !RT_BOOL(Walk.fEffective & PGM_PTATTRS_EPT_X_SUPER_MASK));
#   endif
                PGM_A20_ASSERT_MASKED(pVCpu, Walk.GCPhys);
                Assert(!(fFlags & X86_PTE_RW) || (Walk.fEffective & PGM_PTATTRS_W_MASK));

                /* Update the nested-guest physical address with the translated guest-physical address. */
                GCPtr = Walk.GCPhys;

                /* Get the PD. */
                PSHWPD pEptPd;
                rc = pgmShwGetNestedEPTPDPtr(pVCpu, GCPhysNestedPage, NULL /*ppPdpt*/, &pEptPd, &GstWalkAll);
                AssertRCReturn(rc, rc);
                Assert(pEptPd);
                Assert(iPd < EPT_PG_ENTRIES);
                Pde = pEptPd->a[iPd];
            }
            else
            {
                Log(("Failed to translate nested-guest physical address %#RGp rc=%Rrc\n", GCPhysNestedPage, rc));
                return rc;
            }

#  else  /* !VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
            AssertFailed();
            return VERR_PGM_SHW_NONE_IPE;
#  endif /* !VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
        }

# else /* PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_NESTED_32BIT */
        X86PDE Pde = pgmShwGet32BitPDE(pVCpu, GCPtr);
# endif
        if (!SHW_PDE_IS_P(Pde))
            return VERR_PAGE_TABLE_NOT_PRESENT;

        AssertFatalMsg(!SHW_PDE_IS_BIG(Pde), ("Pde=%#RX64\n", (uint64_t)Pde.u));

        /*
         * Map the page table.
         */
        PSHWPT pPT;
        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pde.u & SHW_PDE_PG_MASK, &pPT);
        if (RT_FAILURE(rc))
            return rc;

        unsigned iPTE = (GCPtr >> SHW_PT_SHIFT) & SHW_PT_MASK;
        while (iPTE < RT_ELEMENTS(pPT->a))
        {
            if (SHW_PTE_IS_P(pPT->a[iPTE]))
            {
                SHWPTE const OrgPte = pPT->a[iPTE];
                SHWPTE NewPte;

                SHW_PTE_SET(NewPte, (SHW_PTE_GET_U(OrgPte) & (fMask | SHW_PTE_PG_MASK)) | (fFlags & ~SHW_PTE_PG_MASK));
                if (!SHW_PTE_IS_P(NewPte))
                {
                    /** @todo Some CSAM code path might end up here and upset
                     *  the page pool. */
                    AssertMsgFailed(("NewPte=%#RX64 OrgPte=%#RX64 GCPtr=%#RGv\n", SHW_PTE_LOG64(NewPte), SHW_PTE_LOG64(OrgPte), GCPtr));
                }
                else if (   SHW_PTE_IS_RW(NewPte)
                         && !SHW_PTE_IS_RW(OrgPte)
                         && !(fOpFlags & PGM_MK_PG_IS_MMIO2) )
                {
                    /** @todo Optimize \#PF handling by caching data. We can
                     *  then use this when PGM_MK_PG_IS_WRITE_FAULT is
                     *  set instead of resolving the guest physical
                     *  address yet again. */
                    PGMPTWALK GstWalk;
                    rc = PGMGstGetPage(pVCpu, GCPtr, &GstWalk);
                    AssertRC(rc);
                    if (RT_SUCCESS(rc))
                    {
                        Assert((GstWalk.fEffective & X86_PTE_RW) || !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP /* allow netware hack */));
                        PPGMPAGE pPage = pgmPhysGetPage(pVM, GstWalk.GCPhys);
                        Assert(pPage);
                        if (pPage)
                        {
                            rc = pgmPhysPageMakeWritable(pVM, pPage, GstWalk.GCPhys);
                            AssertRCReturn(rc, rc);
                            Log(("%s: pgmPhysPageMakeWritable on %RGv / %RGp %R[pgmpage]\n", __PRETTY_FUNCTION__, GCPtr, GstWalk.GCPhys, pPage));
                        }
                    }
                }

                SHW_PTE_ATOMIC_SET2(pPT->a[iPTE], NewPte);
                Assert((SHW_PTE_GET_U(NewPte) & EPT_E_LEAF) == (SHW_PTE_GET_U(OrgPte) & EPT_E_LEAF));

# if PGM_SHW_TYPE == PGM_TYPE_EPT
                HMInvalidatePhysPage(pVM, (RTGCPHYS)GCPtr);
# else
                PGM_INVL_PG_ALL_VCPU(pVM, GCPtr);
# endif
            }

            /* next page */
            cb -= HOST_PAGE_SIZE;
            if (!cb)
                return VINF_SUCCESS;
            GCPtr += HOST_PAGE_SIZE;
            iPTE++;
        }
    }
#endif /* PGM_SHW_TYPE != PGM_TYPE_NONE */
}
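
/*
 * Usage sketch (illustrative, derived from the update formula above, not from a
 * documented API contract): the new PTE value is
 *     (old & (fMask | SHW_PTE_PG_MASK)) | (fFlags & ~SHW_PTE_PG_MASK)
 * so a caller wanting to write-protect a page range would pass fFlags = 0 and
 * fMask = ~(uint64_t)X86_PTE_RW (note the 64-bit cast, per the @param warning),
 * leaving all other attribute bits and the page address untouched.
 */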


#ifdef IN_RING3
/**
 * Relocate any GC pointers related to shadow mode paging.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   offDelta    The relocation offset.
 */
PGM_SHW_DECL(int, Relocate)(PVMCPUCC pVCpu, RTGCPTR offDelta)
{
    RT_NOREF(pVCpu, offDelta);
    return VINF_SUCCESS;
}
#endif