VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PGMBth.h@50653

Last change on this file since 50653 was 46420, checked in by vboxsync, 11 years ago

VMM, recompiler: Purge deprecated macros.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 10.3 KB
/* $Id: PGMBth.h 46420 2013-06-06 16:27:25Z vboxsync $ */
/** @file
 * VBox - Page Manager / Monitor, Shadow+Guest Paging Template.
 */

/*
 * Copyright (C) 2006-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
RT_C_DECLS_BEGIN
PGM_BTH_DECL(int, InitData)(PVM pVM, PPGMMODEDATA pModeData, bool fResolveGCAndR0);
PGM_BTH_DECL(int, Enter)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3);
PGM_BTH_DECL(int, Relocate)(PVMCPU pVCpu, RTGCPTR offDelta);

PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken);
PGM_BTH_DECL(int, SyncCR3)(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal);
PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVMCPU pVCpu, RTGCPTR Addr, unsigned fPage, unsigned uError);
PGM_BTH_DECL(int, InvalidatePage)(PVMCPU pVCpu, RTGCPTR GCPtrPage);
PGM_BTH_DECL(int, PrefetchPage)(PVMCPU pVCpu, RTGCPTR GCPtrPage);
PGM_BTH_DECL(unsigned, AssertCR3)(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr = 0, RTGCPTR cb = ~(RTGCPTR)0);
PGM_BTH_DECL(int, MapCR3)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3);
PGM_BTH_DECL(int, UnmapCR3)(PVMCPU pVCpu);
RT_C_DECLS_END

/**
 * Initializes the both (shadow+guest) part of the paging mode data.
 *
 * @returns VBox status code.
 * @param   pVM              Pointer to the VM.
 * @param   pModeData        Pointer to the paging mode data to initialize.
 * @param   fResolveGCAndR0  Indicate whether or not GC and Ring-0 symbols can be resolved now.
 *                           This is used early in the init process to avoid trouble with PDM
 *                           not being initialized yet.
 */
PGM_BTH_DECL(int, InitData)(PVM pVM, PPGMMODEDATA pModeData, bool fResolveGCAndR0)
{
    Assert(pModeData->uShwType == PGM_SHW_TYPE); Assert(pModeData->uGstType == PGM_GST_TYPE);

    /* Ring 3 */
    pModeData->pfnR3BthRelocate             = PGM_BTH_NAME(Relocate);
    pModeData->pfnR3BthSyncCR3              = PGM_BTH_NAME(SyncCR3);
    pModeData->pfnR3BthInvalidatePage       = PGM_BTH_NAME(InvalidatePage);
    pModeData->pfnR3BthPrefetchPage         = PGM_BTH_NAME(PrefetchPage);
    pModeData->pfnR3BthVerifyAccessSyncPage = PGM_BTH_NAME(VerifyAccessSyncPage);
#ifdef VBOX_STRICT
    pModeData->pfnR3BthAssertCR3            = PGM_BTH_NAME(AssertCR3);
#endif
    pModeData->pfnR3BthMapCR3               = PGM_BTH_NAME(MapCR3);
    pModeData->pfnR3BthUnmapCR3             = PGM_BTH_NAME(UnmapCR3);

    if (fResolveGCAndR0)
    {
        int rc;

        if (!HMIsEnabled(pVM))
        {
#if PGM_SHW_TYPE != PGM_TYPE_AMD64 && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT /* No AMD64 for traditional virtualization, only VT-x and AMD-V. */
            /* RC */
            rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(Trap0eHandler),        &pModeData->pfnRCBthTrap0eHandler);
            AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(Trap0eHandler), rc), rc);
            rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(InvalidatePage),       &pModeData->pfnRCBthInvalidatePage);
            AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(InvalidatePage), rc), rc);
            rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(SyncCR3),              &pModeData->pfnRCBthSyncCR3);
            AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(SyncCR3), rc), rc);
            rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(PrefetchPage),         &pModeData->pfnRCBthPrefetchPage);
            AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(PrefetchPage), rc), rc);
            rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(VerifyAccessSyncPage), &pModeData->pfnRCBthVerifyAccessSyncPage);
            AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(VerifyAccessSyncPage), rc), rc);
# ifdef VBOX_STRICT
            rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(AssertCR3),            &pModeData->pfnRCBthAssertCR3);
            AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(AssertCR3), rc), rc);
# endif
            rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(MapCR3),               &pModeData->pfnRCBthMapCR3);
            AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(MapCR3), rc), rc);
            rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(UnmapCR3),             &pModeData->pfnRCBthUnmapCR3);
            AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(UnmapCR3), rc), rc);
#endif /* Not AMD64 shadow paging. */
        }

        /* Ring 0 */
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_BTH_NAME_R0_STR(Trap0eHandler),        &pModeData->pfnR0BthTrap0eHandler);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_R0_STR(Trap0eHandler), rc), rc);
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_BTH_NAME_R0_STR(InvalidatePage),       &pModeData->pfnR0BthInvalidatePage);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_R0_STR(InvalidatePage), rc), rc);
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_BTH_NAME_R0_STR(SyncCR3),              &pModeData->pfnR0BthSyncCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_R0_STR(SyncCR3), rc), rc);
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_BTH_NAME_R0_STR(PrefetchPage),         &pModeData->pfnR0BthPrefetchPage);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_R0_STR(PrefetchPage), rc), rc);
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_BTH_NAME_R0_STR(VerifyAccessSyncPage), &pModeData->pfnR0BthVerifyAccessSyncPage);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_R0_STR(VerifyAccessSyncPage), rc), rc);
#ifdef VBOX_STRICT
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_BTH_NAME_R0_STR(AssertCR3),            &pModeData->pfnR0BthAssertCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_R0_STR(AssertCR3), rc), rc);
#endif
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_BTH_NAME_R0_STR(MapCR3),               &pModeData->pfnR0BthMapCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_R0_STR(MapCR3), rc), rc);
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_BTH_NAME_R0_STR(UnmapCR3),             &pModeData->pfnR0BthUnmapCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_R0_STR(UnmapCR3), rc), rc);
    }
    return VINF_SUCCESS;
}

/**
 * Enters the shadow+guest mode.
 *
 * @returns VBox status code.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCPhysCR3   The physical address from the CR3 register.
 */
PGM_BTH_DECL(int, Enter)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3)
{
    /* Here we deal with allocation of the root shadow page table for real and protected mode during mode switches;
     * other modes rely on MapCR3/UnmapCR3 to set up the shadow root page tables.
     */
#if  (   (   PGM_SHW_TYPE == PGM_TYPE_32BIT \
          || PGM_SHW_TYPE == PGM_TYPE_PAE \
          || PGM_SHW_TYPE == PGM_TYPE_AMD64) \
      && (   PGM_GST_TYPE == PGM_TYPE_REAL \
          || PGM_GST_TYPE == PGM_TYPE_PROT))

    PVM pVM = pVCpu->pVMR3;

    Assert(HMIsNestedPagingActive(pVM) == pVM->pgm.s.fNestedPaging);
    Assert(!pVM->pgm.s.fNestedPaging);

    pgmLock(pVM);
    /* Note: we only really need shadow paging in real and protected mode for VT-x and AMD-V (excluding nested paging/EPT modes),
     *       but any calls to GC need a proper shadow page setup as well.
     */
    /* Free the previous root mapping if still active. */
    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    if (pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
    {
        Assert(pVCpu->pgm.s.pShwPageCR3R3->enmKind != PGMPOOLKIND_FREE);

        /* Mark the page as unlocked; allow flushing again. */
        pgmPoolUnlockPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));

# ifndef PGM_WITHOUT_MAPPINGS
        /* Remove the hypervisor mappings from the shadow page table. */
        pgmMapDeactivateCR3(pVM, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
# endif

        pgmPoolFreeByPage(pPool, pVCpu->pgm.s.pShwPageCR3R3, NIL_PGMPOOL_IDX, UINT32_MAX);
        pVCpu->pgm.s.pShwPageCR3R3 = 0;
        pVCpu->pgm.s.pShwPageCR3RC = 0;
        pVCpu->pgm.s.pShwPageCR3R0 = 0;
    }

    /* construct a fake address. */
    GCPhysCR3 = RT_BIT_64(63);
    int rc = pgmPoolAlloc(pVM, GCPhysCR3, BTH_PGMPOOLKIND_ROOT, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
                          NIL_PGMPOOL_IDX, UINT32_MAX, false /*fLockPage*/,
                          &pVCpu->pgm.s.pShwPageCR3R3);
    if (rc == VERR_PGM_POOL_FLUSHED)
    {
        Log(("Bth-Enter: PGM pool flushed -> signal sync cr3\n"));
        Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
        pgmUnlock(pVM);
        return VINF_PGM_SYNC_CR3;
    }
    AssertRCReturn(rc, rc);

    /* Mark the page as locked; disallow flushing. */
    pgmPoolLockPage(pPool, pVCpu->pgm.s.pShwPageCR3R3);

    pVCpu->pgm.s.pShwPageCR3R0 = MMHyperCCToR0(pVM, pVCpu->pgm.s.pShwPageCR3R3);
    pVCpu->pgm.s.pShwPageCR3RC = MMHyperCCToRC(pVM, pVCpu->pgm.s.pShwPageCR3R3);

    /* Set the current hypervisor CR3. */
    CPUMSetHyperCR3(pVCpu, PGMGetHyperCR3(pVCpu));

# ifndef PGM_WITHOUT_MAPPINGS
    /* Apply all hypervisor mappings to the new CR3. */
    rc = pgmMapActivateCR3(pVM, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
# endif

    pgmUnlock(pVM);
    return rc;
#else
    NOREF(pVCpu); NOREF(GCPhysCR3);
    return VINF_SUCCESS;
#endif
}

/**
 * Relocate any GC pointers related to shadow mode paging.
 *
 * @returns VBox status code.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   offDelta    The relocation offset.
 */
PGM_BTH_DECL(int, Relocate)(PVMCPU pVCpu, RTGCPTR offDelta)
{
    /* nothing special to do here - InitData does the job. */
    NOREF(pVCpu); NOREF(offDelta);
    return VINF_SUCCESS;
}
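
PGMBth.h is an include template: PGM compiles it once per supported shadow/guest paging combination, with PGM_SHW_TYPE, PGM_GST_TYPE and the PGM_BTH_NAME* name-mangling macros defined before each inclusion, so every combination gets its own copy of these functions, and InitData wires the matching copies into the PGMMODEDATA function-pointer table. The snippet below is a minimal, self-contained sketch of that idiom, not VirtualBox code; INSTANTIATE_BTH, MODEDATA and the bth*Enter functions are hypothetical names chosen for illustration.

#include <stdio.h>

/* Hypothetical stand-in for the include template: in VirtualBox the shared
   bodies live in PGMBth.h and are #included once per shadow/guest pair; here
   a macro stamps out one tiny "Enter" function per pair for brevity. */
#define INSTANTIATE_BTH(ShwGstPair) \
    static int bth##ShwGstPair##Enter(unsigned idCpu) \
    { \
        printf("Enter for the " #ShwGstPair " shadow+guest pair on VCPU %u\n", idCpu); \
        return 0; \
    }

INSTANTIATE_BTH(32BitReal)  /* e.g. 32-bit shadow paging, real-mode guest     */
INSTANTIATE_BTH(PAEProt)    /* e.g. PAE shadow paging, protected-mode guest   */

/* Hypothetical analogue of the PGMMODEDATA idea: a per-combination record of
   function pointers that an InitData-style routine fills in. */
typedef struct MODEDATA
{
    const char *pszName;
    int       (*pfnEnter)(unsigned idCpu);
} MODEDATA;

int main(void)
{
    MODEDATA aModeData[] =
    {
        { "32BitReal", bth32BitRealEnter },
        { "PAEProt",   bthPAEProtEnter   },
    };
    for (unsigned i = 0; i < sizeof(aModeData) / sizeof(aModeData[0]); i++)
        aModeData[i].pfnEnter(i);
    return 0;
}

In the ring-3 case InitData can take the function addresses directly, as seen above; for the ring-0 and raw-mode (RC) contexts it instead resolves the per-mode symbols by their mangled names via PDMR3LdrGetSymbolR0/PDMR3LdrGetSymbolRC, which is why fResolveGCAndR0 exists to defer that step until PDM is initialized.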