Changeset 30889 in vbox
- Timestamp: Jul 17, 2010 1:54:47 AM (14 years ago)
- Location: trunk
- Files: 15 edited
  - include/VBox/hwacc_vmx.h (modified) (2 diffs)
  - include/VBox/x86.h (modified) (6 diffs)
  - src/VBox/VMM/PGM.cpp (modified) (9 diffs)
  - src/VBox/VMM/PGMGstDefs.h (modified) (4 diffs)
  - src/VBox/VMM/PGMInline.h (modified) (9 diffs)
  - src/VBox/VMM/PGMInternal.h (modified) (14 diffs)
  - src/VBox/VMM/PGMMap.cpp (modified) (1 diff)
  - src/VBox/VMM/VMMAll/CPUMAllRegs.cpp (modified) (2 diffs)
  - src/VBox/VMM/VMMAll/IOMAllMMIO.cpp (modified) (2 diffs)
  - src/VBox/VMM/VMMAll/PGMAll.cpp (modified) (9 diffs)
  - src/VBox/VMM/VMMAll/PGMAllBth.h (modified) (84 diffs)
  - src/VBox/VMM/VMMAll/PGMAllGst.h (modified) (10 diffs)
  - src/VBox/VMM/VMMAll/PGMAllMap.cpp (modified) (5 diffs)
  - src/VBox/VMM/VMMR0/PGMR0.cpp (modified) (8 diffs)
  - src/VBox/VMM/VMMR0/VMMR0.cpp (modified) (2 diffs)
trunk/include/VBox/hwacc_vmx.h
  r29250 → r30889, struct EPTPTEBITS:

  -   /** Present bit. */
  +   /** Present bit.
  +    * @remark This is a convenience "misnomer".  The bit actually indicates
  +    *         read access and the CPU will consider an entry with any of the
  +    *         first three bits set as present.  Since all our valid entries
  +    *         will have this bit set, it can be used as a present indicator
  +    *         and allow some code sharing. */
      uint64_t u1Present : 1;
      /** Writable bit. */
      ...
      /** Physical address of page. Restricted by maximum physical address width of the cpu. */
      uint64_t u40PhysAddr : 40;
  -   /** Availabe for software. */
  +   /** Available for software. */
      uint64_t u12Available : 12;
  } EPTPTEBITS;
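The remark boils down to a simple property of EPT entries. A minimal sketch in plain C (not VirtualBox code; the helper names are invented for illustration):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hardware view: an EPT entry counts as present when any of the
     * read/write/execute bits (bits 0-2) is set. */
    static bool IsEptEntryPresentHw(uint64_t uEntry)
    {
        return (uEntry & UINT64_C(0x7)) != 0;   /* read | write | execute */
    }

    /* VMM view: every valid entry the VMM writes has the read bit set, so
     * testing bit 0 alone is equivalent and lets the code reuse the
     * u1Present field layout shared with ordinary x86 PTEs. */
    static bool IsEptEntryPresentVmm(uint64_t uEntry)
    {
        return (uEntry & UINT64_C(0x1)) != 0;   /* read bit as "present" */
    }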
trunk/include/VBox/x86.h
  r28800 → r30889:
  - Added a @todo on the X86_PTE_PAE_PG_MASK 48-bit masking hack ("this should be safe to ditch now").
  - The NX comments now say PAE/LM for X86_PTE_PAE_NX, X86_PDE_PAE_NX and X86_PDE2M_PAE_NX, and
    X86_PDE2M_PAE_NX now expands to RT_BIT_64(63) instead of referring to itself.
  - Corrected the X86_PDE4M_PG_HIGH_MASK comment (bits 20-13, not 13-20) and added
    X86_PDE4M_MBZ_MASK (bit 21, MBZ on AMD CPUs without PSE-36).
  - Added X86_PDPE_LM_PS (bit 7, 1GB pages, long mode only) and converted the PDPE page masks
    to UINT64_C(); X86_PDE2M_PAE_PG_MASK likewise uses UINT64_C() now.
  - Added must-be-zero (MBZ) bit masks for reserved-bit checking, with and without NX, for each
    paging structure level:
      X86_PTE_PAE_MBZ_MASK_NX / _NO_NX, X86_PTE_LM_MBZ_MASK_NX / _NO_NX,
      X86_PDE_PAE_MBZ_MASK_NX / _NO_NX, X86_PDE_LM_MBZ_MASK_NX / _NO_NX,
      X86_PDE2M_PAE_MBZ_MASK_NX / _NO_NX, X86_PDE2M_LM_MBZ_MASK_NX / _NO_NX,
      X86_PDPE_PAE_MBZ_MASK, X86_PDPE_LM_NX, X86_PDPE_LM_MBZ_MASK_NX / _NO_NX,
      X86_PDPE1G_LM_MBZ_MASK_NX / _NO_NX, X86_PML4E_MBZ_MASK_NX / _NO_NX.
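The new MBZ masks are meant to be OR'ed with the CPU-specific reserved page-frame bits and tested against a guest entry in one operation. A minimal sketch of that check, assuming NX is disabled (hypothetical helper, not part of x86.h):

    #include <stdbool.h>
    #include <stdint.h>

    #define X86_PDE_PAE_MBZ_MASK_NO_NX  UINT64_C(0xfff0000000000080)  /* from above */

    /* Build the full reserved-bit mask for a PAE PDE on a CPU with the given
     * MAXPHYADDR, then validate an entry against it. */
    static bool IsPaePdeValid(uint64_t uPde, unsigned cMaxPhysAddrWidth)
    {
        uint64_t fMbz = X86_PDE_PAE_MBZ_MASK_NO_NX;
        for (unsigned iBit = cMaxPhysAddrWidth; iBit < 52; iBit++)
            fMbz |= UINT64_C(1) << iBit;    /* unimplemented address bits */
        return (uPde & fMbz) == 0;          /* any MBZ bit set => reserved-bit fault */
    }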
trunk/src/VBox/VMM/PGM.cpp
  r30845 → r30889:
  - Now includes iprt/asm-amd64-x86.h; the PGM_SAVED_STATE_VERSION* defines were dropped from
    this file.
  - Fixed the "Alocate the zero page" comment typo.
  - Allocates an invalid MMIO page filled with 0xfeedface and records HCPhysMmioPg and
    HCPhysInvMmioPg (the invalid bits of HCPhysInvMmioPg are set later, on init complete).
  - pgmR3InitPaging() now determines the CPU's maximum physical address width (MAXPHYADDR) from
    CPUID leaf 0x80000008, falling back to an assumed 48 bits, sets
    fLessThan52PhysicalAddressBits, poisons HCPhysInvMmioPg with the unimplemented address bits
    and builds GCPhysInvAddrMask.
  - Initializes the per-VCPU invalid (MBZ) paging entry masks, assuming NX is disabled:
    fGst32BitMbzBigPdeMask, fGstPaeMbzPteMask/PdeMask/BigPdeMask/PdpeMask and the corresponding
    fGstAmd64Mbz* masks, combining the static MBZ defines with the page-frame bits above
    MAXPHYADDR. A @todo notes that the manuals are not entirely clear whether the physical
    address width applies to the 32-bit 4MB PDE case (Intel table 5-9 vs the PDE4M descriptions).
  - With PSE-36, GCPhys4MBPSEMask now uses RT_MAX(36, cMaxPhysAddrWidth) bits instead of a
    hard-coded 36 (more recent Intel manuals specify 40 bits just like AMD).
  - Call sites updated: pgmGstGet32bitPDPtr() now takes pVCpu instead of &pVCpu->pgm.s.
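For reference, the MAXPHYADDR probe done here with IPRT's ASMCpuId_EAX() can be sketched with the GCC/Clang cpuid intrinsics instead (the fallback value mirrors the one above; the helper name is invented, not VirtualBox code):

    #include <cpuid.h>
    #include <stdint.h>

    /* Returns a mask with all physical address bits the CPU does not implement,
     * i.e. bits [MAXPHYADDR, 63]; an address with any of these set can never be
     * a valid physical address on this host. */
    static uint64_t GetInvalidPhysAddrMask(void)
    {
        unsigned int uEax, uEbx, uEcx, uEdx;
        unsigned int cBits = 48;                    /* assumed fallback, as above */
        if (__get_cpuid(0x80000008, &uEax, &uEbx, &uEcx, &uEdx))
            cBits = uEax & 0xff;                    /* MAXPHYADDR */
        uint64_t fMask = 0;
        for (unsigned int iBit = cBits; iBit < 64; iBit++)
            fMask |= UINT64_C(1) << iBit;
        return fMask;
    }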
trunk/src/VBox/VMM/PGMGstDefs.h
  r28800 → r30889:
  - Copyright bumped to 2006-2010.
  - New per-mode macros are #undef'ed and defined alongside the existing ones:
    GSTPTWALK / PGSTPTWALK / PCGSTPTWALK (guest page table walk types), GST_IS_PTE_VALID,
    GST_IS_PDE_VALID, GST_IS_BIG_PDE_VALID, GST_IS_PDPE_VALID, GST_IS_BIG_PDPE_VALID,
    GST_IS_PML4E_VALID, GST_IS_PSE_ACTIVE, GST_IS_NX_ACTIVE and BTH_IS_NP_ACTIVE.
  - For real/protected mode guests all validity macros expand to (true); BTH_IS_NP_ACTIVE(pVM)
    is (true) when shadowing with EPT, HWACCMIsNestedPagingActive(pVM) for PGM_TYPE_PROT and
    (false) otherwise.
  - For 32-bit guests only GST_IS_BIG_PDE_VALID checks reserved bits (fGst32BitMbzBigPdeMask);
    GST_IS_PSE_ACTIVE uses CPUMIsGuestPageSizeExtEnabled() (with a @todo to shadow it) and NX
    is always inactive.
  - For PAE and AMD64 guests the validity macros test the new per-VCPU MBZ masks
    (fGstPaeMbz*Mask, fGstAmd64Mbz*Mask); GST_IS_NX_ACTIVE uses CPUMIsGuestNXEnabled() (with a
    @todo to shadow the value) and GST_IS_PSE_ACTIVE is (true).
  - GST_GET_PDE_BIG_PG_GCPHYS() now takes a pVM parameter; the macro table was realigned.
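To make the purpose of the validity macros concrete, here is a minimal sketch of the pattern a mode-independent walker follows when using them (types and names are hypothetical; they merely mirror the per-VCPU mask fields referenced above):

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct MYVCPUPGM { uint64_t fGstPaeMbzPdeMask; } MYVCPUPGM;

    /* In PAE/AMD64 mode this is a real mask test; in modes without reserved
     * bits the corresponding macro simply expands to (true) and the branch
     * disappears at compile time. */
    static inline bool MyIsPdeValid(const MYVCPUPGM *pPgm, uint64_t uPde)
    {
        return (uPde & pPgm->fGstPaeMbzPdeMask) == 0;
    }

    /* 0 = ok, -1 = not present, -2 = reserved bits set (reserved-bit #PF). */
    static int MyWalkPde(const MYVCPUPGM *pPgm, uint64_t uPde)
    {
        if (!(uPde & 1))
            return -1;
        if (!MyIsPdeValid(pPgm, uPde))
            return -2;
        return 0;
    }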
trunk/src/VBox/VMM/PGMInline.h
  r30824 → r30889:
  - The guest paging accessors now take a PVMCPU instead of a PPGMCPU, and new "...Ex" variants
    return a VBox status code together with an out parameter that is always set:
    pgmGstGet32bitPDPtrEx(), pgmGstGetPaePDPTPtrEx() and pgmGstGetLongModePML4PtrEx(). The
    pointer-returning forms remain as thin wrappers that map failure to NULL.
  - The lazy mapping helpers (pgmGstLazyMap32BitPD, pgmGstLazyMapPaePDPT, pgmGstLazyMapPaePD,
    pgmGstLazyMapPml4) are now called through the status-code interface instead of returning
    pointers directly.
  - pgmGstGetPaePDE() and pgmGstGetPaePDPtr() additionally reject PDPTEs with reserved bits set
    (fGstPaeMbzPdpeMask) and return a zero PDE / NULL in that case.
  - pgmGstGetLongModePDE() and pgmGstGetLongModePDPtr() likewise check fGstAmd64MbzPml4eMask and
    fGstAmd64MbzPdpeMask on the PML4E and PDPE, tolerate VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS,
    and note that, to keep things simple, invalid physical addresses are assumed to cause
    X86_TRAP_PF_RSVD until 52-bit wide guest physical addresses are supported.
  - Several now-redundant getters were removed: pgmGstGet32bitPDE, pgmGstGet32bitPDEPtr,
    pgmGstGetPaePD, pgmGstGetPaePDEPtr, pgmGstGetLongModePML4E, pgmGstGetLongModePDPTPtr,
    pgmGstGetLongModePDEEx, pgmGstGetLongModePDEPtr and the duplicate pgmGstGetLongModePDPtr
    overload. pgmGstGetLongModePML4EPtr() is kept and marked as only used by AssertCR3.
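The accessor rework follows one recurring shape: a status-returning "...Ex" function whose out parameter is always set, plus the old pointer-returning form kept as a thin wrapper. A minimal, self-contained sketch of that shape (all names invented for illustration):

    #include <stddef.h>

    typedef struct MYPD { unsigned long a[512]; } MYPD;

    #define MY_SUCCESS                    0
    #define MY_ERR_INVALID_GC_PHYS_ADDR (-1)

    static MYPD g_Pd;   /* stand-in for the lazily mapped guest page directory */

    /* The "...Ex" form: callers that care can tell a mapping failure apart
     * from a legitimately absent structure. *ppPd is always set. */
    static int MyGetGuestPdEx(int fMappable, MYPD **ppPd)
    {
        if (!fMappable)
        {
            *ppPd = NULL;
            return MY_ERR_INVALID_GC_PHYS_ADDR;
        }
        *ppPd = &g_Pd;
        return MY_SUCCESS;
    }

    /* The legacy pointer form stays as a wrapper; NULL signals failure. */
    static MYPD *MyGetGuestPd(int fMappable)
    {
        MYPD *pPd = NULL;
        MyGetGuestPdEx(fMappable, &pPd);
        return pPd;
    }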
trunk/src/VBox/VMM/PGMInternal.h
r30843 r30889 91 91 92 92 /** 93 * Enables optimizations for MMIO handlers that exploits X86_TRAP_PF_RSVD and 94 * VMX_EXIT_EPT_MISCONFIG. 95 */ 96 #if 0 /* ! remember to disable before committing ! XXX TODO */ 97 # define PGM_WITH_MMIO_OPTIMIZATIONS 98 #endif 99 100 /** 93 101 * Chunk unmapping code activated on 32-bit hosts for > 1.5/2 GB guest memory support 94 102 */ … … 266 274 # define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \ 267 275 PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */ 276 #endif 277 278 /** @def PGM_GCPHYS_2_PTR_BY_VMCPU 279 * Maps a GC physical page address to a virtual address. 280 * 281 * @returns VBox status code. 282 * @param pVCpu The current CPU. 283 * @param GCPhys The GC physical address to map to a virtual one. 284 * @param ppv Where to store the virtual address. No need to cast this. 285 * 286 * @remark In RC this uses PGMGCDynMapGCPage(), so it will consume of the 287 * small page window employeed by that function. Be careful. 288 * @remark There is no need to assert on the result. 289 */ 290 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 291 # define PGM_RCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhys, ppv) \ 292 pgmR0DynMapGCPageInlined(&(pVCpu)->CTX_SUFF(pVM)->pgm.s, GCPhys, (void **)(ppv)) 293 #else 294 # define PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhys, ppv) \ 295 PGM_GCPHYS_2_PTR((pVCpu)->CTX_SUFF(pVM), GCPhys, ppv) 268 296 #endif 269 297 … … 1764 1792 /** Shw: PAE page directory; Gst: PAE page directory. */ 1765 1793 PGMPOOLKIND_PAE_PD_FOR_PAE_PD, 1766 /** Shw: PAE page directory; Gst: no paging. */1794 /** Shw: PAE page directory; Gst: no paging. Note: +NP. */ 1767 1795 PGMPOOLKIND_PAE_PD_PHYS, 1768 1796 … … 1881 1909 * It's a hack required because of REMR3NotifyHandlerPhysicalDeregister. */ 1882 1910 bool volatile fReusedFlushPending; 1883 /** Used to mark the page as dirty (write monitoring if temporarily off. */ 1911 /** Used to mark the page as dirty (write monitoring is temporarily 1912 * off). */ 1884 1913 bool fDirty; 1885 1914 … … 2269 2298 2270 2299 2300 /** 2301 * Page fault guest state for the AMD64 paging mode. 2302 */ 2303 typedef struct PGMPTWALKCORE 2304 { 2305 /** The guest virtual address that is being resolved by the walk 2306 * (input). */ 2307 RTGCPTR GCPtr; 2308 2309 /** The guest physcial address that is the result of the walk. 2310 * @remarks only valid if fSucceeded is set. */ 2311 RTGCPHYS GCPhys; 2312 2313 /** Set if the walk succeeded, i.d. GCPhys is valid. */ 2314 bool fSucceeded; 2315 /** The level problem arrised at. 2316 * PTE is level 1, PDE is level 2, PDPE is level 3, PML4 is level 4, CR3 is 2317 * level 8. This is 0 on success. */ 2318 uint8_t uLevel; 2319 /** Set if the page isn't present. */ 2320 bool fNotPresent; 2321 /** Encountered a bad physical address. */ 2322 bool fBadPhysAddr; 2323 /** Set if there was reserved bit violations. */ 2324 bool fRsvdError; 2325 /** Set if it involves a big page (2/4 MB). */ 2326 bool fBigPage; 2327 /** Set if it involves a gigantic page (1 GB). */ 2328 bool fGigantPage; 2329 #if 0 2330 /** Set if write access was attempted and not possible. */ 2331 bool fWriteError; 2332 /** Set if execute access was attempted and not possible. */ 2333 bool fExecuteError; 2334 #endif 2335 /** Unused. */ 2336 bool afUnused[3]; 2337 } PGMPTWALKCORE; 2338 2339 2340 /** 2341 * Guest page table walk for the AMD64 mode. 2342 */ 2343 typedef struct PGMPTWALKGSTAMD64 2344 { 2345 /** The common core. 
*/ 2346 PGMPTWALKCORE Core; 2347 2348 PX86PML4 pPml4; 2349 PX86PML4E pPml4e; 2350 X86PML4E Pml4e; 2351 2352 PX86PDPT pPdpt; 2353 PX86PDPE pPdpe; 2354 X86PDPE Pdpe; 2355 2356 PX86PDPAE pPd; 2357 PX86PDEPAE pPde; 2358 X86PDEPAE Pde; 2359 2360 PX86PTPAE pPt; 2361 PX86PTEPAE pPte; 2362 X86PTEPAE Pte; 2363 } PGMPTWALKGSTAMD64; 2364 /** Pointer to a AMD64 guest page table walk. */ 2365 typedef PGMPTWALKGSTAMD64 *PPGMPTWALKGSTAMD64; 2366 /** Pointer to a const AMD64 guest page table walk. */ 2367 typedef PGMPTWALKGSTAMD64 const *PCPGMPTWALKGSTAMD64; 2368 2369 /** 2370 * Guest page table walk for the PAE mode. 2371 */ 2372 typedef struct PGMPTWALKGSTPAE 2373 { 2374 /** The common core. */ 2375 PGMPTWALKCORE Core; 2376 2377 PX86PDPT pPdpt; 2378 PX86PDPE pPdpe; 2379 X86PDPE Pdpe; 2380 2381 PX86PDPAE pPd; 2382 PX86PDEPAE pPde; 2383 X86PDEPAE Pde; 2384 2385 PX86PTPAE pPt; 2386 PX86PTEPAE pPte; 2387 X86PTEPAE Pte; 2388 } PGMPTWALKGSTPAE; 2389 /** Pointer to a PAE guest page table walk. */ 2390 typedef PGMPTWALKGSTPAE *PPGMPTWALKGSTPAE; 2391 /** Pointer to a const AMD64 guest page table walk. */ 2392 typedef PGMPTWALKGSTPAE const *PCPGMPTWALKGSTPAE; 2393 2394 /** 2395 * Guest page table walk for the 32-bit mode. 2396 */ 2397 typedef struct PGMPTWALKGST32BIT 2398 { 2399 /** The common core. */ 2400 PGMPTWALKCORE Core; 2401 2402 PX86PD pPd; 2403 PX86PDE pPde; 2404 X86PDE Pde; 2405 2406 PX86PT pPt; 2407 PX86PTE pPte; 2408 X86PTE Pte; 2409 } PGMPTWALKGST32BIT; 2410 /** Pointer to a 32-bit guest page table walk. */ 2411 typedef PGMPTWALKGST32BIT *PPGMPTWALKGST32BIT; 2412 /** Pointer to a const 32-bit guest page table walk. */ 2413 typedef PGMPTWALKGST32BIT const *PCPGMPTWALKGST32BIT; 2414 2415 2271 2416 /** @name Paging mode macros 2272 * @{ */ 2417 * @{ 2418 */ 2273 2419 #ifdef IN_RC 2274 2420 # define PGM_CTX(a,b) a##RC##b … … 2497 2643 * detection. */ 2498 2644 bool fPhysWriteMonitoringEngaged; 2645 /** Set if the CPU has less than 52-bit physical address width. 2646 * This is used */ 2647 bool fLessThan52PhysicalAddressBits; 2499 2648 /** Alignment padding. */ 2500 bool afAlignment0[ 2];2649 bool afAlignment0[1]; 2501 2650 2502 2651 /* … … 2519 2668 /** 4 MB page mask; 32 or 36 bits depending on PSE-36 (identical for all VCPUs) */ 2520 2669 RTGCPHYS GCPhys4MBPSEMask; 2670 /** Mask containing the invalid bits of a guest physical address. 2671 * @remarks this does not stop at bit 52. */ 2672 RTGCPHYS GCPhysInvAddrMask; 2521 2673 2522 2674 /** Pointer to the list of RAM ranges (Phys GC -> Phys HC conversion) - for R3. … … 2706 2858 RTGCPTR pvZeroPgRC; 2707 2859 /** @}*/ 2860 2861 /** @name The Invalid MMIO page. 2862 * This page is filled with 0xfeedface. 2863 * @{ */ 2864 /** The host physical address of the invalid MMIO page. */ 2865 RTHCPHYS HCPhysMmioPg; 2866 /** The host pysical address of the invalid MMIO page pluss all invalid 2867 * physical address bits set. This is used to trigger X86_TRAP_PF_RSVD. 2868 * @remarks Check fLessThan52PhysicalAddressBits before use. */ 2869 RTHCPHYS HCPhysInvMmioPg; 2870 /** The ring-3 mapping of the invalid MMIO page. */ 2871 RTR3PTR pvMmioPgR3; 2872 /** @} */ 2873 2708 2874 2709 2875 /** The number of handy pages. */ … … 2920 3086 * @param pPGM Pointer to PGMCPU instance data. 2921 3087 */ 2922 #define PGMCPU2VM(pPGM) ( (PVM)((char*)pPGM - pPGM->offVM) )3088 #define PGMCPU2VM(pPGM) ( (PVM)((char*)(pPGM) - (pPGM)->offVM) ) 2923 3089 2924 3090 /** … … 2927 3093 * @param pPGM Pointer to PGMCPU instance data. 
2928 3094 */ 2929 #define PGMCPU2PGM(pPGMCpu) ( (PPGM)((char*)pPGMCpu - pPGMCpu->offPGM) )3095 #define PGMCPU2PGM(pPGMCpu) ( (PPGM)((char *)(pPGMCpu) - (pPGMCpu)->offPGM) ) 2930 3096 2931 3097 /** … … 2979 3145 /** The guest's page directory, static RC mapping. */ 2980 3146 RCPTRTYPE(PX86PD) pGst32BitPdRC; 3147 /** Mask containing the MBZ bits of a big page PDE. */ 3148 uint32_t fGst32BitMbzBigPdeMask; 3149 /** Set if the page size extension (PSE) is enabled. */ 3150 bool fGst32BitPageSizeExtension; 3151 /** Alignment padding. */ 3152 bool afAlignment4[3]; 2981 3153 /** @} */ 2982 3154 … … 3009 3181 /** The physical addresses of the monitored guest page directories (PAE). */ 3010 3182 RTGCPHYS aGCPhysGstPaePDsMonitored[4]; 3183 /** Mask containing the MBZ PTE bits. */ 3184 uint64_t fGstPaeMbzPteMask; 3185 /** Mask containing the MBZ PDE bits. */ 3186 uint64_t fGstPaeMbzPdeMask; 3187 /** Mask containing the MBZ big page PDE bits. */ 3188 uint64_t fGstPaeMbzBigPdeMask; 3189 /** Mask containing the MBZ PDPE bits. */ 3190 uint64_t fGstPaeMbzPdpeMask; 3011 3191 /** @} */ 3012 3192 … … 3021 3201 RTR0PTR alignment6b; /**< alignment equalizer. */ 3022 3202 #endif 3203 /** Mask containing the MBZ PTE bits. */ 3204 uint64_t fGstAmd64MbzPteMask; 3205 /** Mask containing the MBZ PDE bits. */ 3206 uint64_t fGstAmd64MbzPdeMask; 3207 /** Mask containing the MBZ big page PDE bits. */ 3208 uint64_t fGstAmd64MbzBigPdeMask; 3209 /** Mask containing the MBZ PDPE bits. */ 3210 uint64_t fGstAmd64MbzPdpeMask; 3211 /** Mask containing the MBZ big page PDPE bits. */ 3212 uint64_t fGstAmd64MbzBigPdpeMask; 3213 /** Mask containing the MBZ PML4E bits. */ 3214 uint64_t fGstAmd64MbzPml4eMask; 3023 3215 /** @} */ 3024 3216 … … 3451 3643 int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD); 3452 3644 3453 PX86PD pgmGstLazyMap32BitPD(PPGMCPU pPGM);3454 PX86PDPT pgmGstLazyMapPaePDPT(PPGMCPU pPGM);3455 PX86PDPAE pgmGstLazyMapPaePD(PPGMCPU pPGM, uint32_t iPdpt);3456 PX86PML4 pgmGstLazyMapPml4(PPGMCPU pPGM);3645 int pgmGstLazyMap32BitPD(PVMCPU pVCpu, PX86PD *ppPd); 3646 int pgmGstLazyMapPaePDPT(PVMCPU pVCpu, PX86PDPT *ppPdpt); 3647 int pgmGstLazyMapPaePD(PVMCPU pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd); 3648 int pgmGstLazyMapPml4(PVMCPU pVCpu, PX86PML4 *ppPml4); 3457 3649 3458 3650 # if defined(VBOX_STRICT) && HC_ARCH_BITS == 64 -
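The PGMInternal.h hunks above introduce the PGMPTWALKCORE / PGMPTWALKGST* guest page-table walk records and a set of per-mode MBZ masks. Below is a minimal, self-contained sketch of how a failed walk could be classified into the not-present and reserved-bit cases that the later PGMAllBth.h changes distinguish; PGMPTWALKCORESKETCH and sketchDescribeWalk are hypothetical stand-ins that only mirror the fields declared in the hunk, not the real PGM types or API.

/* Illustrative only: a trimmed stand-in for the new PGMPTWALKCORE so the
 * classification logic can be shown in isolation. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct PGMPTWALKCORESKETCH
{
    uint64_t GCPtr;        /* input: guest virtual address being resolved */
    uint64_t GCPhys;       /* output: guest physical address, valid if fSucceeded */
    bool     fSucceeded;
    uint8_t  uLevel;       /* 1=PTE, 2=PDE, 3=PDPE, 4=PML4E, 8=CR3; 0 on success */
    bool     fNotPresent;
    bool     fBadPhysAddr;
    bool     fRsvdError;
    bool     fBigPage;
} PGMPTWALKCORESKETCH;

/* Hypothetical helper: map a failed walk onto the same buckets the new
 * CheckPageFaultReturnNP/RSVD/Prot helpers use further down in PGMAllBth.h. */
static const char *sketchDescribeWalk(PGMPTWALKCORESKETCH const *pWalk)
{
    if (pWalk->fSucceeded)
        return "resolved";
    if (pWalk->fNotPresent)
        return "not present (#PF without P)";
    if (pWalk->fBadPhysAddr || pWalk->fRsvdError)
        return "reserved bits / bad physical address (#PF with RSVD)";
    return "protection or other failure";
}

int main(void)
{
    PGMPTWALKCORESKETCH Walk = { 0 };
    Walk.GCPtr      = UINT64_C(0x00007fff00001000);
    Walk.uLevel     = 2;        /* the problem was hit at the PDE */
    Walk.fRsvdError = true;     /* an MBZ bit was set in the PDE */
    printf("walk of %#llx: %s at level %u\n",
           (unsigned long long)Walk.GCPtr, sketchDescribeWalk(&Walk), (unsigned)Walk.uLevel);
    return 0;
}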
trunk/src/VBox/VMM/PGMMap.cpp
r28800 r30889 1280 1280 { 1281 1281 unsigned iPDSrc; 1282 PX86PDPAE pPDSrc = pgmGstGetPaePDPtr( &pVCpu->pgm.s, (RTGCPTR32)iPDPTE << X86_PDPT_SHIFT, &iPDSrc, NULL);1282 PX86PDPAE pPDSrc = pgmGstGetPaePDPtr(pVCpu, (RTGCPTR32)iPDPTE << X86_PDPT_SHIFT, &iPDSrc, NULL); 1283 1283 1284 1284 /* -
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
r30861 r30889 1006 1006 /* AMD64 Architecture Programmer's Manual: 15.15 TLB Control; flush the TLB 1007 1007 if MSR_K6_EFER_NXE, MSR_K6_EFER_LME or MSR_K6_EFER_LMA are changed. */ 1008 if ( (uValue & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA))1008 if ( (uOldEFER & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA)) 1009 1009 != (pVCpu->cpum.s.Guest.msrEFER & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA))) 1010 1010 { … 1013 1013 1014 1014 /* Notify PGM about NXE changes. */ 1015 if ( (uValue & MSR_K6_EFER_NXE)1015 if ( (uOldEFER & MSR_K6_EFER_NXE) 1016 1016 != (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE)) 1017 PGMNotifyNxeChanged(pVCpu, !!(uValue & MSR_K6_EFER_NXE));1017 PGMNotifyNxeChanged(pVCpu, !(uOldEFER & MSR_K6_EFER_NXE)); 1018 1018 } 1019 1019 break; -
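The uValue to uOldEFER switch above makes the change detection compare the previous EFER with the freshly written guest EFER; with the old code both sides could end up holding the new value, in which case no change would ever be seen. A small standalone sketch of that change-detection pattern follows, with a hypothetical sketchOnEferWrite helper and assumed bit positions for the EFER flags.

/* Minimal sketch of the old-vs-new EFER comparison; the SKETCH_EFER_* values
 * are assumptions for illustration, not the real MSR_K6_EFER_* constants. */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_EFER_LME  UINT64_C(0x0000000000000100)
#define SKETCH_EFER_LMA  UINT64_C(0x0000000000000400)
#define SKETCH_EFER_NXE  UINT64_C(0x0000000000000800)

static void sketchOnEferWrite(uint64_t uOldEFER, uint64_t uNewEFER)
{
    uint64_t const fTlbBits = SKETCH_EFER_NXE | SKETCH_EFER_LME | SKETCH_EFER_LMA;

    /* Flush only when one of the paging-relevant bits actually changed. */
    if ((uOldEFER & fTlbBits) != (uNewEFER & fTlbBits))
        printf("flush TLB\n");

    /* Notify only on an NXE transition.  Because a transition is known to have
     * happened, the new state is the opposite of the old one, which is why the
     * fixed code can pass !(uOldEFER & MSR_K6_EFER_NXE). */
    if ((uOldEFER & SKETCH_EFER_NXE) != (uNewEFER & SKETCH_EFER_NXE))
        printf("NXE changed, now %s\n", (uNewEFER & SKETCH_EFER_NXE) ? "on" : "off");
}

int main(void)
{
    sketchOnEferWrite(/*old*/ 0, /*new*/ SKETCH_EFER_NXE | SKETCH_EFER_LME);
    return 0;
}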
trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp
r30338 r30889 1035 1035 * @param pvUser Pointer to the MMIO ring-3 range entry. 1036 1036 */ 1037 int iomMMIOHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault, void *pvUser)1037 static int iomMMIOHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault, void *pvUser) 1038 1038 { 1039 1039 /* Take the IOM lock before performing any MMIO. */ … … 1251 1251 LogFlow(("IOMMMIOHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n", 1252 1252 GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip)); 1253 if (!pvUser) 1254 { 1255 int rc = iomLock(pVM); 1256 pvUser = iomMMIOGetRange(&pVM->iom.s, GCPhysFault); 1257 iomUnlock(pVM); 1258 } 1253 1259 VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, uErrorCode, pCtxCore, GCPhysFault, pvUser); 1254 1260 return VBOXSTRICTRC_VAL(rcStrict); -
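In the IOMAllMMIO.cpp hunk above, the fault wrapper now fetches the MMIO range itself when pvUser comes in as NULL, presumably to back the X86_TRAP_PF_RSVD / EPT-misconfiguration path added elsewhere in this changeset. One small nit: the committed hunk discards the iomLock() status. A hedged sketch of the same fallback with the status checked, using only the calls already visible in the hunk; this is an illustration, not a drop-in patch.

if (!pvUser)
{
    int rc2 = iomLock(pVM);             /* may fail if the lock is contended */
    AssertRC(rc2);
    if (RT_SUCCESS(rc2))
    {
        pvUser = iomMMIOGetRange(&pVM->iom.s, GCPhysFault);
        iomUnlock(pVM);
    }
    /* else: pvUser stays NULL; how that case should be resolved is left open here. */
}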
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
r30861 r30889 615 615 const bool fWrite = !!(fAccess & X86_PTE_RW); 616 616 const bool fUser = !!(fAccess & X86_PTE_US); 617 if ( !(fPageGst & X86_PTE_P)617 if ( !(fPageGst & X86_PTE_P) 618 618 || (fWrite && !(fPageGst & X86_PTE_RW)) 619 619 || (fUser && !(fPageGst & X86_PTE_US)) ) … … 633 633 { 634 634 /* 635 * Page is not present in our page tables.636 * Try to sync it!637 */635 * Page is not present in our page tables. 636 * Try to sync it! 637 */ 638 638 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US); 639 639 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US); … … 650 650 * This check is a bit paranoid, but useful. 651 651 */ 652 /* * @note this will assert when writing to monitored pages (a bit annoying actually)*/652 /* Note! This will assert when writing to monitored pages (a bit annoying actually). */ 653 653 uint64_t fPageShw; 654 654 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL); … … 1371 1371 1372 1372 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 1373 1373 1374 /** 1374 1375 * Performs the lazy mapping of the 32-bit guest PD. 1375 1376 * 1377 * @returns VBox status code. 1378 * @param pVCpu The current CPU. 1379 * @param ppPd Where to return the pointer to the mapping. This is 1380 * always set. 1381 */ 1382 int pgmGstLazyMap32BitPD(PVMCPU pVCpu, PX86PD *ppPd) 1383 { 1384 PVM pVM = pVCpu->CTX_SUFF(pVM); 1385 pgmLock(pVM); 1386 1387 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd)); 1388 1389 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK; 1390 PPGMPAGE pPage; 1391 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysCR3, &pPage); 1392 if (RT_SUCCESS(rc)) 1393 { 1394 RTHCPTR HCPtrGuestCR3; 1395 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3); 1396 if (RT_SUCCESS(rc)) 1397 { 1398 pVCpu->pgm.s.pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3; 1399 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE 1400 pVCpu->pgm.s.pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3; 1401 # endif 1402 *ppPd = (PX86PD)HCPtrGuestCR3; 1403 1404 pgmUnlock(pVM); 1405 return VINF_SUCCESS; 1406 } 1407 1408 AssertRC(rc); 1409 } 1410 pgmUnlock(pVM); 1411 1412 *ppPd = NULL; 1413 return rc; 1414 } 1415 1416 1417 /** 1418 * Performs the lazy mapping of the PAE guest PDPT. 1419 * 1420 * @returns VBox status code. 1421 * @param pVCpu The current CPU. 1422 * @param ppPdpt Where to return the pointer to the mapping. This is 1423 * always set. 1424 */ 1425 int pgmGstLazyMapPaePDPT(PVMCPU pVCpu, PX86PDPT *ppPdpt) 1426 { 1427 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt)); 1428 PVM pVM = pVCpu->CTX_SUFF(pVM); 1429 pgmLock(pVM); 1430 1431 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK; 1432 PPGMPAGE pPage; 1433 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysCR3, &pPage); 1434 if (RT_SUCCESS(rc)) 1435 { 1436 RTHCPTR HCPtrGuestCR3; 1437 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3); 1438 if (RT_SUCCESS(rc)) 1439 { 1440 pVCpu->pgm.s.pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3; 1441 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE 1442 pVCpu->pgm.s.pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3; 1443 # endif 1444 *ppPdpt = (PX86PDPT)HCPtrGuestCR3; 1445 1446 pgmUnlock(pVM); 1447 return VINF_SUCCESS; 1448 } 1449 1450 AssertRC(rc); 1451 } 1452 1453 pgmUnlock(pVM); 1454 *ppPdpt = NULL; 1455 return rc; 1456 } 1457 1458 1459 /** 1460 * Performs the lazy mapping / updating of a PAE guest PD. 1461 * 1376 1462 * @returns Pointer to the mapping. 1377 * @param pPGM The PGM instance data. 
1378 */ 1379 PX86PD pgmGstLazyMap32BitPD(PPGMCPU pPGM) 1380 { 1381 Assert(!pPGM->CTX_SUFF(pGst32BitPd)); 1382 PVM pVM = PGMCPU2VM(pPGM); 1463 * @returns VBox status code. 1464 * @param pVCpu The current CPU. 1465 * @param iPdpt Which PD entry to map (0..3). 1466 * @param ppPd Where to return the pointer to the mapping. This is 1467 * always set. 1468 */ 1469 int pgmGstLazyMapPaePD(PVMCPU pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd) 1470 { 1471 PVM pVM = pVCpu->CTX_SUFF(pVM); 1383 1472 pgmLock(pVM); 1384 1473 1385 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3); 1386 AssertReturn(pPage, NULL); 1387 1388 RTHCPTR HCPtrGuestCR3; 1389 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_PAGE_MASK, (void **)&HCPtrGuestCR3); 1390 AssertRCReturn(rc, NULL); 1391 1392 pPGM->pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3; 1393 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE 1394 pPGM->pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3; 1395 # endif 1396 1397 pgmUnlock(pVM); 1398 return pPGM->CTX_SUFF(pGst32BitPd); 1399 } 1400 1401 1402 /** 1403 * Performs the lazy mapping of the PAE guest PDPT. 1404 * 1405 * @returns Pointer to the mapping. 1406 * @param pPGM The PGM instance data. 1407 */ 1408 PX86PDPT pgmGstLazyMapPaePDPT(PPGMCPU pPGM) 1409 { 1410 Assert(!pPGM->CTX_SUFF(pGstPaePdpt)); 1411 PVM pVM = PGMCPU2VM(pPGM); 1412 pgmLock(pVM); 1413 1414 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3); 1415 AssertReturn(pPage, NULL); 1416 1417 RTHCPTR HCPtrGuestCR3; 1418 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_PAE_PAGE_MASK, (void **)&HCPtrGuestCR3); /** @todo r=bird: This GCPhysR3 masking isn't necessary. */ 1419 AssertRCReturn(rc, NULL); 1420 1421 pPGM->pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3; 1422 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE 1423 pPGM->pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3; 1424 # endif 1425 1426 pgmUnlock(pVM); 1427 return pPGM->CTX_SUFF(pGstPaePdpt); 1428 } 1429 1430 1431 /** 1432 * Performs the lazy mapping / updating of a PAE guest PD. 1433 * 1434 * @returns Pointer to the mapping. 1435 * @param pPGM The PGM instance data. 1436 * @param iPdpt Which PD entry to map (0..3). 
1437 */ 1438 PX86PDPAE pgmGstLazyMapPaePD(PPGMCPU pPGM, uint32_t iPdpt) 1439 { 1440 PVM pVM = PGMCPU2VM(pPGM); 1441 pgmLock(pVM); 1442 1443 PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt); 1474 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt); 1444 1475 Assert(pGuestPDPT); 1445 1476 Assert(pGuestPDPT->a[iPdpt].n.u1Present); 1446 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK ;1447 bool const fChanged = p PGM->aGCPhysGstPaePDs[iPdpt] != GCPhys;1448 1449 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);1450 i f (RT_LIKELY(pPage))1451 {1452 int rc = VINF_SUCCESS;1477 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK_FULL; 1478 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys; 1479 1480 PPGMPAGE pPage; 1481 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage); 1482 if (RT_SUCCESS(rc)) 1483 { 1453 1484 RTRCPTR RCPtr = NIL_RTRCPTR; 1454 1485 RTHCPTR HCPtr = NIL_RTHCPTR; … … 1464 1495 if (RT_SUCCESS(rc)) 1465 1496 { 1466 p PGM->apGstPaePDsR3[iPdpt] = (R3PTRTYPE(PX86PDPAE))HCPtr;1497 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = (R3PTRTYPE(PX86PDPAE))HCPtr; 1467 1498 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE 1468 p PGM->apGstPaePDsR0[iPdpt] = (R0PTRTYPE(PX86PDPAE))HCPtr;1499 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = (R0PTRTYPE(PX86PDPAE))HCPtr; 1469 1500 # endif 1470 1501 if (fChanged) 1471 1502 { 1472 p PGM->aGCPhysGstPaePDs[iPdpt] = GCPhys;1473 p PGM->apGstPaePDsRC[iPdpt] = (RCPTRTYPE(PX86PDPAE))RCPtr;1503 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys; 1504 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = (RCPTRTYPE(PX86PDPAE))RCPtr; 1474 1505 } 1475 1506 1507 *ppPd = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt]; 1476 1508 pgmUnlock(pVM); 1477 return pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];1509 return VINF_SUCCESS; 1478 1510 } 1479 1511 } 1480 1512 1481 1513 /* Invalid page or some failure, invalidate the entry. */ 1482 p PGM->aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;1483 p PGM->apGstPaePDsR3[iPdpt] = 0;1514 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS; 1515 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = 0; 1484 1516 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE 1485 p PGM->apGstPaePDsR0[iPdpt] = 0;1517 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = 0; 1486 1518 # endif 1487 p PGM->apGstPaePDsRC[iPdpt] = 0;1519 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = 0; 1488 1520 1489 1521 pgmUnlock(pVM); 1490 return NULL; 1491 } 1522 return rc; 1523 } 1524 1492 1525 #endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */ 1493 1494 1526 #if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) 1527 1495 1528 /** 1496 1529 * Performs the lazy mapping of the 32-bit guest PD. 1497 1530 * 1498 * @returns Pointer to the mapping. 1499 * @param pPGM The PGM instance data. 1500 */ 1501 PX86PML4 pgmGstLazyMapPml4(PPGMCPU pPGM) 1502 { 1503 Assert(!pPGM->CTX_SUFF(pGstAmd64Pml4)); 1504 PVM pVM = PGMCPU2VM(pPGM); 1531 * @returns VBox status code. 1532 * @param pVCpu The current CPU. 1533 * @param ppPml4 Where to return the pointer to the mapping. This will 1534 * always be set. 1535 */ 1536 int pgmGstLazyMapPml4(PVMCPU pVCpu, PX86PML4 *ppPml4) 1537 { 1538 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4)); 1539 PVM pVM = pVCpu->CTX_SUFF(pVM); 1505 1540 pgmLock(pVM); 1506 1541 1507 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3); 1508 AssertReturn(pPage, NULL); 1509 1510 RTHCPTR HCPtrGuestCR3; 1511 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK, (void **)&HCPtrGuestCR3); /** @todo r=bird: This GCPhysCR3 masking isn't necessary. 
*/ 1512 AssertRCReturn(rc, NULL); 1513 1514 pPGM->pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3; 1515 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE 1516 pPGM->pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3; 1517 # endif 1542 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK; 1543 PPGMPAGE pPage; 1544 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysCR3, &pPage); 1545 if (RT_SUCCESS(rc)) 1546 { 1547 RTHCPTR HCPtrGuestCR3; 1548 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3); 1549 if (RT_SUCCESS(rc)) 1550 { 1551 pVCpu->pgm.s.pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3; 1552 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE 1553 pVCpu->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3; 1554 # endif 1555 *ppPml4 = (PX86PML4)HCPtrGuestCR3; 1556 1557 pgmUnlock(pVM); 1558 return VINF_SUCCESS; 1559 } 1560 } 1518 1561 1519 1562 pgmUnlock(pVM); 1520 return pPGM->CTX_SUFF(pGstAmd64Pml4); 1521 } 1563 *ppPml4 = NULL; 1564 return rc; 1565 } 1566 1522 1567 #endif 1523 1568 … … 1532 1577 { 1533 1578 Assert(iPdpt <= 3); 1534 return pgmGstGetPaePDPTPtr( &pVCpu->pgm.s)->a[iPdpt & 3];1579 return pgmGstGetPaePDPTPtr(pVCpu)->a[iPdpt & 3]; 1535 1580 } 1536 1581 … … 1684 1729 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); 1685 1730 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal)); 1731 1732 /* 1733 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed. 1734 */ 1735 int rc = VINF_SUCCESS; 1736 RTGCPHYS GCPhysCR3; 1737 switch (pVCpu->pgm.s.enmGuestMode) 1738 { 1739 case PGMMODE_PAE: 1740 case PGMMODE_PAE_NX: 1741 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK); 1742 break; 1743 case PGMMODE_AMD64: 1744 case PGMMODE_AMD64_NX: 1745 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK); 1746 break; 1747 default: 1748 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK); 1749 break; 1750 } 1751 1752 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3) 1753 { 1754 RTGCPHYS GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3; 1755 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3; 1756 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3); 1757 if (RT_LIKELY(rc == VINF_SUCCESS)) 1758 { 1759 if (pgmMapAreMappingsFloating(&pVM->pgm.s)) 1760 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3; 1761 } 1762 else 1763 { 1764 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc)); 1765 Assert(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3)); 1766 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3; 1767 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3; 1768 if (pgmMapAreMappingsFloating(&pVM->pgm.s)) 1769 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3; 1770 } 1771 1772 if (fGlobal) 1773 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3Global)); 1774 else 1775 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3)); 1776 } 1777 else 1778 { 1779 # ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT 1780 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); 1781 if (pPool->cDirtyPages) 1782 { 1783 pgmLock(pVM); 1784 pgmPoolResetDirtyPages(pVM); 1785 pgmUnlock(pVM); 1786 } 1787 # endif 1788 /* 1789 * Check if we have a pending update of the CR3 monitoring. 
1790 */ 1791 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3) 1792 { 1793 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3; 1794 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled); 1795 } 1796 if (fGlobal) 1797 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3Global)); 1798 else 1799 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3)); 1800 } 1801 1802 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLB), a); 1803 return rc; 1804 } 1805 1806 1807 /** 1808 * Performs and schedules necessary updates following a CR3 load or reload when 1809 * using nested or extended paging. 1810 * 1811 * This API is an alterantive to PDMFlushTLB that avoids actually flushing the 1812 * TLB and triggering a SyncCR3. 1813 * 1814 * This will normally involve mapping the guest PD or nPDPT 1815 * 1816 * @returns VBox status code. 1817 * @retval VINF_SUCCESS. 1818 * @retval (If applied when not in nested mode: VINF_PGM_SYNC_CR3 if monitoring 1819 * requires a CR3 sync. This can safely be ignored and overridden since 1820 * the FF will be set too then.) 1821 * @param pVCpu VMCPU handle. 1822 * @param cr3 The new cr3. 1823 */ 1824 VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3) 1825 { 1826 PVM pVM = pVCpu->CTX_SUFF(pVM); 1827 1828 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3)); 1829 1830 /* We assume we're only called in nested paging mode. */ 1831 Assert(HWACCMIsNestedPagingActive(pVM) || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT); 1832 Assert(pVM->pgm.s.fMappingsDisabled); 1833 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)); 1686 1834 1687 1835 /* … … 1704 1852 break; 1705 1853 } 1706 1707 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)1708 {1709 RTGCPHYS GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;1710 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;1711 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);1712 if (RT_LIKELY(rc == VINF_SUCCESS))1713 {1714 if (pgmMapAreMappingsFloating(&pVM->pgm.s))1715 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;1716 }1717 else1718 {1719 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));1720 Assert(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));1721 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;1722 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;1723 if (pgmMapAreMappingsFloating(&pVM->pgm.s))1724 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;1725 }1726 1727 if (fGlobal)1728 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3Global));1729 else1730 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3));1731 }1732 else1733 {1734 # ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT1735 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);1736 if (pPool->cDirtyPages)1737 {1738 pgmLock(pVM);1739 pgmPoolResetDirtyPages(pVM);1740 pgmUnlock(pVM);1741 }1742 # endif1743 /*1744 * Check if we have a pending update of the CR3 monitoring.1745 */1746 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)1747 {1748 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;1749 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);1750 }1751 if (fGlobal)1752 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3Global));1753 else1754 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3));1755 }1756 1757 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);1758 return rc;1759 }1760 1761 1762 /**1763 * Performs and schedules necessary updates following a CR3 load or reload when1764 * using nested or extended paging.1765 *1766 * This API is an alterantive to 
PDMFlushTLB that avoids actually flushing the1767 * TLB and triggering a SyncCR3.1768 *1769 * This will normally involve mapping the guest PD or nPDPT1770 *1771 * @returns VBox status code.1772 * @retval VINF_SUCCESS.1773 * @retval (If applied when not in nested mode: VINF_PGM_SYNC_CR3 if monitoring1774 * requires a CR3 sync. This can safely be ignored and overridden since1775 * the FF will be set too then.)1776 * @param pVCpu VMCPU handle.1777 * @param cr3 The new cr3.1778 */1779 VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)1780 {1781 PVM pVM = pVCpu->CTX_SUFF(pVM);1782 1783 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));1784 1785 /* We assume we're only called in nested paging mode. */1786 Assert(HWACCMIsNestedPagingActive(pVM) || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);1787 Assert(pVM->pgm.s.fMappingsDisabled);1788 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));1789 1790 /*1791 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.1792 */1793 int rc = VINF_SUCCESS;1794 RTGCPHYS GCPhysCR3;1795 switch (pVCpu->pgm.s.enmGuestMode)1796 {1797 case PGMMODE_PAE:1798 case PGMMODE_PAE_NX:1799 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);1800 break;1801 case PGMMODE_AMD64:1802 case PGMMODE_AMD64_NX:1803 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);1804 break;1805 default:1806 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);1807 break;1808 }1809 1854 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3) 1810 1855 { … … 2113 2158 VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe) 2114 2159 { 2115 /* later */ 2160 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe)); 2161 if (fNxe) 2162 { 2163 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */ 2164 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX; 2165 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX; 2166 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX; 2167 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */ 2168 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX; 2169 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX; 2170 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX; 2171 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX; 2172 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX; 2173 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX; 2174 } 2175 else 2176 { 2177 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */ 2178 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX; 2179 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX; 2180 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX; 2181 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */ 2182 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX; 2183 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX; 2184 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX; 2185 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX; 2186 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX; 2187 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX; 2188 } 2116 2189 } 2117 2190 -
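PGMNotifyNxeChanged above stops being a stub: it adds the NX bit to, or removes it from, the per-mode MBZ masks whenever the guest toggles EFER.NXE, so that the GST_IS_*_VALID style reserved-bit checks used further down stay consistent with the current guest configuration. The standalone sketch below shows the idea behind such a mask check; SKETCH_PTE_MBZ_BASE and sketchIsPteValid are illustrative assumptions, not the real fGst*Mbz*Mask values or validity macros.

/* Self-contained illustration of MBZ-mask driven reserved-bit validation. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PTE_NX        UINT64_C(0x8000000000000000)  /* bit 63 */
#define SKETCH_PTE_MBZ_BASE  UINT64_C(0x7ff0000000000000)  /* assumed always-reserved bits */

static bool sketchIsPteValid(uint64_t uPte, uint64_t fMbzMask)
{
    /* The entry is acceptable only if none of the currently reserved bits are set. */
    return (uPte & fMbzMask) == 0;
}

int main(void)
{
    /* With NXE disabled, bit 63 belongs to the reserved set; enabling NXE removes it,
     * which is the adjustment PGMNotifyNxeChanged performs on the PGM masks. */
    uint64_t const fMbzNoNx = SKETCH_PTE_MBZ_BASE | SKETCH_PTE_NX;
    uint64_t const fMbzNx   = SKETCH_PTE_MBZ_BASE;

    uint64_t const uPte = SKETCH_PTE_NX | UINT64_C(0x0000000012345003); /* NX + addr + P/RW */
    printf("NXE off: %s\n", sketchIsPteValid(uPte, fMbzNoNx) ? "valid" : "reserved-bit fault");
    printf("NXE on:  %s\n", sketchIsPteValid(uPte, fMbzNx)   ? "valid" : "reserved-bit fault");
    return 0;
}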
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r30325 r30889 3 3 * VBox - Page Manager, Shadow+Guest Paging Template - All context code. 4 4 * 5 * This file is a big challenge! 5 * @remarks The nested page tables on AMD makes use of PGM_SHW_TYPE in 6 * {PGM_TYPE_AMD64, PGM_TYPE_PAE and PGM_TYPE_32BIT} and PGM_GST_TYPE 7 * set to PGM_TYPE_PROT. Half of the code in this file is not 8 * exercised with PGM_SHW_TYPE set to PGM_TYPE_NESTED. 9 * 10 * @remarks Extended page tables (intel) are built with PGM_GST_TYPE set to 11 * PGM_TYPE_PROT (and PGM_SHW_TYPE set to PGM_TYPE_EPT). 12 * 13 * @remarks This file is one big \#ifdef-orgy! 14 * 6 15 */ 7 16 8 17 /* 9 * Copyright (C) 2006-20 07Oracle Corporation18 * Copyright (C) 2006-2010 Oracle Corporation 10 19 * 11 20 * This file is part of VirtualBox Open Source Edition (OSE), as … … 17 26 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind. 18 27 */ 28 19 29 20 30 /******************************************************************************* … … 40 50 41 51 42 /* Filter out some illegal combinations of guest and shadow paging, so we can remove redundant checks inside functions. */ 52 /* 53 * Filter out some illegal combinations of guest and shadow paging, so we can 54 * remove redundant checks inside functions. 55 */ 43 56 #if PGM_GST_TYPE == PGM_TYPE_PAE && PGM_SHW_TYPE != PGM_TYPE_PAE && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT 44 57 # error "Invalid combination; PAE guest implies PAE shadow" … … 83 96 # endif 84 97 85 # if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \ 98 # if ( PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT \ 99 || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \ 86 100 && PGM_SHW_TYPE != PGM_TYPE_NESTED \ 87 101 && (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT) … … 106 120 # if PGM_GST_TYPE == PGM_TYPE_32BIT 107 121 const unsigned iPDSrc = pvFault >> GST_PD_SHIFT; 108 PGSTPD pPDSrc = pgmGstGet32bitPDPtr( &pVCpu->pgm.s);122 PGSTPD pPDSrc = pgmGstGet32bitPDPtr(pVCpu); 109 123 110 124 # elif PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64 … … 113 127 unsigned iPDSrc = 0; /* initialized to shut up gcc */ 114 128 X86PDPE PdpeSrc; 115 PGSTPD pPDSrc = pgmGstGetPaePDPtr( &pVCpu->pgm.s, pvFault, &iPDSrc, &PdpeSrc);129 PGSTPD pPDSrc = pgmGstGetPaePDPtr(pVCpu, pvFault, &iPDSrc, &PdpeSrc); 116 130 117 131 # elif PGM_GST_TYPE == PGM_TYPE_AMD64 118 132 unsigned iPDSrc = 0; /* initialized to shut up gcc */ 119 PX86PML4E pPml4eSrc ;133 PX86PML4E pPml4eSrc = NULL; /* ditto */ 120 134 X86PDPE PdpeSrc; 121 135 PGSTPD pPDSrc; 122 136 123 pPDSrc = pgmGstGetLongModePDPtr( &pVCpu->pgm.s, pvFault, &pPml4eSrc, &PdpeSrc, &iPDSrc);137 pPDSrc = pgmGstGetLongModePDPtr(pVCpu, pvFault, &pPml4eSrc, &PdpeSrc, &iPDSrc); 124 138 Assert(pPml4eSrc); 125 139 # endif … … 158 172 # endif 159 173 160 /* First check for a genuine guest page fault. */ 174 /* 175 * First check for a genuine guest page fault. 176 */ 177 /** @todo This duplicates the page table walk we're doing below. Need to 178 * find some way to avoid this double work, probably by caching 179 * the data. 
*/ 161 180 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) 162 181 STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e); … … 165 184 if (rc == VINF_EM_RAW_GUEST_TRAP) 166 185 { 167 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) 168 = &pVCpu->pgm.s.StatRZTrap0eTime2GuestTrap; }); 186 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2GuestTrap; }); 169 187 return rc; 170 188 } … … 175 193 pgmLock(pVM); 176 194 177 /* Fetch the guest PDE */ 195 /* 196 * Fetch the guest PDE, PDPE and PML4E. 197 */ 178 198 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) 179 199 GSTPDE PdeSrc = pPDSrc->a[iPDSrc]; … … 283 303 if ( !(uErr & X86_TRAP_PF_P) /* not set means page not present instead of page protection violation */ 284 304 && !pPDDst->a[iPDDst].n.u1Present 285 )305 ) 286 306 { 287 307 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2SyncPT; }); … … 390 410 # endif 391 411 ) 392 GCPhys = GST_GET_PDE_BIG_PG_GCPHYS( PdeSrc)412 GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(pVM, PdeSrc) 393 413 | ((RTGCPHYS)pvFault & (GST_BIG_PAGE_OFFSET_MASK ^ PAGE_OFFSET_MASK)); 394 414 else … … 1098 1118 */ 1099 1119 # if PGM_GST_TYPE == PGM_TYPE_32BIT 1100 PGSTPD pPDSrc = pgmGstGet32bitPDPtr( &pVCpu->pgm.s);1120 PGSTPD pPDSrc = pgmGstGet32bitPDPtr(pVCpu); 1101 1121 const unsigned iPDSrc = GCPtrPage >> GST_PD_SHIFT; 1102 1122 GSTPDE PdeSrc = pPDSrc->a[iPDSrc]; … … 1105 1125 # if PGM_GST_TYPE == PGM_TYPE_PAE 1106 1126 X86PDPE PdpeSrc; 1107 PX86PDPAE pPDSrc = pgmGstGetPaePDPtr( &pVCpu->pgm.s, GCPtrPage, &iPDSrc, &PdpeSrc);1127 PX86PDPAE pPDSrc = pgmGstGetPaePDPtr(pVCpu, GCPtrPage, &iPDSrc, &PdpeSrc); 1108 1128 # else /* AMD64 */ 1109 1129 PX86PML4E pPml4eSrc; 1110 1130 X86PDPE PdpeSrc; 1111 PX86PDPAE pPDSrc = pgmGstGetLongModePDPtr( &pVCpu->pgm.s, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);1131 PX86PDPAE pPDSrc = pgmGstGetLongModePDPtr(pVCpu, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc); 1112 1132 # endif 1113 1133 GSTPDE PdeSrc; … … 1224 1244 /* Before freeing the page, check if anything really changed. */ 1225 1245 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, PdeDst.u & SHW_PDE_PG_MASK); 1226 RTGCPHYS GCPhys = GST_GET_PDE_BIG_PG_GCPHYS( PdeSrc);1246 RTGCPHYS GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(pVM, PdeSrc); 1227 1247 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT 1228 1248 /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/ … … 1384 1404 1385 1405 /** 1406 * Modifies a shadow PTE to account for access handlers. 1407 * 1408 * @param pVM The VM handle. 1409 * @param pPage The page in question. 1410 * @param fPteSrc The flags of the source PTE. 1411 * @param pPteDst The shadow PTE (output). 1412 */ 1413 DECLINLINE(void) PGM_BTH_NAME(SyncHandlerPte)(PVM pVM, PCPGMPAGE pPage, uint32_t fPteSrc, PSHWPTE pPteDst) 1414 { 1415 /** @todo r=bird: Are we actually handling dirty and access bits for pages with access handlers correctly? No. 1416 * Update: \#PF should deal with this before or after calling the handlers. It has all the info to do the job efficiently. 
*/ 1417 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) 1418 { 1419 #if PGM_SHW_TYPE == PGM_TYPE_EPT 1420 pPteDst->u = PGM_PAGE_GET_HCPHYS(pPage); 1421 pPteDst->n.u1Present = 1; 1422 pPteDst->n.u1Execute = 1; 1423 pPteDst->n.u1IgnorePAT = 1; 1424 pPteDst->n.u3EMT = VMX_EPT_MEMTYPE_WB; 1425 /* PteDst.n.u1Write = 0 && PteDst.n.u1Size = 0 */ 1426 #else 1427 pPteDst->u = (fPteSrc & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT | X86_PTE_RW)) 1428 | PGM_PAGE_GET_HCPHYS(pPage); 1429 #endif 1430 } 1431 #ifdef PGM_WITH_MMIO_OPTIMIZATIONS 1432 # if PGM_SHW_TYPE == PGM_TYPE_EPT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64 1433 else if ( PGM_PAGE_IS_MMIO(pPage) 1434 # if PGM_SHW_TYPE != PGM_TYPE_EPT 1435 && ( (fPteSrc & (X86_PTE_RW /*| X86_PTE_D | X86_PTE_A*/ | X86_PTE_US )) /* #PF handles D & A first. */ 1436 == (X86_PTE_RW /*| X86_PTE_D | X86_PTE_A*/) 1437 || BTH_IS_NP_ACTIVE(pVM) ) 1438 # endif 1439 # if PGM_SHW_TYPE == PGM_TYPE_AMD64 1440 && pVM->pgm.s.fLessThan52PhysicalAddressBits 1441 # endif 1442 ) 1443 { 1444 LogFlow(("SyncHandlerPte: MMIO page -> invalid \n")); 1445 # if PGM_SHW_TYPE == PGM_TYPE_EPT 1446 /* 25.2.3.1: Reserved physical address bit -> EPT Misconfiguration (exit 49) */ 1447 pPteDst->u = pVM->pgm.s.HCPhysInvMmioPg; 1448 /* 25.2.3.1: bits 2:0 = 010b -> EPT Misconfiguration (exit 49) */ 1449 pPteDst->n.u1Present = 0; 1450 pPteDst->n.u1Write = 1; 1451 pPteDst->n.u1Execute = 0; 1452 /* 25.2.3.1: leaf && 2:0 != 0 && u3Emt in {2, 3, 7} -> EPT Misconfiguration */ 1453 pPteDst->n.u3EMT = 7; 1454 # else 1455 /* Set high page frame bits that MBZ (bankers on PAE, CPU dependent on AMD64). */ 1456 pPteDst->u = pVM->pgm.s.HCPhysInvMmioPg | X86_PTE_PAE_MBZ_MASK_NO_NX | X86_PTE_P; 1457 # endif 1458 } 1459 # endif 1460 #endif /* PGM_WITH_MMIO_OPTIMIZATIONS */ 1461 else 1462 { 1463 LogFlow(("SyncHandlerPte: monitored page (%R[pgmpage]) -> mark not present\n", pPage)); 1464 pPteDst->u = 0; 1465 } 1466 /** @todo count these kinds of entries. */ 1467 } 1468 1469 1470 /** 1386 1471 * Creates a 4K shadow page for a guest page. 1387 1472 * 1388 1473 * For 4M pages the caller must convert the PDE4M to a PTE, this includes adjusting the 1389 * physical address. The PdeSrc argument only the flags are used. No page structured1390 * will be mapped in this function.1474 * physical address. The PdeSrc argument only the flags are used. No page 1475 * structured will be mapped in this function. 1391 1476 * 1392 1477 * @param pVCpu The VMCPU handle. … … 1400 1485 * @remark Not used for 2/4MB pages! 1401 1486 */ 1402 DECLINLINE(void) PGM_BTH_NAME(SyncPageWorker)(PVMCPU pVCpu, PSHWPTE pPteDst, GSTPDE PdeSrc, GSTPTE PteSrc, PPGMPOOLPAGE pShwPage, unsigned iPTDst) 1487 DECLINLINE(void) PGM_BTH_NAME(SyncPageWorker)(PVMCPU pVCpu, PSHWPTE pPteDst, GSTPDE PdeSrc, GSTPTE PteSrc, 1488 PPGMPOOLPAGE pShwPage, unsigned iPTDst) 1403 1489 { 1404 if (PteSrc.n.u1Present) 1490 if ( PteSrc.n.u1Present 1491 && GST_IS_PTE_VALID(pVCpu, PteSrc)) 1405 1492 { 1406 1493 PVM pVM = pVCpu->CTX_SUFF(pVM); 1407 1494 1408 # if defined(PGMPOOL_WITH_OPTIMIZED_DIRTY_PT)\1409 && PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)\1410 && (PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64)1495 # if defined(PGMPOOL_WITH_OPTIMIZED_DIRTY_PT) \ 1496 && PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) \ 1497 && (PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) 1411 1498 if (pShwPage->fDirty) 1412 1499 { … … 1425 1512 if (RT_SUCCESS(rc)) 1426 1513 { 1427 /* Ignore ballooned pages. 
Don't return errors or use a fatal assert here as part of a shadow sync range might included ballooned pages. */ 1514 /* Ignore ballooned pages. 1515 Don't return errors or use a fatal assert here as part of a 1516 shadow sync range might included ballooned pages. */ 1428 1517 if (PGM_PAGE_IS_BALLOONED(pPage)) 1518 { 1519 Assert(!pPteDst->n.u1Present); /** @todo user tracking needs updating if this triggers. */ 1429 1520 return; 1521 } 1430 1522 1431 1523 #ifndef VBOX_WITH_NEW_LAZY_PAGE_ALLOC 1432 /* Try to make the page writable if necessary. */1524 /* Make the page writable if necessary. */ 1433 1525 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM 1434 1526 && ( PGM_PAGE_IS_ZERO(pPage) … … 1450 1542 #endif 1451 1543 1452 /** @todo investiage PWT, PCD and PAT. */1453 1544 /* 1454 1545 * Make page table entry. … … 1456 1547 SHWPTE PteDst; 1457 1548 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)) 1458 { 1459 /** @todo r=bird: Are we actually handling dirty and access bits for pages with access handlers correctly? No. */ 1460 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) 1461 { 1462 #if PGM_SHW_TYPE == PGM_TYPE_EPT 1463 PteDst.u = PGM_PAGE_GET_HCPHYS(pPage); 1464 PteDst.n.u1Present = 1; 1465 PteDst.n.u1Execute = 1; 1466 PteDst.n.u1IgnorePAT = 1; 1467 PteDst.n.u3EMT = VMX_EPT_MEMTYPE_WB; 1468 /* PteDst.n.u1Write = 0 && PteDst.n.u1Size = 0 */ 1469 #else 1470 PteDst.u = (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT | X86_PTE_RW)) 1471 | PGM_PAGE_GET_HCPHYS(pPage); 1472 #endif 1473 } 1474 else 1475 { 1476 LogFlow(("SyncPageWorker: monitored page (%RHp) -> mark not present\n", PGM_PAGE_GET_HCPHYS(pPage))); 1477 PteDst.u = 0; 1478 } 1479 /** @todo count these two kinds. */ 1480 } 1549 PGM_BTH_NAME(SyncHandlerPte)(pVM, pPage, 1550 PteSrc.u & ~( X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT 1551 | X86_PTE_PCD | X86_PTE_PWT | X86_PTE_RW), 1552 &PteDst); 1481 1553 else 1482 1554 { … … 1492 1564 PteDst.u = 0; 1493 1565 } 1494 else1495 1566 /* 1496 1567 * If the page is not flagged as dirty and is writable, then make it read-only, so we can set the dirty bit 1497 1568 * when the page is modified. 1498 1569 */ 1499 if (!PteSrc.n.u1Dirty && (PdeSrc.n.u1Write & PteSrc.n.u1Write))1570 else if (!PteSrc.n.u1Dirty && (PdeSrc.n.u1Write & PteSrc.n.u1Write)) 1500 1571 { 1501 1572 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPage)); … … 1521 1592 #endif 1522 1593 } 1523 } 1524 1525 /*1526 * Make sure only allocated pages are mapped writable.1527 */1528 if ( PteDst.n.u1Write1529 && PteDst.n.u1Present1530 && PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)1531 {1532 /* Still applies to shared pages. */1533 Assert(!PGM_PAGE_IS_ZERO(pPage));1534 PteDst.n.u1Write = 0; /** @todo this isn't quite working yet. */1535 Log3(("SyncPageWorker: write-protecting %RGp pPage=%R[pgmpage]at iPTDst=%d\n", (RTGCPHYS)(PteSrc.u & X86_PTE_PAE_PG_MASK), pPage, iPTDst));1594 1595 /* 1596 * Make sure only allocated pages are mapped writable. 1597 */ 1598 if ( PteDst.n.u1Write 1599 && PteDst.n.u1Present 1600 && PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED) 1601 { 1602 /* Still applies to shared pages. */ 1603 Assert(!PGM_PAGE_IS_ZERO(pPage)); 1604 PteDst.n.u1Write = 0; /** @todo this isn't quite working yet. Why, isn't it? 
*/ 1605 Log3(("SyncPageWorker: write-protecting %RGp pPage=%R[pgmpage]at iPTDst=%d\n", (RTGCPHYS)(PteSrc.u & X86_PTE_PAE_PG_MASK), pPage, iPTDst)); 1606 } 1536 1607 } 1537 1608 … … 1564 1635 #endif 1565 1636 ASMAtomicWriteSize(pPteDst, PteDst.u); 1637 return; 1566 1638 } 1567 /* else MMIO or invalid page, we must handle them manually in the #PF handler. */ 1568 /** @todo count these. */ 1569 } 1639 1640 /** @todo count these three different kinds. */ 1641 Log2(("SyncPageWorker: invalid address in Pte\n")); 1642 } 1643 else if (!PteSrc.n.u1Present) 1644 Log2(("SyncPageWorker: page not present in Pte\n")); 1570 1645 else 1571 { 1572 /* 1573 * Page not-present. 1574 */ 1575 Log2(("SyncPageWorker: page not present in Pte\n")); 1576 /* Keep user track up to date. */ 1577 if (pPteDst->n.u1Present) 1578 { 1579 Log2(("SyncPageWorker: deref! *pPteDst=%RX64\n", (uint64_t)pPteDst->u)); 1580 PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVCpu, pShwPage, pPteDst->u & SHW_PTE_PG_MASK, iPTDst); 1581 } 1582 ASMAtomicWriteSize(pPteDst, 0); 1583 /** @todo count these. */ 1584 } 1646 Log2(("SyncPageWorker: invalid Pte\n")); 1647 1648 /* 1649 * The page is not present or the PTE is bad. Replace the shadow PTE by 1650 * an empty entry, making sure to keep the user tracking up to date. 1651 */ 1652 if (pPteDst->n.u1Present) 1653 { 1654 Log2(("SyncPageWorker: deref! *pPteDst=%RX64\n", (uint64_t)pPteDst->u)); 1655 PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVCpu, pShwPage, pPteDst->u & SHW_PTE_PG_MASK, iPTDst); 1656 } 1657 ASMAtomicWriteSize(pPteDst, 0); 1585 1658 } 1586 1659 … … 1591 1664 * There are no conflicts at this point, neither is there any need for 1592 1665 * page table allocations. 1666 * 1667 * When called in PAE or AMD64 guest mode, the guest PDPE shall be valid. 1668 * When called in AMD64 guest mode, the guest PML4E shall be valid. 1593 1669 * 1594 1670 * @returns VBox status code. … … 1686 1762 */ 1687 1763 # if PGM_GST_TYPE == PGM_TYPE_32BIT 1688 const bool fBigPage = PdeSrc.b.u1Size && CPUMIsGuestPageSizeExtEnabled(pVCpu);1764 const bool fBigPage = PdeSrc.b.u1Size && CPUMIsGuestPageSizeExtEnabled(pVCpu); 1689 1765 # else 1690 const bool fBigPage = PdeSrc.b.u1Size; 1691 # endif 1766 const bool fBigPage = PdeSrc.b.u1Size; 1767 # endif 1768 const bool fPdeValid = !fBigPage ? GST_IS_PDE_VALID(pVCpu, PdeSrc) : GST_IS_BIG_PDE_VALID(pVCpu, PdeSrc); 1692 1769 RTGCPHYS GCPhys; 1693 1770 if (!fBigPage) … … 1701 1778 else 1702 1779 { 1703 GCPhys = GST_GET_PDE_BIG_PG_GCPHYS( PdeSrc);1780 GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(pVM, PdeSrc); 1704 1781 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT 1705 1782 /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/ … … 1707 1784 # endif 1708 1785 } 1709 if ( pShwPage->GCPhys == GCPhys 1786 if ( fPdeValid 1787 && pShwPage->GCPhys == GCPhys 1710 1788 && PdeSrc.n.u1Present 1711 && (PdeSrc.n.u1User == PdeDst.n.u1User)1789 && PdeSrc.n.u1User == PdeDst.n.u1User 1712 1790 && (PdeSrc.n.u1Write == PdeDst.n.u1Write || !PdeDst.n.u1Write) 1713 1791 # if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE) … … 1819 1897 */ 1820 1898 /* Calculate the GC physical address of this 4KB shadow page. */ 1821 GCPhys = GST_GET_PDE_BIG_PG_GCPHYS( PdeSrc) | (GCPtrPage & GST_BIG_PAGE_OFFSET_MASK);1899 GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(pVM, PdeSrc) | (GCPtrPage & GST_BIG_PAGE_OFFSET_MASK); 1822 1900 /* Find ram range. 
*/ 1823 1901 PPGMPAGE pPage; … … 1852 1930 */ 1853 1931 SHWPTE PteDst; 1854 PteDst.u = (PdeSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))1855 | PGM_PAGE_GET_HCPHYS(pPage);1856 1932 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)) 1857 { 1858 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) 1859 PteDst.n.u1Write = 0; 1860 else 1861 PteDst.u = 0; 1862 } 1933 PGM_BTH_NAME(SyncHandlerPte)(pVM, pPage, 1934 PdeSrc.u & ~( X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK 1935 | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT), 1936 &PteDst); 1937 else 1938 PteDst.u = (PdeSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT)) 1939 | PGM_PAGE_GET_HCPHYS(pPage); 1863 1940 1864 1941 const unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK; … … 1909 1986 } 1910 1987 else 1988 { 1911 1989 LogFlow(("PGM_GCPHYS_2_PTR %RGp (big) failed with %Rrc\n", GCPhys, rc)); 1990 /** @todo must wipe the shadow page table in this case. */ 1991 } 1912 1992 } 1913 1993 # if defined(IN_RC) … … 1917 1997 return VINF_SUCCESS; 1918 1998 } 1999 1919 2000 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPagePDNAs)); 1920 2001 } 1921 else 2002 else if (fPdeValid) 1922 2003 { 1923 2004 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPagePDOutOfSync)); … … 1925 2006 GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u, pShwPage->GCPhys, GCPhys)); 1926 2007 } 2008 else 2009 { 2010 /// @todo STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPagePDOutOfSync)); 2011 Log2(("SyncPage: Bad PDE at %RGp PdeSrc=%RX64 PdeDst=%RX64 (GCPhys %RGp vs %RGp)\n", 2012 GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u, pShwPage->GCPhys, GCPhys)); 2013 } 1927 2014 1928 2015 /* 1929 * Mark the PDE not present. Restart the instruction and let #PF call SyncPT.2016 * Mark the PDE not present. Restart the instruction and let #PF call SyncPT. 1930 2017 * Yea, I'm lazy. 1931 2018 */ … … 1939 2026 PGM_INVL_VCPU_TLBS(pVCpu); 1940 2027 return VINF_PGM_SYNCPAGE_MODIFIED_PDE; 2028 1941 2029 1942 2030 #elif (PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) \ … … 1949 2037 * Get the shadow PDE, find the shadow page table in the pool. 1950 2038 */ 1951 # if PGM_SHW_TYPE == PGM_TYPE_32BIT2039 # if PGM_SHW_TYPE == PGM_TYPE_32BIT 1952 2040 X86PDE PdeDst = pgmShwGet32BitPDE(&pVCpu->pgm.s, GCPtrPage); 1953 2041 1954 # elif PGM_SHW_TYPE == PGM_TYPE_PAE2042 # elif PGM_SHW_TYPE == PGM_TYPE_PAE 1955 2043 X86PDEPAE PdeDst = pgmShwGetPaePDE(&pVCpu->pgm.s, GCPtrPage); 1956 2044 1957 # elif PGM_SHW_TYPE == PGM_TYPE_AMD642045 # elif PGM_SHW_TYPE == PGM_TYPE_AMD64 1958 2046 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK); 1959 2047 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64; NOREF(iPdpt); … … 1966 2054 Assert(pPDDst && pPdptDst); 1967 2055 PdeDst = pPDDst->a[iPDDst]; 1968 # elif PGM_SHW_TYPE == PGM_TYPE_EPT2056 # elif PGM_SHW_TYPE == PGM_TYPE_EPT 1969 2057 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK); 1970 2058 PEPTPD pPDDst; … … 1979 2067 Assert(pPDDst); 1980 2068 PdeDst = pPDDst->a[iPDDst]; 1981 # endif2069 # endif 1982 2070 /* In the guest SMP case we could have blocked while another VCPU reused this page table. 
*/ 1983 2071 if (!PdeDst.n.u1Present) … … 2024 2112 if (!pPTDst->a[iPTDst].n.u1Present) 2025 2113 { 2026 GSTPTE PteSrc;2027 2028 2114 RTGCPTR GCPtrCurPage = (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT); 2115 GSTPTE PteSrc; 2029 2116 2030 2117 /* Fake the page table entry */ … … 2056 2143 # endif /* PGM_SYNC_N_PAGES */ 2057 2144 { 2058 GSTPTE PteSrc;2059 const unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;2060 RTGCPTR GCPtrCurPage = (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT);2145 const unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK; 2146 RTGCPTR GCPtrCurPage = (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT); 2147 GSTPTE PteSrc; 2061 2148 2062 2149 /* Fake the page table entry */ … … 2087 2174 2088 2175 #if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) 2176 2089 2177 /** 2090 * Investigate page fault and handle write protection page faults caused by 2091 * dirty bit tracking. 2178 * CheckPageFault helper for returning a page fault indicating a non-present 2179 * (NP) entry in the page translation structures. 2180 * 2181 * @returns VINF_EM_RAW_GUEST_TRAP. 2182 * @param pVCpu The virtual CPU to operate on. 2183 * @param uErr The error code of the shadow fault. Corrections to 2184 * TRPM's copy will be made if necessary. 2185 * @param GCPtrPage For logging. 2186 * @param uPageFaultLevel For logging. 2187 */ 2188 DECLINLINE(int) PGM_BTH_NAME(CheckPageFaultReturnNP)(PVMCPU pVCpu, uint32_t uErr, RTGCPTR GCPtrPage, unsigned uPageFaultLevel) 2189 { 2190 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyTrackRealPF)); 2191 AssertMsg(!(uErr & X86_TRAP_PF_P), ("%#x\n", uErr)); 2192 AssertMsg(!(uErr & X86_TRAP_PF_RSVD), ("%#x\n", uErr)); 2193 if (uErr & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) 2194 TRPMSetErrorCode(pVCpu, uErr & ~(X86_TRAP_PF_RSVD | X86_TRAP_PF_P)); 2195 2196 Log(("CheckPageFault: real page fault (notp) at %RGv (%d)\n", GCPtrPage, uPageFaultLevel)); 2197 return VINF_EM_RAW_GUEST_TRAP; 2198 } 2199 2200 2201 /** 2202 * CheckPageFault helper for returning a page fault indicating a reserved bit 2203 * (RSVD) error in the page translation structures. 2204 * 2205 * @returns VINF_EM_RAW_GUEST_TRAP. 2206 * @param pVCpu The virtual CPU to operate on. 2207 * @param uErr The error code of the shadow fault. Corrections to 2208 * TRPM's copy will be made if necessary. 2209 * @param GCPtrPage For logging. 2210 * @param uPageFaultLevel For logging. 2211 */ 2212 DECLINLINE(int) PGM_BTH_NAME(CheckPageFaultReturnRSVD)(PVMCPU pVCpu, uint32_t uErr, RTGCPTR GCPtrPage, unsigned uPageFaultLevel) 2213 { 2214 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyTrackRealPF)); 2215 if ((uErr & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) != (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) 2216 TRPMSetErrorCode(pVCpu, uErr | X86_TRAP_PF_RSVD | X86_TRAP_PF_P); 2217 2218 Log(("CheckPageFault: real page fault (rsvd) at %RGv (%d)\n", GCPtrPage, uPageFaultLevel)); 2219 return VINF_EM_RAW_GUEST_TRAP; 2220 } 2221 2222 2223 /** 2224 * CheckPageFault helper for returning a page protection fault (P). 2225 * 2226 * @returns VINF_EM_RAW_GUEST_TRAP. 2227 * @param pVCpu The virtual CPU to operate on. 2228 * @param uErr The error code of the shadow fault. Corrections to 2229 * TRPM's copy will be made if necessary. 2230 * @param GCPtrPage For logging. 2231 * @param uPageFaultLevel For logging. 
2232 */ 2233 DECLINLINE(int) PGM_BTH_NAME(CheckPageFaultReturnProt)(PVMCPU pVCpu, uint32_t uErr, RTGCPTR GCPtrPage, unsigned uPageFaultLevel) 2234 { 2235 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyTrackRealPF)); 2236 AssertMsg(uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_US | X86_TRAP_PF_ID), ("%#x\n", uErr)); 2237 if ((uErr & (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) != X86_TRAP_PF_P) 2238 TRPMSetErrorCode(pVCpu, (uErr & ~X86_TRAP_PF_RSVD) | X86_TRAP_PF_P); 2239 2240 Log(("CheckPageFault: real page fault (prot) at %RGv (%d)\n", GCPtrPage, uPageFaultLevel)); 2241 return VINF_EM_RAW_GUEST_TRAP; 2242 } 2243 2244 2245 /** 2246 * Investigate a page fault to identify ones targetted at the guest and to 2247 * handle write protection page faults caused by dirty bit tracking. 2248 * 2249 * This will do detect invalid entries and raise X86_TRAP_PF_RSVD. 2092 2250 * 2093 2251 * @returns VBox status code. 2094 2252 * @param pVCpu The VMCPU handle. 2095 * @param uErr Page fault error code. 2253 * @param uErr Page fault error code. The X86_TRAP_PF_RSVD flag 2254 * cannot be trusted as it is used for MMIO optimizations. 2096 2255 * @param pPdeSrc Guest page directory entry. 2097 2256 * @param GCPtrPage Guest context page address. … … 2099 2258 PGM_BTH_DECL(int, CheckPageFault)(PVMCPU pVCpu, uint32_t uErr, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage) 2100 2259 { 2101 bool fUserLevelFault = !!(uErr & X86_TRAP_PF_US); 2102 bool fWriteFault = !!(uErr & X86_TRAP_PF_RW); 2103 bool fMaybeWriteProtFault = fWriteFault && (fUserLevelFault || CPUMIsGuestR0WriteProtEnabled(pVCpu)); 2260 bool fUserLevelFault = !!(uErr & X86_TRAP_PF_US); 2261 bool fWriteFault = !!(uErr & X86_TRAP_PF_RW); 2104 2262 # if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE) 2105 bool fMaybeNXEFault =(uErr & X86_TRAP_PF_ID) && CPUMIsGuestNXEnabled(pVCpu);2106 # endif 2107 unsigned uPageFaultLevel;2108 int rc;2109 PVM pVM = pVCpu->CTX_SUFF(pVM);2263 bool fMaybeNXEFault = (uErr & X86_TRAP_PF_ID) && CPUMIsGuestNXEnabled(pVCpu); 2264 # endif 2265 bool fMaybeWriteProtFault = fWriteFault && (fUserLevelFault || CPUMIsGuestR0WriteProtEnabled(pVCpu)); 2266 PVM pVM = pVCpu->CTX_SUFF(pVM); 2267 int rc; 2110 2268 2111 2269 LogFlow(("CheckPageFault: GCPtrPage=%RGv uErr=%#x PdeSrc=%08x\n", GCPtrPage, uErr, pPdeSrc->u)); 2112 2270 2113 # if PGM_GST_TYPE == PGM_TYPE_PAE \ 2114 || PGM_GST_TYPE == PGM_TYPE_AMD642115 2116 # if PGM_GST_TYPE == PGM_TYPE_AMD64 2117 PX86PML4E pPml4eSrc;2118 PX86PDPE pPdpeSrc;2119 2120 pPdpeSrc = pgmGstGetLongModePDPTPtr(&pVCpu->pgm.s, GCPtrPage, &pPml4eSrc);2121 Assert(pPml4eSrc); 2122 2271 /* 2272 * Note! For PAE it is safe to assume that bad guest physical addresses 2273 * (which returns all FFs) in the translation tables will cause 2274 * #PF(RSVD). The same will be the case for long mode provided the 2275 * physical address width is less than 52 bits - this we ASSUME. 2276 * 2277 * Note! No convenient shortcuts here, we have to validate everything! 2278 */ 2279 2280 # if PGM_GST_TYPE == PGM_TYPE_AMD64 2123 2281 /* 2124 2282 * Real page fault? 
(PML4E level) 2125 2283 */ 2126 if ( (uErr & X86_TRAP_PF_RSVD) 2127 || !pPml4eSrc->n.u1Present 2128 || (fMaybeWriteProtFault && !pPml4eSrc->n.u1Write) 2129 || (fMaybeNXEFault && pPml4eSrc->n.u1NoExecute) 2130 || (fUserLevelFault && !pPml4eSrc->n.u1User) 2131 ) 2132 { 2133 uPageFaultLevel = 0; 2134 goto l_UpperLevelPageFault; 2135 } 2136 Assert(pPdpeSrc); 2137 2138 # else /* PAE */ 2139 PX86PDPE pPdpeSrc = pgmGstGetPaePDPEPtr(&pVCpu->pgm.s, GCPtrPage); 2140 # endif /* PAE */ 2284 PX86PML4 pPml4Src = pgmGstGetLongModePML4Ptr(pVCpu); 2285 if (RT_UNLIKELY(!pPml4Src)) 2286 return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 0); 2287 2288 PX86PML4E pPml4eSrc = &pPml4Src->a[(GCPtrPage >> X86_PML4_SHIFT) & X86_PML4_MASK]; 2289 if (!pPml4eSrc->n.u1Present) 2290 return PGM_BTH_NAME(CheckPageFaultReturnNP)(pVCpu, uErr, GCPtrPage, 0); 2291 if (RT_UNLIKELY(!GST_IS_PML4E_VALID(pVCpu, *pPml4eSrc))) 2292 return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 0); 2293 if ( (fMaybeWriteProtFault && !pPml4eSrc->n.u1Write) 2294 || (fMaybeNXEFault && pPml4eSrc->n.u1NoExecute) 2295 || (fUserLevelFault && !pPml4eSrc->n.u1User) ) 2296 return PGM_BTH_NAME(CheckPageFaultReturnProt)(pVCpu, uErr, GCPtrPage, 0); 2141 2297 2142 2298 /* 2143 2299 * Real page fault? (PDPE level) 2144 2300 */ 2145 if ( (uErr & X86_TRAP_PF_RSVD) 2146 || !pPdpeSrc->n.u1Present 2147 # if PGM_GST_TYPE == PGM_TYPE_AMD64 /* NX, r/w, u/s bits in the PDPE are long mode only */ 2148 || (fMaybeWriteProtFault && !pPdpeSrc->lm.u1Write) 2149 || (fMaybeNXEFault && pPdpeSrc->lm.u1NoExecute) 2150 || (fUserLevelFault && !pPdpeSrc->lm.u1User) 2151 # endif 2152 ) 2153 { 2154 uPageFaultLevel = 1; 2155 goto l_UpperLevelPageFault; 2156 } 2157 # endif 2301 PX86PDPT pPdptSrc; 2302 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPml4eSrc->u & X86_PML4E_PG_MASK, &pPdptSrc); 2303 if (RT_FAILURE(rc)) 2304 { 2305 AssertMsgReturn(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc), rc); 2306 return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 1); 2307 } 2308 2309 PX86PDPE pPdpeSrc = &pPdptSrc->a[(GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64]; 2310 if (!pPdpeSrc->n.u1Present) 2311 return PGM_BTH_NAME(CheckPageFaultReturnNP)(pVCpu, uErr, GCPtrPage, 1); 2312 if (!GST_IS_PDPE_VALID(pVCpu, *pPdpeSrc)) 2313 return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 1); 2314 if ( (fMaybeWriteProtFault && !pPdpeSrc->lm.u1Write) 2315 || (fMaybeNXEFault && pPdpeSrc->lm.u1NoExecute) 2316 || (fUserLevelFault && !pPdpeSrc->lm.u1User) ) 2317 return PGM_BTH_NAME(CheckPageFaultReturnProt)(pVCpu, uErr, GCPtrPage, 1); 2318 2319 # elif PGM_GST_TYPE == PGM_TYPE_PAE 2320 /* 2321 * Real page fault? (PDPE level) 2322 */ 2323 PX86PDPT pPdptSrc = pgmGstGetPaePDPTPtr(pVCpu); 2324 if (RT_UNLIKELY(!pPdptSrc)) 2325 return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 1); 2326 /** @todo Handle bad CR3 address. */ 2327 PX86PDPE pPdpeSrc = pgmGstGetPaePDPEPtr(pVCpu, GCPtrPage); 2328 if (!pPdpeSrc->n.u1Present) 2329 return PGM_BTH_NAME(CheckPageFaultReturnNP)(pVCpu, uErr, GCPtrPage, 1); 2330 if (!GST_IS_PDPE_VALID(pVCpu, *pPdpeSrc)) 2331 return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 1); 2332 # endif /* PGM_GST_TYPE == PGM_TYPE_PAE */ 2158 2333 2159 2334 /* 2160 2335 * Real page fault? 
(PDE level) 2161 2336 */ 2162 if ( (uErr & X86_TRAP_PF_RSVD) 2163 || !pPdeSrc->n.u1Present 2164 || (fMaybeWriteProtFault && !pPdeSrc->n.u1Write) 2337 if (!pPdeSrc->n.u1Present) 2338 return PGM_BTH_NAME(CheckPageFaultReturnNP)(pVCpu, uErr, GCPtrPage, 2); 2339 # if PGM_GST_TYPE == PGM_TYPE_32BIT 2340 bool const fBigPage = pPdeSrc->b.u1Size && CPUMIsGuestPageSizeExtEnabled(pVCpu); 2341 # else 2342 bool const fBigPage = pPdeSrc->b.u1Size; 2343 # endif 2344 if (!fBigPage ? !GST_IS_PDE_VALID(pVCpu, *pPdeSrc) : !GST_IS_BIG_PDE_VALID(pVCpu, *pPdeSrc)) 2345 return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 2); 2346 if ( (fMaybeWriteProtFault && !pPdeSrc->n.u1Write) 2165 2347 # if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE) 2166 || (fMaybeNXEFault && pPdeSrc->n.u1NoExecute) 2167 # endif 2168 || (fUserLevelFault && !pPdeSrc->n.u1User) ) 2169 { 2170 uPageFaultLevel = 2; 2171 goto l_UpperLevelPageFault; 2172 } 2348 || (fMaybeNXEFault && pPdeSrc->n.u1NoExecute) 2349 # endif 2350 || (fUserLevelFault && !pPdeSrc->n.u1User) ) 2351 return PGM_BTH_NAME(CheckPageFaultReturnProt)(pVCpu, uErr, GCPtrPage, 2); 2173 2352 2174 2353 /* 2175 * First check the easy case where the page directory has been marked read-only to track 2176 * the dirty bit of an emulated BIG page 2177 */ 2178 if ( pPdeSrc->b.u1Size 2179 # if PGM_GST_TYPE == PGM_TYPE_32BIT 2180 && CPUMIsGuestPageSizeExtEnabled(pVCpu) 2181 # endif 2182 ) 2354 * First check the easy case where the page directory has been marked 2355 * read-only to track the dirty bit of an emulated BIG page. 2356 */ 2357 if (fBigPage) 2183 2358 { 2184 2359 /* Mark guest page directory as accessed */ 2185 # if PGM_GST_TYPE == PGM_TYPE_AMD642360 # if PGM_GST_TYPE == PGM_TYPE_AMD64 2186 2361 pPml4eSrc->n.u1Accessed = 1; 2187 2362 pPdpeSrc->lm.u1Accessed = 1; 2188 # endif2363 # endif 2189 2364 pPdeSrc->b.u1Accessed = 1; 2190 2365 2366 /* Mark the entry guest PDE dirty it it's a write access. */ 2367 if (fWriteFault) 2368 pPdeSrc->b.u1Dirty = 1; 2369 } 2370 else 2371 { 2191 2372 /* 2192 * Only write protection page faults are relevant here. 2373 * Map the guest page table. 2374 */ 2375 PGSTPT pPTSrc; 2376 PGSTPTE pPteSrc; 2377 GSTPTE PteSrc; 2378 rc = PGM_GCPHYS_2_PTR(pVM, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc); 2379 if (RT_SUCCESS(rc)) 2380 { 2381 pPteSrc = &pPTSrc->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK]; 2382 PteSrc.u = pPteSrc->u; 2383 } 2384 else if (rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS) 2385 { 2386 /* All bits in the PTE are set. */ 2387 # if PGM_GST_TYPE == PGM_TYPE_32BIT 2388 PteSrc.u = UINT32_MAX; 2389 # else 2390 PteSrc.u = UINT64_MAX; 2391 # endif 2392 pPteSrc = &PteSrc; 2393 } 2394 else 2395 AssertRCReturn(rc, rc); 2396 2397 /* 2398 * Real page fault? 2399 */ 2400 if (!PteSrc.n.u1Present) 2401 return PGM_BTH_NAME(CheckPageFaultReturnNP)(pVCpu, uErr, GCPtrPage, 3); 2402 if (!GST_IS_PTE_VALID(pVCpu, PteSrc)) 2403 return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 3); 2404 if ( (fMaybeWriteProtFault && !PteSrc.n.u1Write) 2405 # if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE) 2406 || (fMaybeNXEFault && PteSrc.n.u1NoExecute) 2407 # endif 2408 || (fUserLevelFault && !PteSrc.n.u1User) ) 2409 return PGM_BTH_NAME(CheckPageFaultReturnProt)(pVCpu, uErr, GCPtrPage, 0); 2410 2411 LogFlow(("CheckPageFault: page fault at %RGv PteSrc.u=%08x\n", GCPtrPage, PteSrc.u)); 2412 2413 /* 2414 * Set the accessed bits in the page directory and the page table. 
2415 */ 2416 # if PGM_GST_TYPE == PGM_TYPE_AMD64 2417 pPml4eSrc->n.u1Accessed = 1; 2418 pPdpeSrc->lm.u1Accessed = 1; 2419 # endif 2420 pPdeSrc->n.u1Accessed = 1; 2421 pPteSrc->n.u1Accessed = 1; 2422 2423 /* 2424 * Set the dirty flag in the PTE if it's a write access. 2193 2425 */ 2194 2426 if (fWriteFault) 2195 2427 { 2196 /* Mark guest page directory as dirty (BIG page only). */ 2197 pPdeSrc->b.u1Dirty = 1; 2198 } 2199 return VINF_SUCCESS; 2200 } 2201 /* else: 4KB page table */ 2202 2203 /* 2204 * Map the guest page table. 2205 */ 2206 PGSTPT pPTSrc; 2207 rc = PGM_GCPHYS_2_PTR(pVM, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc); 2208 if (RT_SUCCESS(rc)) 2209 { 2210 /* 2211 * Real page fault? 2212 */ 2213 PGSTPTE pPteSrc = &pPTSrc->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK]; 2214 const GSTPTE PteSrc = *pPteSrc; 2215 if ( !PteSrc.n.u1Present 2216 || (fMaybeWriteProtFault && !PteSrc.n.u1Write) 2217 # if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE) 2218 || (fMaybeNXEFault && PteSrc.n.u1NoExecute) 2219 # endif 2220 || (fUserLevelFault && !PteSrc.n.u1User) 2221 ) 2222 { 2223 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyTrackRealPF)); 2224 LogFlow(("CheckPageFault: real page fault at %RGv PteSrc.u=%08x (2)\n", GCPtrPage, PteSrc.u)); 2225 2226 /* Check the present bit as the shadow tables can cause different error codes by being out of sync. 2227 * See the 2nd case above as well. 2228 */ 2229 if (pPdeSrc->n.u1Present && pPteSrc->n.u1Present) 2230 TRPMSetErrorCode(pVCpu, uErr | X86_TRAP_PF_P); /* page-level protection violation */ 2231 2232 return VINF_EM_RAW_GUEST_TRAP; 2233 } 2234 LogFlow(("CheckPageFault: page fault at %RGv PteSrc.u=%08x\n", GCPtrPage, PteSrc.u)); 2235 2236 /* 2237 * Set the accessed bits in the page directory and the page table. 2238 */ 2239 # if PGM_GST_TYPE == PGM_TYPE_AMD64 2240 pPml4eSrc->n.u1Accessed = 1; 2241 pPdpeSrc->lm.u1Accessed = 1; 2242 # endif 2243 pPdeSrc->n.u1Accessed = 1; 2244 pPteSrc->n.u1Accessed = 1; 2245 2246 /* 2247 * Only write protection page faults are relevant here. 2248 */ 2249 if (fWriteFault) 2250 { 2251 /* Write access, so mark guest entry as dirty. */ 2252 # ifdef VBOX_WITH_STATISTICS 2428 # ifdef VBOX_WITH_STATISTICS 2253 2429 if (!pPteSrc->n.u1Dirty) 2254 2430 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtiedPage)); 2255 2431 else 2256 2432 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,PageAlreadyDirty)); 2257 # endif2433 # endif 2258 2434 2259 2435 pPteSrc->n.u1Dirty = 1; 2260 2436 } 2261 return VINF_SUCCESS; 2262 } 2263 AssertRC(rc); 2264 return rc; 2265 2266 2267 l_UpperLevelPageFault: 2268 /* 2269 * Pagefault detected while checking the PML4E, PDPE or PDE. 2270 * Single exit handler to get rid of duplicate code paths. 2271 */ 2272 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyTrackRealPF)); 2273 Log(("CheckPageFault: real page fault at %RGv (%d)\n", GCPtrPage, uPageFaultLevel)); 2274 2275 if ( 1 2276 # if PGM_GST_TYPE == PGM_TYPE_AMD64 2277 && pPml4eSrc->n.u1Present 2278 # endif 2279 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE 2280 && pPdpeSrc->n.u1Present 2281 # endif 2282 && pPdeSrc->n.u1Present) 2283 { 2284 /* Check the present bit as the shadow tables can cause different error codes by being out of sync. */ 2285 if ( pPdeSrc->b.u1Size 2286 # if PGM_GST_TYPE == PGM_TYPE_32BIT 2287 && CPUMIsGuestPageSizeExtEnabled(pVCpu) 2288 # endif 2289 ) 2290 { 2291 TRPMSetErrorCode(pVCpu, uErr | X86_TRAP_PF_P); /* page-level protection violation */ 2292 } 2293 else 2294 { 2295 /* 2296 * Map the guest page table. 
2297 */ 2298 PGSTPT pPTSrc2; 2299 rc = PGM_GCPHYS_2_PTR(pVM, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc2); 2300 if (RT_SUCCESS(rc)) 2301 { 2302 PGSTPTE pPteSrc = &pPTSrc2->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK]; 2303 if (pPteSrc->n.u1Present) 2304 TRPMSetErrorCode(pVCpu, uErr | X86_TRAP_PF_P); /* page-level protection violation */ 2305 } 2306 AssertRC(rc); 2307 } 2308 } 2309 return VINF_EM_RAW_GUEST_TRAP; 2437 } 2438 return VINF_SUCCESS; 2310 2439 } 2440 2311 2441 2312 2442 /** … … 2323 2453 { 2324 2454 # if PGM_GST_TYPE == PGM_TYPE_32BIT 2325 const bool fBigPagesSupported = CPUMIsGuestPageSizeExtEnabled(pVCpu);2455 const bool fBigPagesSupported = CPUMIsGuestPageSizeExtEnabled(pVCpu); 2326 2456 # else 2327 const bool fBigPagesSupported = true;2328 # endif 2329 PVM pVM= pVCpu->CTX_SUFF(pVM);2330 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);2457 const bool fBigPagesSupported = true; 2458 # endif 2459 PVM pVM = pVCpu->CTX_SUFF(pVM); 2460 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); 2331 2461 2332 2462 Assert(PGMIsLockOwner(pVM)); 2333 2463 2464 /* 2465 * Handle big page. 2466 */ 2334 2467 if (pPdeSrc->b.u1Size && fBigPagesSupported) 2335 2468 { … … 2343 2476 2344 2477 /* Note: No need to invalidate this entry on other VCPUs as a stale TLB entry will not harm; write access will simply 2345 * fault again and take this path to only invalidate the entry .2478 * fault again and take this path to only invalidate the entry (see below). 2346 2479 */ 2347 2480 PdeDst.n.u1Write = 1; … … 2352 2485 return VINF_PGM_HANDLED_DIRTY_BIT_FAULT; /* restarts the instruction. */ 2353 2486 } 2487 2354 2488 # ifdef IN_RING0 2355 else2356 2489 /* Check for stale TLB entry; only applies to the SMP guest case. */ 2357 2490 if ( pVM->cCpus > 1 … … 2383 2516 PGSTPT pPTSrc; 2384 2517 int rc = PGM_GCPHYS_2_PTR(pVM, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc); 2385 if (RT_SUCCESS(rc)) 2386 { 2387 if (pPdeDst->n.u1Present) 2518 if (RT_FAILURE(rc)) 2519 { 2520 AssertRC(rc); 2521 return rc; 2522 } 2523 2524 if (pPdeDst->n.u1Present) 2525 { 2526 PGSTPTE pPteSrc = &pPTSrc->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK]; 2527 const GSTPTE PteSrc = *pPteSrc; 2528 2529 #ifndef IN_RING0 2530 /* Bail out here as pgmPoolGetPage will return NULL and we'll crash below. 2531 * Our individual shadow handlers will provide more information and force a fatal exit. 2532 */ 2533 if (MMHyperIsInsideArea(pVM, (RTGCPTR)GCPtrPage)) 2388 2534 { 2389 PGSTPTE pPteSrc = &pPTSrc->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK]; 2390 const GSTPTE PteSrc = *pPteSrc; 2391 #ifndef IN_RING0 2392 /* Bail out here as pgmPoolGetPageByHCPhys will return NULL and we'll crash below. 2393 * Our individual shadow handlers will provide more information and force a fatal exit. 2394 */ 2395 if (MMHyperIsInsideArea(pVM, (RTGCPTR)GCPtrPage)) 2535 LogRel(("CheckPageFault: write to hypervisor region %RGv\n", GCPtrPage)); 2536 return VINF_PGM_NO_DIRTY_BIT_TRACKING; 2537 } 2538 #endif 2539 /* 2540 * Map shadow page table. 2541 */ 2542 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPdeDst->u & SHW_PDE_PG_MASK); 2543 if (pShwPage) 2544 { 2545 PSHWPT pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage); 2546 PSHWPTE pPteDst = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK]; 2547 if (pPteDst->n.u1Present) /** @todo Optimize accessed bit emulation? */ 2396 2548 { 2397 LogRel(("CheckPageFault: write to hypervisor region %RGv\n", GCPtrPage)); 2398 return VINF_PGM_NO_DIRTY_BIT_TRACKING; 2399 } 2400 #endif 2401 /* 2402 * Map shadow page table. 
2403 */ 2404 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPdeDst->u & SHW_PDE_PG_MASK); 2405 if (pShwPage) 2406 { 2407 PSHWPT pPTDst = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage); 2408 PSHWPTE pPteDst = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK]; 2409 if (pPteDst->n.u1Present) /** @todo Optimize accessed bit emulation? */ 2549 if (pPteDst->u & PGM_PTFLAGS_TRACK_DIRTY) 2410 2550 { 2411 if (pPteDst->u & PGM_PTFLAGS_TRACK_DIRTY) 2551 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPteSrc->u & GST_PTE_PG_MASK); 2552 SHWPTE PteDst = *pPteDst; 2553 2554 LogFlow(("DIRTY page trap addr=%RGv\n", GCPtrPage)); 2555 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap)); 2556 2557 Assert(pPteSrc->n.u1Write); 2558 2559 /* Note: No need to invalidate this entry on other VCPUs as a stale TLB 2560 * entry will not harm; write access will simply fault again and 2561 * take this path to only invalidate the entry. 2562 */ 2563 if (RT_LIKELY(pPage)) 2412 2564 { 2413 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPteSrc->u & GST_PTE_PG_MASK); 2414 SHWPTE PteDst = *pPteDst; 2415 2416 LogFlow(("DIRTY page trap addr=%RGv\n", GCPtrPage)); 2417 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap)); 2418 2419 Assert(pPteSrc->n.u1Write); 2420 2421 /* Note: No need to invalidate this entry on other VCPUs as a stale TLB entry will not harm; write access will simply 2422 * fault again and take this path to only invalidate the entry. 2423 */ 2424 if (RT_LIKELY(pPage)) 2565 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)) 2425 2566 { 2426 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)) 2567 AssertMsgFailed(("%R[pgmpage] - we don't set PGM_PTFLAGS_TRACK_DIRTY for these pages\n", pPage)); 2568 Assert(!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)); 2569 /* Assuming write handlers here as the PTE is present (otherwise we wouldn't be here). */ 2570 PteDst.n.u1Write = 0; 2571 } 2572 else 2573 { 2574 if ( PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED 2575 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM) 2427 2576 { 2428 /* Assuming write handlers here as the PTE is present (otherwise we wouldn't be here). */ 2577 rc = pgmPhysPageMakeWritable(pVM, pPage, pPteSrc->u & GST_PTE_PG_MASK); 2578 AssertRC(rc); 2579 } 2580 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED) 2581 PteDst.n.u1Write = 1; 2582 else 2583 { 2584 /* Still applies to shared pages. */ 2585 Assert(!PGM_PAGE_IS_ZERO(pPage)); 2429 2586 PteDst.n.u1Write = 0; 2430 2587 } 2431 else2432 {2433 if ( PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED2434 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)2435 {2436 rc = pgmPhysPageMakeWritable(pVM, pPage, pPteSrc->u & GST_PTE_PG_MASK);2437 AssertRC(rc);2438 }2439 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)2440 {2441 PteDst.n.u1Write = 1;2442 }2443 else2444 {2445 /* Still applies to shared pages. */2446 Assert(!PGM_PAGE_IS_ZERO(pPage));2447 PteDst.n.u1Write = 0;2448 }2449 }2450 2588 } 2451 else2452 PteDst.n.u1Write = 1;2453 2454 PteDst.n.u1Dirty = 1;2455 PteDst.n.u1Accessed = 1;2456 PteDst.au32[0] &= ~PGM_PTFLAGS_TRACK_DIRTY;2457 ASMAtomicWriteSize(pPteDst, PteDst.u);2458 PGM_INVL_PG(pVCpu, GCPtrPage);2459 return VINF_PGM_HANDLED_DIRTY_BIT_FAULT; /* restarts the instruction. */2460 2589 } 2590 else 2591 PteDst.n.u1Write = 1; /** @todo r=bird: This doesn't make sense to me. 
*/ 2592 2593 PteDst.n.u1Dirty = 1; 2594 PteDst.n.u1Accessed = 1; 2595 PteDst.au32[0] &= ~PGM_PTFLAGS_TRACK_DIRTY; 2596 ASMAtomicWriteSize(pPteDst, PteDst.u); 2597 PGM_INVL_PG(pVCpu, GCPtrPage); 2598 return VINF_PGM_HANDLED_DIRTY_BIT_FAULT; /* restarts the instruction. */ 2599 } 2600 2461 2601 # ifdef IN_RING0 2462 else 2463 /* Check for stale TLB entry; only applies to the SMP guest case. */ 2464 if ( pVM->cCpus > 1 2465 && pPteDst->n.u1Write == 1 2466 && pPteDst->n.u1Accessed == 1) 2467 { 2468 /* Stale TLB entry. */ 2469 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageStale)); 2470 PGM_INVL_PG(pVCpu, GCPtrPage); 2471 return VINF_PGM_HANDLED_DIRTY_BIT_FAULT; /* restarts the instruction. */ 2472 } 2473 # endif 2602 /* Check for stale TLB entry; only applies to the SMP guest case. */ 2603 if ( pVM->cCpus > 1 2604 && pPteDst->n.u1Write == 1 2605 && pPteDst->n.u1Accessed == 1) 2606 { 2607 /* Stale TLB entry. */ 2608 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageStale)); 2609 PGM_INVL_PG(pVCpu, GCPtrPage); 2610 return VINF_PGM_HANDLED_DIRTY_BIT_FAULT; /* restarts the instruction. */ 2474 2611 } 2612 # endif 2475 2613 } 2476 else2477 AssertMsgFailed(("pgmPoolGetPageByHCPhys %RGp failed!\n", pPdeDst->u & SHW_PDE_PG_MASK));2478 2614 } 2479 return VINF_PGM_NO_DIRTY_BIT_TRACKING; 2480 } 2481 AssertRC(rc); 2482 return rc; 2615 else 2616 AssertMsgFailed(("pgmPoolGetPageByHCPhys %RGp failed!\n", pPdeDst->u & SHW_PDE_PG_MASK)); 2617 } 2618 2619 return VINF_PGM_NO_DIRTY_BIT_TRACKING; 2483 2620 } 2621 2484 2622 #endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */ 2485 2623 … … 2500 2638 PGM_BTH_DECL(int, SyncPT)(PVMCPU pVCpu, unsigned iPDSrc, PGSTPD pPDSrc, RTGCPTR GCPtrPage) 2501 2639 { 2502 PVM pVM= pVCpu->CTX_SUFF(pVM);2503 PPGMPOOL pPool= pVM->pgm.s.CTX_SUFF(pPool);2640 PVM pVM = pVCpu->CTX_SUFF(pVM); 2641 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); 2504 2642 2505 2643 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPT), a); … … 2517 2655 && PGM_SHW_TYPE != PGM_TYPE_EPT 2518 2656 2519 int rc = VINF_SUCCESS;2657 int rc = VINF_SUCCESS; 2520 2658 2521 2659 /* … … 2532 2670 2533 2671 # elif PGM_SHW_TYPE == PGM_TYPE_PAE 2534 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;2535 PPGMPOOLPAGE pShwPde = NULL;2672 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK; 2673 PPGMPOOLPAGE pShwPde = NULL; 2536 2674 PX86PDPAE pPDDst; 2537 2675 PSHWPDE pPdeDst; … … 2553 2691 AssertRCSuccessReturn(rc, rc); 2554 2692 Assert(pPDDst); 2555 PSHWPDE pPdeDst = &pPDDst->a[iPDDst];2556 # endif 2557 SHWPDE PdeDst = *pPdeDst;2693 PSHWPDE pPdeDst = &pPDDst->a[iPDDst]; 2694 # endif 2695 SHWPDE PdeDst = *pPdeDst; 2558 2696 2559 2697 # if PGM_GST_TYPE == PGM_TYPE_AMD64 2560 2698 /* Fetch the pgm pool shadow descriptor. */ 2561 PPGMPOOLPAGE pShwPde= pgmPoolGetPage(pPool, pPdptDst->a[iPdpt].u & X86_PDPE_PG_MASK);2699 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pPool, pPdptDst->a[iPdpt].u & X86_PDPE_PG_MASK); 2562 2700 Assert(pShwPde); 2563 2701 # endif … … 2566 2704 /* 2567 2705 * Check for conflicts. 2568 * GC: In case of a conflict we'll go to Ring-3 and do a full SyncCR3.2569 * HC: Simply resolve the conflict.2706 * RC: In case of a conflict we'll go to Ring-3 and do a full SyncCR3. 2707 * R3: Simply resolve the conflict. 
2570 2708 */ 2571 2709 if (PdeDst.u & PGM_PDFLAGS_MAPPING) … … 2576 2714 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPT), a); 2577 2715 return VERR_ADDRESS_CONFLICT; 2578 # else 2716 2717 # else /* IN_RING3 */ 2579 2718 PPGMMAPPING pMapping = pgmGetMapping(pVM, (RTGCPTR)GCPtrPage); 2580 2719 Assert(pMapping); … … 2592 2731 } 2593 2732 PdeDst = *pPdeDst; 2594 # endif 2733 # endif /* IN_RING3 */ 2595 2734 } 2596 2735 # endif /* !PGM_WITHOUT_MAPPINGS */ … … 2637 2776 # endif 2638 2777 2639 GCPhys = GST_GET_PDE_BIG_PG_GCPHYS( PdeSrc);2778 GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(pVM, PdeSrc); 2640 2779 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT 2641 2780 /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/ … … 2648 2787 enmAccess = (fNoExecute) ? PGMPOOLACCESS_USER_RW_NX : PGMPOOLACCESS_USER_RW; 2649 2788 else 2650 enmAccess = (fNoExecute) ? PGMPOOLACCESS_USER_R_NX : PGMPOOLACCESS_USER_R;2789 enmAccess = (fNoExecute) ? PGMPOOLACCESS_USER_R_NX : PGMPOOLACCESS_USER_R; 2651 2790 } 2652 2791 else … … 2655 2794 enmAccess = (fNoExecute) ? PGMPOOLACCESS_SUPERVISOR_RW_NX : PGMPOOLACCESS_SUPERVISOR_RW; 2656 2795 else 2657 enmAccess = (fNoExecute) ? PGMPOOLACCESS_SUPERVISOR_R_NX : PGMPOOLACCESS_SUPERVISOR_R;2796 enmAccess = (fNoExecute) ? PGMPOOLACCESS_SUPERVISOR_R_NX : PGMPOOLACCESS_SUPERVISOR_R; 2658 2797 } 2659 2798 rc = pgmPoolAllocEx(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_BIG, enmAccess, pShwPde->idx, iPDDst, &pShwPage); … … 2765 2904 const GSTPTE PteSrc = pPTSrc->a[iPTSrc]; 2766 2905 2767 if (PteSrc.n.u1Present) /* we've already cleared it above */2906 if (PteSrc.n.u1Present) 2768 2907 { 2769 2908 # ifndef IN_RING0 … … 2790 2929 (RTGCPHYS)((PdeSrc.u & GST_PDE_PG_MASK) + iPTSrc*sizeof(PteSrc)) )); 2791 2930 } 2931 /* else: the page table was cleared by the pool */ 2792 2932 } /* for PTEs */ 2793 2933 } … … 2897 3037 PteDst.u = 0; 2898 3038 } 2899 else 2900 if (PGM_PAGE_IS_BALLOONED(pPage)) 3039 else if (PGM_PAGE_IS_BALLOONED(pPage)) 2901 3040 { 2902 3041 /* Skip ballooned pages. */ … … 3041 3180 Assert(!PdeDst.n.u1Present); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/ 3042 3181 3043 # if defined(PGM_WITH_LARGE_PAGES) && (PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE) 3044 # if (PGM_SHW_TYPE != PGM_TYPE_EPT) /* PGM_TYPE_EPT implies nested paging */ 3045 if (HWACCMIsNestedPagingActive(pVM)) 3046 # endif 3182 # if defined(PGM_WITH_LARGE_PAGES) && PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE 3183 if (BTH_IS_NP_ACTIVE(pVM)) 3047 3184 { 3048 3185 PPGMPAGE pPage; … … 3060 3197 HCPhys = PGM_PAGE_GET_HCPHYS(pPage); 3061 3198 } 3062 else 3063 if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED) 3199 else if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED) 3064 3200 { 3065 3201 /* Recheck the entire 2 MB range to see if we can use it again as a large page. 
*/ … … 3072 3208 } 3073 3209 } 3074 else 3075 if (PGMIsUsingLargePages(pVM)) 3210 else if (PGMIsUsingLargePages(pVM)) 3076 3211 { 3077 3212 rc = pgmPhysAllocLargePage(pVM, GCPtrPage); … … 3174 3309 PGM_BTH_DECL(int, PrefetchPage)(PVMCPU pVCpu, RTGCPTR GCPtrPage) 3175 3310 { 3176 #if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \ 3177 && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT 3311 #if ( PGM_GST_TYPE == PGM_TYPE_32BIT \ 3312 || PGM_GST_TYPE == PGM_TYPE_REAL \ 3313 || PGM_GST_TYPE == PGM_TYPE_PROT \ 3314 || PGM_GST_TYPE == PGM_TYPE_PAE \ 3315 || PGM_GST_TYPE == PGM_TYPE_AMD64 ) \ 3316 && PGM_SHW_TYPE != PGM_TYPE_NESTED \ 3317 && PGM_SHW_TYPE != PGM_TYPE_EPT 3318 3178 3319 /* 3179 3320 * Check that all Guest levels thru the PDE are present, getting the … … 3184 3325 # if PGM_GST_TYPE == PGM_TYPE_32BIT 3185 3326 const unsigned iPDSrc = GCPtrPage >> GST_PD_SHIFT; 3186 PGSTPD pPDSrc = pgmGstGet32bitPDPtr( &pVCpu->pgm.s);3327 PGSTPD pPDSrc = pgmGstGet32bitPDPtr(pVCpu); 3187 3328 # elif PGM_GST_TYPE == PGM_TYPE_PAE 3188 3329 unsigned iPDSrc; 3189 3330 X86PDPE PdpeSrc; 3190 PGSTPD pPDSrc = pgmGstGetPaePDPtr( &pVCpu->pgm.s, GCPtrPage, &iPDSrc, &PdpeSrc);3331 PGSTPD pPDSrc = pgmGstGetPaePDPtr(pVCpu, GCPtrPage, &iPDSrc, &PdpeSrc); 3191 3332 if (!pPDSrc) 3192 3333 return VINF_SUCCESS; /* not present */ … … 3195 3336 PX86PML4E pPml4eSrc; 3196 3337 X86PDPE PdpeSrc; 3197 PGSTPD pPDSrc = pgmGstGetLongModePDPtr( &pVCpu->pgm.s, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);3338 PGSTPD pPDSrc = pgmGstGetLongModePDPtr(pVCpu, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc); 3198 3339 if (!pPDSrc) 3199 3340 return VINF_SUCCESS; /* not present */ … … 3269 3410 if (!PdeDst.n.u1Present) 3270 3411 { 3271 /** r=bird: This guy will set the A bit on the PDE, probably harmless. */ 3412 /** @todo r=bird: This guy will set the A bit on the PDE, 3413 * probably harmless. */ 3272 3414 rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, GCPtrPage); 3273 3415 } 3274 3416 else 3275 3417 { 3276 /* * @noteWe used to sync PGM_SYNC_NR_PAGES pages, which triggered assertions in CSAM, because3277 * R/W attributes of nearby pages were reset. Not sure how that could happen. Anyway, it3278 * makes no sense to prefetch more than one page.3418 /* Note! We used to sync PGM_SYNC_NR_PAGES pages, which triggered assertions in CSAM, because 3419 * R/W attributes of nearby pages were reset. Not sure how that could happen. Anyway, it 3420 * makes no sense to prefetch more than one page. 3279 3421 */ 3280 3422 rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, GCPtrPage, 1, 0); … … 3289 3431 #elif PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT 3290 3432 return VINF_SUCCESS; /* ignore */ 3433 #else 3434 AssertCompile(0); 3291 3435 #endif 3292 3436 } … … 3303 3447 * @param fPage The effective guest page flags. 3304 3448 * @param uErr The trap error code. 3449 * @remarks This will normally never be called on invalid guest page 3450 * translation entries. 
3305 3451 */ 3306 3452 PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fPage, unsigned uErr) … … 3311 3457 3312 3458 Assert(!HWACCMIsNestedPagingActive(pVM)); 3313 #if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_TYPE_AMD64) \ 3314 && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT 3459 #if ( PGM_GST_TYPE == PGM_TYPE_32BIT \ 3460 || PGM_GST_TYPE == PGM_TYPE_REAL \ 3461 || PGM_GST_TYPE == PGM_TYPE_PROT \ 3462 || PGM_GST_TYPE == PGM_TYPE_PAE \ 3463 || PGM_GST_TYPE == PGM_TYPE_AMD64 ) \ 3464 && PGM_SHW_TYPE != PGM_TYPE_NESTED \ 3465 && PGM_SHW_TYPE != PGM_TYPE_EPT 3315 3466 3316 3467 # ifndef IN_RING0 … … 3329 3480 * Get guest PD and index. 3330 3481 */ 3482 /** @todo Performance: We've done all this a jiffy ago in the 3483 * PGMGstGetPage call. */ 3331 3484 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) 3332 3485 # if PGM_GST_TYPE == PGM_TYPE_32BIT 3333 3486 const unsigned iPDSrc = GCPtrPage >> GST_PD_SHIFT; 3334 PGSTPD pPDSrc = pgmGstGet32bitPDPtr(&pVCpu->pgm.s); 3487 PGSTPD pPDSrc = pgmGstGet32bitPDPtr(pVCpu); 3488 3335 3489 # elif PGM_GST_TYPE == PGM_TYPE_PAE 3336 unsigned iPDSrc = 0;3490 unsigned iPDSrc = 0; 3337 3491 X86PDPE PdpeSrc; 3338 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVCpu->pgm.s, GCPtrPage, &iPDSrc, &PdpeSrc); 3339 3340 if (pPDSrc) 3492 PGSTPD pPDSrc = pgmGstGetPaePDPtr(pVCpu, GCPtrPage, &iPDSrc, &PdpeSrc); 3493 if (RT_UNLIKELY(!pPDSrc)) 3341 3494 { 3342 3495 Log(("PGMVerifyAccess: access violation for %RGv due to non-present PDPTR\n", GCPtrPage)); 3343 3496 return VINF_EM_RAW_GUEST_TRAP; 3344 3497 } 3498 3345 3499 # elif PGM_GST_TYPE == PGM_TYPE_AMD64 3346 3500 unsigned iPDSrc; 3347 3501 PX86PML4E pPml4eSrc; 3348 3502 X86PDPE PdpeSrc; 3349 PGSTPD pPDSrc = pgmGstGetLongModePDPtr( &pVCpu->pgm.s, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);3350 if ( !pPDSrc)3503 PGSTPD pPDSrc = pgmGstGetLongModePDPtr(pVCpu, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc); 3504 if (RT_UNLIKELY(!pPDSrc)) 3351 3505 { 3352 3506 Log(("PGMVerifyAccess: access violation for %RGv due to non-present PDPTR\n", GCPtrPage)); … … 3354 3508 } 3355 3509 # endif 3356 # else 3510 3511 # else /* !PGM_WITH_PAGING */ 3357 3512 PGSTPD pPDSrc = NULL; 3358 3513 const unsigned iPDSrc = 0; 3359 # endif 3514 # endif /* !PGM_WITH_PAGING */ 3360 3515 int rc = VINF_SUCCESS; 3361 3516 … … 3367 3522 # if PGM_SHW_TYPE == PGM_TYPE_32BIT 3368 3523 PX86PDE pPdeDst = pgmShwGet32BitPDEPtr(&pVCpu->pgm.s, GCPtrPage); 3524 3369 3525 # elif PGM_SHW_TYPE == PGM_TYPE_PAE 3370 3526 PX86PDEPAE pPdeDst; … … 3372 3528 PX86PDPAE pPDDst; 3373 3529 # if PGM_GST_TYPE != PGM_TYPE_PAE 3530 /* Fake PDPT entry; access control handled on the page table level, so allow everything. */ 3374 3531 X86PDPE PdpeSrc; 3375 3376 /* Fake PDPT entry; access control handled on the page table level, so allow everything. */3377 3532 PdpeSrc.u = X86_PDPE_P; /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */ 3378 3533 # endif … … 3393 3548 3394 3549 # if PGM_GST_TYPE == PGM_TYPE_PROT 3395 /* AMD-V nested paging */3550 /* AMD-V nested paging: Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */ 3396 3551 X86PML4E Pml4eSrc; 3397 3552 X86PDPE PdpeSrc; 3398 3553 PX86PML4E pPml4eSrc = &Pml4eSrc; 3399 3400 /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. 
*/3401 3554 Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A; 3402 PdpeSrc.u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US| X86_PDPE_A;3555 PdpeSrc.u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A; 3403 3556 # endif 3404 3557 … … 3440 3593 Log(("PGMVerifyAccess: success (dirty)\n")); 3441 3594 else 3442 { 3443 GSTPDE PdeSrc = pPDSrc->a[iPDSrc]; 3595 # endif 3596 { 3597 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) 3598 GSTPDE PdeSrc = pPDSrc->a[iPDSrc]; 3444 3599 # else 3445 {3446 3600 GSTPDE PdeSrc; 3447 3601 PdeSrc.u = 0; /* faked so we don't have to #ifdef everything */ … … 3450 3604 PdeSrc.n.u1Accessed = 1; 3451 3605 PdeSrc.n.u1User = 1; 3452 3453 # endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */ 3606 # endif 3607 3454 3608 Assert(rc != VINF_EM_RAW_GUEST_TRAP); 3455 3609 if (uErr & X86_TRAP_PF_US) … … 3467 3621 else 3468 3622 { 3469 Log(("PGMVerifyAccess: access violation for %RGv rc=% d\n", GCPtrPage, rc));3623 Log(("PGMVerifyAccess: access violation for %RGv rc=%Rrc\n", GCPtrPage, rc)); 3470 3624 rc = VINF_EM_RAW_GUEST_TRAP; 3471 3625 } … … 3478 3632 return rc; 3479 3633 3480 #else /* PGM_GST_TYPE != PGM_TYPE_32BIT*/3634 #else /* PGM_SHW_TYPE == PGM_TYPE_EPT || PGM_SHW_TYPE == PGM_TYPE_NESTED */ 3481 3635 3482 3636 AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_GST_TYPE, PGM_SHW_TYPE)); 3483 3637 return VERR_INTERNAL_ERROR; 3484 #endif /* PGM_ GST_TYPE != PGM_TYPE_32BIT*/3638 #endif /* PGM_SHW_TYPE == PGM_TYPE_EPT || PGM_SHW_TYPE == PGM_TYPE_NESTED */ 3485 3639 } 3486 3640 … … 3652 3806 AssertMsgReturn(HCPhys == HCPhysShw, ("HCPhys=%RHp HCPhyswShw=%RHp (cr3)\n", HCPhys, HCPhysShw), false); 3653 3807 # if PGM_GST_TYPE == PGM_TYPE_32BIT && defined(IN_RING3) 3654 pgmGstGet32bitPDPtr(p PGM);3808 pgmGstGet32bitPDPtr(pVCpu); 3655 3809 RTGCPHYS GCPhys; 3656 3810 rc = PGMR3DbgR3Ptr2GCPhys(pVM, pPGM->pGst32BitPdR3, &GCPhys); … … 3692 3846 RTGCPHYS GCPhysPdptSrc; 3693 3847 3694 pPml4eSrc = pgmGstGetLongModePML4EPtr( &pVCpu->pgm.s, iPml4);3848 pPml4eSrc = pgmGstGetLongModePML4EPtr(pVCpu, iPml4); 3695 3849 pPml4eDst = pgmShwGetLongModePML4EPtr(&pVCpu->pgm.s, iPml4); 3696 3850 … … 3748 3902 # if PGM_GST_TYPE == PGM_TYPE_PAE 3749 3903 X86PDPE PdpeSrc; 3750 PGSTPD pPDSrc = pgmGstGetPaePDPtr( &pVCpu->pgm.s, GCPtr, &iPDSrc, &PdpeSrc);3904 PGSTPD pPDSrc = pgmGstGetPaePDPtr(pVCpu, GCPtr, &iPDSrc, &PdpeSrc); 3751 3905 PX86PDPT pPdptDst = pgmShwGetPaePDPTPtr(&pVCpu->pgm.s); 3752 3906 # else … … 3755 3909 PX86PDPT pPdptDst; 3756 3910 PX86PDPAE pPDDst; 3757 PGSTPD pPDSrc = pgmGstGetLongModePDPtr( &pVCpu->pgm.s, GCPtr, &pPml4eSrcIgn, &PdpeSrc, &iPDSrc);3911 PGSTPD pPDSrc = pgmGstGetLongModePDPtr(pVCpu, GCPtr, &pPml4eSrcIgn, &PdpeSrc, &iPDSrc); 3758 3912 3759 3913 rc = pgmShwGetLongModePDPtr(pVCpu, GCPtr, NULL, &pPdptDst, &pPDDst); … … 3815 3969 # endif /* PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PAE */ 3816 3970 # if PGM_GST_TYPE == PGM_TYPE_32BIT 3817 GSTPD const *pPDSrc = pgmGstGet32bitPDPtr( &pVCpu->pgm.s);3971 GSTPD const *pPDSrc = pgmGstGet32bitPDPtr(pVCpu); 3818 3972 # if PGM_SHW_TYPE == PGM_TYPE_32BIT 3819 3973 PCX86PD pPDDst = pgmShwGet32BitPDPtr(&pVCpu->pgm.s); … … 3902 4056 } 3903 4057 # endif 3904 GCPhysGst = GST_GET_PDE_BIG_PG_GCPHYS( PdeSrc);4058 GCPhysGst = GST_GET_PDE_BIG_PG_GCPHYS(pVM, PdeSrc); 3905 4059 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT 3906 4060 GCPhysGst |= GCPtr & RT_BIT(X86_PAGE_2M_SHIFT); … … 4072 4226 else 4073 4227 { 4074 if (PteDst.n.u1Present) 4228 if ( PteDst.n.u1Present 4229 # 
if PGM_SHW_TYPE == PGM_TYPE_EPT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64 4230 && !PGM_PAGE_IS_MMIO(pPhysPage) 4231 # endif 4232 ) 4075 4233 { 4076 4234 AssertMsgFailed(("ALL access flagged at %RGv but the page is present! pPhysPage=%R[pgmpage] PteSrc=%#RX64 PteDst=%#RX64\n", … … 4303 4461 else 4304 4462 { 4305 if (PteDst.n.u1Present) 4463 if ( PteDst.n.u1Present 4464 # if PGM_SHW_TYPE == PGM_TYPE_EPT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64 4465 && !PGM_PAGE_IS_MMIO(pPhysPage) 4466 # endif 4467 ) 4306 4468 { 4307 4469 AssertMsgFailed(("ALL access flagged at %RGv but the page is present! pPhysPage=%R[pgmpage] PdeSrc=%#RX64 PteDst=%#RX64\n", … … 4412 4574 * Map the 4 PDs too. 4413 4575 */ 4414 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr( &pVCpu->pgm.s);4576 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu); 4415 4577 RTGCPTR GCPtr = pVM->pgm.s.GCPtrCR3Mapping + PAGE_SIZE; 4416 4578 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++, GCPtr += PAGE_SIZE) -
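The PGMAllBth.h hunks above drop the goto-based l_UpperLevelPageFault exit in CheckPageFault and return through dedicated per-level helpers (CheckPageFaultReturnNP/RSVD/Prot), adding an explicit reserved-bit validation (GST_IS_PDE_VALID, GST_IS_BIG_PDE_VALID, GST_IS_PTE_VALID) between the present check and the protection check. The fragment below is only a minimal standalone sketch of that per-level check order; the bit constants and the verdict enum are simplified stand-ins, not the real PGM types or macros.

#include <stdbool.h>
#include <stdint.h>

/* Simplified verdicts; the real code returns VBox status codes via the
 * CheckPageFaultReturnNP/RSVD/Prot helpers (their bodies are not part of
 * this changeset excerpt). */
typedef enum { PF_OK, PF_NOT_PRESENT, PF_RSVD, PF_PROT } PFVERDICT;

/* Per-level check order used by the reworked CheckPageFault:
 * 1) present, 2) reserved (must-be-zero) bits, 3) access rights. */
static PFVERDICT CheckLevel(uint64_t uEntry, uint64_t fMbzMask,
                            bool fWriteFault, bool fUserFault, bool fExecFault)
{
    if (!(uEntry & UINT64_C(0x01)))                    /* bit 0: present        */
        return PF_NOT_PRESENT;
    if (uEntry & fMbzMask)                             /* reserved bits -> RSVD */
        return PF_RSVD;
    if (fWriteFault && !(uEntry & UINT64_C(0x02)))     /* bit 1: R/W            */
        return PF_PROT;
    if (fUserFault  && !(uEntry & UINT64_C(0x04)))     /* bit 2: U/S            */
        return PF_PROT;
    if (fExecFault  && (uEntry & (UINT64_C(1) << 63))) /* bit 63: NX            */
        return PF_PROT;
    return PF_OK;                                      /* descend to next level */
}

In the real hunks the write/user/execute conditions additionally depend on the fault error code and the guest paging mode (fMaybeWriteProtFault, fMaybeNXEFault, fUserLevelFault).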
trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
r30654 r30889 5 5 6 6 /* 7 * Copyright (C) 2006-20 07Oracle Corporation7 * Copyright (C) 2006-2010 Oracle Corporation 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 21 21 *******************************************************************************/ 22 22 RT_C_DECLS_BEGIN 23 PGM_GST_DECL(int, GetPage)(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys); 24 PGM_GST_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask); 25 PGM_GST_DECL(int, GetPDE)(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPDE); 23 #if PGM_GST_TYPE == PGM_TYPE_32BIT \ 24 || PGM_GST_TYPE == PGM_TYPE_PAE \ 25 || PGM_GST_TYPE == PGM_TYPE_AMD64 26 PGM_GST_DECL(int, Walk)(PVMCPU pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk); 27 #endif 28 PGM_GST_DECL(int, GetPage)(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys); 29 PGM_GST_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask); 30 PGM_GST_DECL(int, GetPDE)(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPDE); 26 31 PGM_GST_DECL(bool, HandlerVirtualUpdate)(PVM pVM, uint32_t cr4); 27 32 RT_C_DECLS_END 28 33 29 34 35 #if PGM_GST_TYPE == PGM_TYPE_32BIT \ 36 || PGM_GST_TYPE == PGM_TYPE_PAE \ 37 || PGM_GST_TYPE == PGM_TYPE_AMD64 38 39 40 DECLINLINE(int) PGM_GST_NAME(WalkReturnNotPresent)(PVMCPU pVCpu, PGSTPTWALK pWalk, int iLevel) 41 { 42 NOREF(iLevel); 43 pWalk->Core.fNotPresent = true; 44 pWalk->Core.uLevel = (uint8_t)iLevel; 45 return VERR_PAGE_TABLE_NOT_PRESENT; 46 } 47 48 DECLINLINE(int) PGM_GST_NAME(WalkReturnBadPhysAddr)(PVMCPU pVCpu, PGSTPTWALK pWalk, int rc, int iLevel) 49 { 50 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); 51 pWalk->Core.fBadPhysAddr = true; 52 pWalk->Core.uLevel = (uint8_t)iLevel; 53 return VERR_PAGE_TABLE_NOT_PRESENT; 54 } 55 56 DECLINLINE(int) PGM_GST_NAME(WalkReturnRsvdError)(PVMCPU pVCpu, PGSTPTWALK pWalk, int iLevel) 57 { 58 pWalk->Core.fRsvdError = true; 59 pWalk->Core.uLevel = (uint8_t)iLevel; 60 return VERR_PAGE_TABLE_NOT_PRESENT; 61 } 62 63 64 /** 65 * Performs a guest page table walk. 66 * 67 * @returns VBox status code. 68 * @retval VINF_SUCCESS on success. 69 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details. 70 * 71 * @param pVCpu The current CPU. 72 * @param GCPtr The guest virtual address to walk by. 73 * @param pWalk Where to return the walk result. This is always set. 74 */ 75 PGM_GST_DECL(int, Walk)(PVMCPU pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk) 76 { 77 int rc; 78 79 /* 80 * Init the walking structure. 81 */ 82 RT_ZERO(*pWalk); 83 pWalk->Core.GCPtr = GCPtr; 84 85 # if PGM_GST_TYPE == PGM_TYPE_32BIT \ 86 || PGM_GST_TYPE == PGM_TYPE_PAE 87 /* 88 * Boundary check for PAE and 32-bit (prevents trouble further down). 89 */ 90 if (RT_UNLIKELY(GCPtr >= _4G)) 91 return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 8); 92 # endif 93 94 { 95 # if PGM_GST_TYPE == PGM_TYPE_AMD64 96 /* 97 * The PMLE4. 
98 */ 99 rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pWalk->pPml4); 100 if (RT_FAILURE(rc)) 101 return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc); 102 103 PX86PML4 register pPml4 = pWalk->pPml4; 104 X86PML4E register Pml4e; 105 PX86PML4E register pPml4e; 106 107 pWalk->pPml4e = pPml4e = &pPml4->a[(GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK]; 108 pWalk->Pml4e.u = Pml4e.u = pPml4e->u; 109 if (!Pml4e.n.u1Present) 110 return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 4); 111 if (RT_UNLIKELY(!GST_IS_PML4E_VALID(pVCpu, Pml4e))) 112 return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 4); 113 114 /* 115 * The PDPE. 116 */ 117 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pml4e.u & X86_PML4E_PG_MASK_FULL, &pWalk->pPdpt); 118 if (RT_FAILURE(rc)) 119 return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc); 120 121 # elif PGM_GST_TYPE == PGM_TYPE_PAE 122 rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pWalk->pPdpt); 123 if (RT_FAILURE(rc)) 124 return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc); 125 # endif 126 } 127 { 128 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE 129 PX86PDPT register pPdpt = pWalk->pPdpt; 130 PX86PDPE register pPdpe; 131 X86PDPE register Pdpe; 132 133 pWalk->pPdpe = pPdpe = &pPdpt->a[(GCPtr >> GST_PDPT_SHIFT) & GST_PDPT_MASK]; 134 pWalk->Pdpe.u = Pdpe.u = pPdpe->u; 135 if (!Pdpe.n.u1Present) 136 return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 3); 137 if (RT_UNLIKELY(!GST_IS_PDPE_VALID(pVCpu, Pdpe))) 138 return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 3); 139 140 /* 141 * The PDE. 142 */ 143 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pdpe.u & X86_PDPE_PG_MASK_FULL, &pWalk->pPd); 144 if (RT_FAILURE(rc)) 145 return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 2, rc); 146 # elif PGM_GST_TYPE == PGM_TYPE_32BIT 147 rc = pgmGstGet32bitPDPtrEx(pVCpu, &pWalk->pPd); 148 if (RT_FAILURE(rc)) 149 return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc); 150 # endif 151 } 152 { 153 PGSTPD register pPd = pWalk->pPd; 154 PGSTPDE register pPde; 155 GSTPDE register Pde; 156 157 pWalk->pPde = pPde = &pPd->a[(GCPtr >> GST_PD_SHIFT) & GST_PD_MASK]; 158 pWalk->Pde.u = Pde.u = pPde->u; 159 if (!Pde.n.u1Present) 160 return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 2); 161 if (Pde.n.u1Size && GST_IS_PSE_ACTIVE(pVCpu)) 162 { 163 if (RT_UNLIKELY(!GST_IS_BIG_PDE_VALID(pVCpu, Pde))) 164 return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2); 165 166 pWalk->Core.GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde) 167 | (GCPtr & GST_BIG_PAGE_OFFSET_MASK); 168 pWalk->Core.fBigPage = true; 169 pWalk->Core.fSucceeded = true; 170 return VINF_SUCCESS; 171 } 172 173 if (RT_UNLIKELY(!GST_IS_PDE_VALID(pVCpu, Pde))) 174 return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2); 175 176 /* 177 * The PTE. 178 */ 179 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pde.u & GST_PDE_PG_MASK, &pWalk->pPt); 180 if (RT_FAILURE(rc)) 181 return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 1, rc); 182 } 183 { 184 PGSTPT register pPt = pWalk->pPt; 185 PGSTPTE register pPte; 186 GSTPTE register Pte; 187 188 pWalk->pPte = pPte = &pPt->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK]; 189 pWalk->Pte.u = Pte.u = pPte->u; 190 if (!Pte.n.u1Present) 191 return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 1); 192 if (RT_UNLIKELY(!GST_IS_PTE_VALID(pVCpu, Pte))) 193 return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 1); 194 195 /* 196 * We're done. 
197 */ 198 pWalk->Core.GCPhys = (Pte.u & GST_PDE_PG_MASK) 199 | (GCPtr & PAGE_OFFSET_MASK); 200 pWalk->Core.fSucceeded = true; 201 return VINF_SUCCESS; 202 } 203 } 204 205 #endif /* 32BIT, PAE, AMD64 */ 30 206 31 207 /** … … 57 233 return VINF_SUCCESS; 58 234 59 #elif PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64 60 61 # if PGM_GST_TYPE != PGM_TYPE_AMD64 62 /* Boundary check. */ 63 if (GCPtr >= _4G) 64 return VERR_PAGE_TABLE_NOT_PRESENT; 65 # endif 66 67 PVM pVM = pVCpu->CTX_SUFF(pVM); 68 /* 69 * Get the PDE. 70 */ 71 # if PGM_GST_TYPE == PGM_TYPE_32BIT 72 X86PDE Pde = pgmGstGet32bitPDE(&pVCpu->pgm.s, GCPtr); 73 74 #elif PGM_GST_TYPE == PGM_TYPE_PAE 75 /* pgmGstGetPaePDE will return 0 if the PDPTE is marked as not present. 76 * All the other bits in the PDPTE are only valid in long mode (r/w, u/s, nx). */ 77 X86PDEPAE Pde = pgmGstGetPaePDE(&pVCpu->pgm.s, GCPtr); 78 79 #elif PGM_GST_TYPE == PGM_TYPE_AMD64 80 PX86PML4E pPml4e; 81 X86PDPE Pdpe; 82 X86PDEPAE Pde = pgmGstGetLongModePDEEx(&pVCpu->pgm.s, GCPtr, &pPml4e, &Pdpe); 83 84 Assert(pPml4e); 85 if (!(pPml4e->n.u1Present & Pdpe.n.u1Present)) 86 return VERR_PAGE_TABLE_NOT_PRESENT; 87 88 /* Merge accessed, write, user and no-execute bits into the PDE. */ 89 Pde.n.u1Accessed &= pPml4e->n.u1Accessed & Pdpe.lm.u1Accessed; 90 Pde.n.u1Write &= pPml4e->n.u1Write & Pdpe.lm.u1Write; 91 Pde.n.u1User &= pPml4e->n.u1User & Pdpe.lm.u1User; 92 Pde.n.u1NoExecute |= pPml4e->n.u1NoExecute | Pdpe.lm.u1NoExecute; 93 # endif 94 95 /* 96 * Lookup the page. 97 */ 98 if (!Pde.n.u1Present) 99 return VERR_PAGE_TABLE_NOT_PRESENT; 100 101 if ( !Pde.b.u1Size 102 # if PGM_GST_TYPE == PGM_TYPE_32BIT 103 || !CPUMIsGuestPageSizeExtEnabled(pVCpu) 104 # endif 105 ) 106 { 107 PGSTPT pPT; 108 int rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & GST_PDE_PG_MASK, &pPT); 109 if (RT_FAILURE(rc)) 110 return rc; 111 112 /* 113 * Get PT entry and check presence. 114 */ 115 const GSTPTE Pte = pPT->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK]; 116 if (!Pte.n.u1Present) 117 return VERR_PAGE_NOT_PRESENT; 118 119 /* 120 * Store the result. 121 * RW and US flags depend on all levels (bitwise AND) - except for legacy PAE 122 * where the PDPE is simplified. 123 */ 124 if (pfFlags) 125 { 126 *pfFlags = (Pte.u & ~GST_PTE_PG_MASK) 127 & ((Pde.u & (X86_PTE_RW | X86_PTE_US)) | ~(uint64_t)(X86_PTE_RW | X86_PTE_US)); 235 #elif PGM_GST_TYPE == PGM_TYPE_32BIT \ 236 || PGM_GST_TYPE == PGM_TYPE_PAE \ 237 || PGM_GST_TYPE == PGM_TYPE_AMD64 238 239 GSTPTWALK Walk; 240 int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk); 241 if (RT_FAILURE(rc)) 242 return rc; 243 244 if (pGCPhys) 245 *pGCPhys = Walk.Core.GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK; 246 247 if (pfFlags) 248 { 249 /* The RW and US flags are determined via bitwise AND across all levels. */ 250 uint64_t fUpperRwUs = (X86_PTE_RW | X86_PTE_US) 251 # if PGM_GST_TYPE == PGM_TYPE_AMD64 252 & Walk.Pml4e.u 253 & Walk.Pdpe.u 254 # endif 255 & Walk.Pde.u; 256 fUpperRwUs |= ~(uint64_t)(X86_PTE_RW | X86_PTE_US); 257 258 /* The RW and US flags are determined via bitwise AND across all levels. 
*/ 128 259 # if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE) 129 /* The NX bit is determined by a bitwise OR between the PT and PD */ 130 if (((Pte.u | Pde.u) & X86_PTE_PAE_NX) && CPUMIsGuestNXEnabled(pVCpu)) 260 bool fUpperNx = 0 261 # if PGM_GST_TYPE == PGM_TYPE_AMD64 262 | Walk.Pml4e.n.u1NoExecute 263 | Walk.Pdpe.lm.u1NoExecute 264 # endif 265 | Walk.Pde.n.u1NoExecute; 266 # endif 267 268 if (!Walk.Core.fBigPage) 269 { 270 *pfFlags = (Walk.Pte.u & ~GST_PTE_PG_MASK) & fUpperRwUs; 271 # if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE) 272 if (Walk.Pte.n.u1NoExecute || fUpperNx) 273 { 274 Assert(GST_IS_NX_ACTIVE(pVCpu)); /* should trigger RSVD error otherwise. */ 131 275 *pfFlags |= X86_PTE_PAE_NX; 132 # endif 133 } 134 if (pGCPhys) 135 *pGCPhys = Pte.u & GST_PTE_PG_MASK; 136 } 137 else 138 { 139 /* 140 * Map big to 4k PTE and store the result 141 */ 142 if (pfFlags) 143 { 144 *pfFlags = (Pde.u & ~(GST_PTE_PG_MASK | X86_PTE_PAT)) 145 | ((Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT); 276 } 277 # endif 278 } 279 else 280 { 281 *pfFlags = ( (Walk.Pde.u & ~(GST_PTE_PG_MASK | X86_PTE_PAT)) 282 | ((Walk.Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT)) 283 & fUpperRwUs; 146 284 # if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE) 147 if ((Pde.u & X86_PTE_PAE_NX) && CPUMIsGuestNXEnabled(pVCpu)) 285 if (fUpperNx) 286 { 287 Assert(GST_IS_NX_ACTIVE(pVCpu)); /* should trigger RSVD error otherwise. */ 148 288 *pfFlags |= X86_PTE_PAE_NX; 149 # endif 150 } 151 if (pGCPhys)152 *pGCPhys = GST_GET_PDE_BIG_PG_GCPHYS(Pde) | (GCPtr & (~GST_PDE_BIG_PG_MASK ^ ~GST_PTE_PG_MASK));153 } 289 } 290 # endif 291 } 292 } 293 154 294 return VINF_SUCCESS; 295 155 296 #else 156 297 # error "shouldn't be here!" … … 175 316 PGM_GST_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask) 176 317 { 318 Assert((cb & PAGE_OFFSET_MASK) == 0); 319 177 320 #if PGM_GST_TYPE == PGM_TYPE_32BIT \ 178 321 || PGM_GST_TYPE == PGM_TYPE_PAE \ 179 322 || PGM_GST_TYPE == PGM_TYPE_AMD64 180 181 Assert((cb & PAGE_OFFSET_MASK) == 0);182 183 # if PGM_GST_TYPE != PGM_TYPE_AMD64184 /* Boundary check. */185 if (GCPtr >= _4G)186 return VERR_PAGE_TABLE_NOT_PRESENT;187 # endif188 189 PVM pVM = pVCpu->CTX_SUFF(pVM);190 323 for (;;) 191 324 { 192 /* 193 * Get the PD entry. 
194 */ 195 # if PGM_GST_TYPE == PGM_TYPE_32BIT 196 PX86PDE pPde = pgmGstGet32bitPDEPtr(&pVCpu->pgm.s, GCPtr); 197 198 # elif PGM_GST_TYPE == PGM_TYPE_PAE 199 /* pgmGstGetPaePDEPtr will return 0 if the PDPTE is marked as not present 200 * All the other bits in the PDPTE are only valid in long mode (r/w, u/s, nx) 201 */ 202 PX86PDEPAE pPde = pgmGstGetPaePDEPtr(&pVCpu->pgm.s, GCPtr); 203 Assert(pPde); 204 if (!pPde) 205 return VERR_PAGE_TABLE_NOT_PRESENT; 206 # elif PGM_GST_TYPE == PGM_TYPE_AMD64 207 /** @todo Setting the r/w, u/s & nx bits might have no effect depending on the pdpte & pml4 values */ 208 PX86PDEPAE pPde = pgmGstGetLongModePDEPtr(&pVCpu->pgm.s, GCPtr); 209 Assert(pPde); 210 if (!pPde) 211 return VERR_PAGE_TABLE_NOT_PRESENT; 212 # endif 213 GSTPDE Pde = *pPde; 214 Assert(Pde.n.u1Present); 215 if (!Pde.n.u1Present) 216 return VERR_PAGE_TABLE_NOT_PRESENT; 217 218 if ( !Pde.b.u1Size 219 # if PGM_GST_TYPE == PGM_TYPE_32BIT 220 || !CPUMIsGuestPageSizeExtEnabled(pVCpu) 221 # endif 222 ) 325 GSTPTWALK Walk; 326 int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk); 327 if (RT_FAILURE(rc)) 328 return rc; 329 330 if (!Walk.Core.fBigPage) 223 331 { 224 332 /* 225 * 4KB Page table 333 * 4KB Page table, process 226 334 * 227 * Walk page tables and pages till we're done.335 * Walk pages till we're done. 228 336 */ 229 PGSTPT pPT;230 int rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & GST_PDE_PG_MASK, &pPT);231 if (RT_FAILURE(rc))232 return rc;233 234 337 unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK; 235 while (iPTE < RT_ELEMENTS( pPT->a))338 while (iPTE < RT_ELEMENTS(Walk.pPt->a)) 236 339 { 237 GSTPTE Pte = pPT->a[iPTE];340 GSTPTE Pte = Walk.pPt->a[iPTE]; 238 341 Pte.u = (Pte.u & (fMask | X86_PTE_PAE_PG_MASK)) 239 342 | (fFlags & ~GST_PTE_PG_MASK); 240 pPT->a[iPTE] = Pte;343 Walk.pPt->a[iPTE] = Pte; 241 344 242 345 /* next page */ … … 253 356 * 4MB Page table 254 357 */ 358 GSTPDE PdeNew; 255 359 # if PGM_GST_TYPE == PGM_TYPE_32BIT 256 Pde .u = (Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PG_HIGH_MASK | X86_PDE4M_PS))360 PdeNew.u = (Walk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PG_HIGH_MASK | X86_PDE4M_PS)) 257 361 # else 258 Pde .u = (Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS))259 # endif 260 | (fFlags & ~GST_PTE_PG_MASK)261 | ((fFlags & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT);262 * pPde = Pde;362 PdeNew.u = (Walk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS)) 363 # endif 364 | (fFlags & ~GST_PTE_PG_MASK) 365 | ((fFlags & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT); 366 *Walk.pPde = PdeNew; 263 367 264 368 /* advance */ … … 279 383 280 384 /** 281 * Retrieve guest PDE information 385 * Retrieve guest PDE information. 282 386 * 283 387 * @returns VBox status code. 284 388 * @param pVCpu The VMCPU handle. 285 * @param GCPtr Guest context pointer 286 * @param pPDE Pointer to guest PDE structure 389 * @param GCPtr Guest context pointer. 390 * @param pPDE Pointer to guest PDE structure. 287 391 */ 288 392 PGM_GST_DECL(int, GetPDE)(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPDE) … … 294 398 # if PGM_GST_TYPE != PGM_TYPE_AMD64 295 399 /* Boundary check. 
*/ 296 if ( GCPtr >= _4G)400 if (RT_UNLIKELY(GCPtr >= _4G)) 297 401 return VERR_PAGE_TABLE_NOT_PRESENT; 298 402 # endif 299 403 300 404 # if PGM_GST_TYPE == PGM_TYPE_32BIT 301 X86PDE Pde = pgmGstGet32bitPDE(&pVCpu->pgm.s, GCPtr); 405 unsigned iPd = (GCPtr >> GST_PD_SHIFT) & GST_PD_MASK; 406 PX86PD pPd = pgmGstGet32bitPDPtr(pVCpu); 407 302 408 # elif PGM_GST_TYPE == PGM_TYPE_PAE 303 X86PDEPAE Pde = pgmGstGetPaePDE(&pVCpu->pgm.s, GCPtr); 409 unsigned iPd; 410 PCX86PDPAE pPd = pgmGstGetPaePDPtr(pVCpu, GCPtr, &iPd, NULL); 411 304 412 # elif PGM_GST_TYPE == PGM_TYPE_AMD64 305 X86PDEPAE Pde = pgmGstGetLongModePDE(&pVCpu->pgm.s, GCPtr); 306 # endif 307 308 pPDE->u = (X86PGPAEUINT)Pde.u; 413 PX86PML4E pPml4eIgn; 414 X86PDPE PdpeIgn; 415 unsigned iPd; 416 PCX86PDPAE pPd = pgmGstGetLongModePDPtr(pVCpu, GCPtr, &pPml4eIgn, &PdpeIgn, &iPd); 417 /* Note! We do not return an effective PDE here like we do for the PTE in GetPage method. */ 418 # endif 419 420 if (RT_LIKELY(pPd)) 421 pPDE->u = (X86PGPAEUINT)pPd->a[iPd].u; 422 else 423 pPDE->u = 0; 309 424 return VINF_SUCCESS; 425 310 426 #else 311 427 AssertFailed(); … … 333 449 Assert(pCur->enmType != PGMVIRTHANDLERTYPE_HYPERVISOR); 334 450 335 # if PGM_GST_TYPE == PGM_TYPE_32BIT336 PX86PD pPDSrc = pgmGstGet32bitPDPtr( &pVCpu->pgm.s);337 # endif451 # if PGM_GST_TYPE == PGM_TYPE_32BIT 452 PX86PD pPDSrc = pgmGstGet32bitPDPtr(pVCpu); 453 # endif 338 454 339 455 RTGCPTR GCPtr = pCur->Core.Key; 340 # if PGM_GST_TYPE != PGM_TYPE_AMD64456 # if PGM_GST_TYPE != PGM_TYPE_AMD64 341 457 /* skip all stuff above 4GB if not AMD64 mode. */ 342 if ( GCPtr >= _4G)458 if (RT_UNLIKELY(GCPtr >= _4G)) 343 459 return 0; 344 # endif460 # endif 345 461 346 462 unsigned offPage = GCPtr & PAGE_OFFSET_MASK; … … 348 464 while (iPage < pCur->cPages) 349 465 { 350 # if PGM_GST_TYPE == PGM_TYPE_32BIT466 # if PGM_GST_TYPE == PGM_TYPE_32BIT 351 467 X86PDE Pde = pPDSrc->a[GCPtr >> X86_PD_SHIFT]; 352 #elif PGM_GST_TYPE == PGM_TYPE_PAE 353 X86PDEPAE Pde = pgmGstGetPaePDE(&pVCpu->pgm.s, GCPtr); 354 #elif PGM_GST_TYPE == PGM_TYPE_AMD64 355 X86PDEPAE Pde = pgmGstGetLongModePDE(&pVCpu->pgm.s, GCPtr); 356 #endif 357 if (Pde.n.u1Present) 358 { 359 if ( !Pde.b.u1Size 468 # elif PGM_GST_TYPE == PGM_TYPE_PAE 469 X86PDEPAE Pde = pgmGstGetPaePDE(pVCpu, GCPtr); 470 # elif PGM_GST_TYPE == PGM_TYPE_AMD64 471 X86PDEPAE Pde = pgmGstGetLongModePDE(pVCpu, GCPtr); 472 # endif 360 473 # if PGM_GST_TYPE == PGM_TYPE_32BIT 361 || !(pState->cr4 & X86_CR4_PSE) 362 # endif 363 ) 474 bool const fBigPage = Pde.b.u1Size; 475 # else 476 bool const fBigPage = Pde.b.u1Size && !(pState->cr4 & X86_CR4_PSE); 477 # endif 478 if ( Pde.n.u1Present 479 && ( !fBigPage 480 ? GST_IS_PDE_VALID(pVCpu, Pde) 481 : GST_IS_BIG_PDE_VALID(pVCpu, Pde)) ) 482 { 483 if (!fBigPage) 364 484 { 365 485 /* … … 448 568 else 449 569 { 450 /* not-present . */570 /* not-present / invalid. */ 451 571 for (unsigned cPages = (GST_PT_MASK + 1) - ((GCPtr >> GST_PT_SHIFT) & GST_PT_MASK); 452 572 cPages && iPage < pCur->cPages; -
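The new PGM_GST_NAME(Walk) above checks each guest paging level for presence, reserved bits and a mappable physical address, recording the failing level in the walk structure, and the rewritten GetPage then derives the effective page flags from that walk: R/W and U/S are combined with a bitwise AND across all levels, while no-execute is a bitwise OR. A compact sketch of the flag-combination rule, using plain 64-bit entry values in place of the GSTPTWALK template:

#include <stdbool.h>
#include <stdint.h>

#define PG_RW   UINT64_C(0x0000000000000002)  /* bit 1  */
#define PG_US   UINT64_C(0x0000000000000004)  /* bit 2  */
#define PG_NX   (UINT64_C(1) << 63)           /* bit 63 */

/* Combine the entries of a completed walk (e.g. PML4E, PDPE, PDE, PTE) into
 * effective flags: R/W and U/S must be granted at every level, while NX at
 * any level makes the mapping non-executable. */
static uint64_t EffectiveFlags(const uint64_t *pauEntries, unsigned cLevels, bool fNxActive)
{
    uint64_t fRwUs = PG_RW | PG_US;
    bool     fNx   = false;
    for (unsigned i = 0; i < cLevels; i++)
    {
        fRwUs &= pauEntries[i];
        fNx   |= (pauEntries[i] & PG_NX) != 0;
    }

    /* Keep the leaf entry's remaining attribute bits (present, A, D, PAT, ...). */
    uint64_t fEff = pauEntries[cLevels - 1] & ~(PG_RW | PG_US | PG_NX);
    fEff |= fRwUs;
    if (fNx && fNxActive)
        fEff |= PG_NX;
    return fEff;
}

The big-page path in the hunks additionally shifts the PDE's PAT bit down to the PTE position; that detail is left out of the sketch.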
trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp
r29250 r30889 292 292 else 293 293 { 294 PX86PDPE pGstPdpe = pgmGstGetPaePDPEPtr( &pVCpu->pgm.s, iPdPt << X86_PDPT_SHIFT);294 PX86PDPE pGstPdpe = pgmGstGetPaePDPEPtr(pVCpu, iPdPt << X86_PDPT_SHIFT); 295 295 if (pGstPdpe) 296 296 GstPdpe = *pGstPdpe; … … 721 721 * Resolve the page directory. 722 722 */ 723 PX86PD pPD = pgmGstGet32bitPDPtr( &pVCpu->pgm.s);723 PX86PD pPD = pgmGstGet32bitPDPtr(pVCpu); 724 724 Assert(pPD); 725 725 … … 759 759 while (iPT-- > 0) 760 760 { 761 X86PDEPAE Pde = pgmGstGetPaePDE( &pVCpu->pgm.s, GCPtr);761 X86PDEPAE Pde = pgmGstGetPaePDE(pVCpu, GCPtr); 762 762 763 763 if ( Pde.n.u1Present … … 810 810 * Resolve the page directory. 811 811 */ 812 PX86PD pPD = pgmGstGet32bitPDPtr( &pVCpu->pgm.s);812 PX86PD pPD = pgmGstGet32bitPDPtr(pVCpu); 813 813 Assert(pPD); 814 814 … … 862 862 while (iPT-- > 0) 863 863 { 864 X86PDEPAE Pde = pgmGstGetPaePDE( &pVCpu->pgm.s, GCPtr);864 X86PDEPAE Pde = pgmGstGetPaePDE(pVCpu, GCPtr); 865 865 866 866 if ( Pde.n.u1Present -
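The PGMAllMap.cpp hunks are mechanical: the guest paging accessors (pgmGstGet32bitPDPtr, pgmGstGetPaePDE, pgmGstGetPaePDPEPtr) are now called with the VMCPU pointer instead of its embedded PGM state. The snippet below only illustrates the shape of that call-site change with hypothetical, heavily simplified types; the real declarations are not part of this excerpt.

#include <stddef.h>

/* Hypothetical, stripped-down stand-ins for the real structures. */
typedef struct PGMCPU { void *pGst32BitPd; } PGMCPU;
typedef struct VMCPU  { struct { PGMCPU s; } pgm; } VMCPU;

/* Old convention: callers handed in the embedded PGM state. */
static void *GetGuestPdOld(PGMCPU *pPgm)  { return pPgm->pGst32BitPd; }
/* New convention (matching the call sites above): callers pass the VMCPU. */
static void *GetGuestPdNew(VMCPU *pVCpu)  { return pVCpu->pgm.s.pGst32BitPd; }

static void Example(VMCPU *pVCpu)
{
    void *pOld = GetGuestPdOld(&pVCpu->pgm.s);  /* before this changeset */
    void *pNew = GetGuestPdNew(pVCpu);          /* after this changeset  */
    (void)pOld; (void)pNew;
}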
trunk/src/VBox/VMM/VMMR0/PGMR0.cpp
r30111 r30889 5 5 6 6 /* 7 * Copyright (C) 2007 Oracle Corporation7 * Copyright (C) 2007-2010 Oracle Corporation 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 30 30 #include <iprt/mem.h> 31 31 32 RT_C_DECLS_BEGIN 32 33 /* 34 * Instantiate the ring-0 header/code templates. 35 */ 33 36 #define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name) 34 37 #include "PGMR0Bth.h" … … 46 49 #include "PGMR0Bth.h" 47 50 #undef PGM_BTH_NAME 48 49 RT_C_DECLS_END50 51 51 52 … … 162 163 } 163 164 165 164 166 /** 165 167 * Worker function for PGMR3PhysAllocateLargeHandyPage … … 186 188 return rc; 187 189 } 190 188 191 189 192 /** … … 208 211 209 212 /* AMD uses the host's paging mode; Intel has a single mode (EPT). */ 210 AssertMsg(enmShwPagingMode == PGMMODE_32_BIT || enmShwPagingMode == PGMMODE_PAE || enmShwPagingMode == PGMMODE_PAE_NX || enmShwPagingMode == PGMMODE_AMD64 || enmShwPagingMode == PGMMODE_AMD64_NX || enmShwPagingMode == PGMMODE_EPT, ("enmShwPagingMode=%d\n", enmShwPagingMode)); 213 AssertMsg( enmShwPagingMode == PGMMODE_32_BIT || enmShwPagingMode == PGMMODE_PAE || enmShwPagingMode == PGMMODE_PAE_NX 214 || enmShwPagingMode == PGMMODE_AMD64 || enmShwPagingMode == PGMMODE_AMD64_NX || enmShwPagingMode == PGMMODE_EPT, 215 ("enmShwPagingMode=%d\n", enmShwPagingMode)); 211 216 212 217 #ifdef VBOX_WITH_STATISTICS … … 253 258 * Call the worker. 254 259 * 255 * We pretend the guest is in protected mode without paging, so we can use existing code to build the256 * nested page tables.260 * Note! We pretend the guest is in protected mode without paging, so we 261 * can use existing code to build the nested page tables. 257 262 */ 258 263 bool fLockTaken = false; 259 264 switch(enmShwPagingMode) 260 265 { 261 case PGMMODE_32_BIT:262 rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);263 break;264 case PGMMODE_PAE:265 case PGMMODE_PAE_NX:266 rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);267 break;268 case PGMMODE_AMD64:269 case PGMMODE_AMD64_NX:270 rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);271 break;272 case PGMMODE_EPT:273 rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);274 break;275 default:276 AssertFailed();277 rc = VERR_INVALID_PARAMETER;278 break;266 case PGMMODE_32_BIT: 267 rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken); 268 break; 269 case PGMMODE_PAE: 270 case PGMMODE_PAE_NX: 271 rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken); 272 break; 273 case PGMMODE_AMD64: 274 case PGMMODE_AMD64_NX: 275 rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken); 276 break; 277 case PGMMODE_EPT: 278 rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken); 279 break; 280 default: 281 AssertFailed(); 282 rc = VERR_INVALID_PARAMETER; 283 break; 279 284 } 280 285 if (fLockTaken) … … 283 288 pgmUnlock(pVM); 284 289 } 290 285 291 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE) 286 292 rc = VINF_SUCCESS; 287 else288 293 /* Note: hack alert for difficult to reproduce problem. */ 289 if (rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. 
*/290 ||rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */291 ||rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */292 ||rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */294 else if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */ 295 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */ 296 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */ 297 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */ 293 298 { 294 299 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGp error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip)); 295 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */ 300 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about 301 single VCPU VMs though. */ 296 302 rc = VINF_SUCCESS; 297 303 } -
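The PGMR0.cpp hunks reindent the nested-paging #PF dispatch: the handler switches on the shadow paging mode to pick the matching template instantiation (32-bit, PAE, AMD64 or EPT, all with the PROT guest mode, since the guest is treated as running in protected mode without paging) and afterwards folds VINF_PGM_SYNCPAGE_MODIFIED_PDE and the "page (table) not present" family back into success so the faulting instruction is simply re-executed. A condensed sketch of that dispatch-and-normalize shape, with placeholder handlers and status values standing in for the real PGM_BTH_NAME instantiations and VBox status codes:

/* Placeholder status codes and paging modes; the real ones are the VBox
 * VINF_/VERR_ values and PGMMODE_* used in the hunks above. */
enum { STS_OK = 0, STS_SYNCPAGE_MODIFIED_PDE = 1,
       STS_PAGE_NOT_PRESENT = -10, STS_PAGE_TABLE_NOT_PRESENT = -11,
       STS_INVALID_PARAMETER = -2 };
enum { MODE_32BIT, MODE_PAE, MODE_AMD64, MODE_EPT };

/* Stand-ins for the PGM_BTH_NAME_*_PROT(Trap0eHandler) instantiations. */
static int Handler32Bit(void) { return STS_OK; }
static int HandlerPae(void)   { return STS_OK; }
static int HandlerAmd64(void) { return STS_OK; }
static int HandlerEpt(void)   { return STS_OK; }

static int NestedTrap0e(int enmShwPagingMode)
{
    int rc;
    switch (enmShwPagingMode)
    {
        case MODE_32BIT: rc = Handler32Bit(); break;
        case MODE_PAE:   rc = HandlerPae();   break;
        case MODE_AMD64: rc = HandlerAmd64(); break;
        case MODE_EPT:   rc = HandlerEpt();   break;
        default:         return STS_INVALID_PARAMETER;
    }

    /* A modified PDE or a transiently missing page/page table just means
     * "retry the instruction", so report success to the caller. */
    if (   rc == STS_SYNCPAGE_MODIFIED_PDE
        || rc == STS_PAGE_NOT_PRESENT
        || rc == STS_PAGE_TABLE_NOT_PRESENT)
        rc = STS_OK;
    return rc;
}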
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r30660 r30889 1309 1309 } 1310 1310 1311 1312 1311 #ifdef LOG_ENABLED 1312 1313 1313 /** 1314 1314 * Disables flushing of the ring-0 debug log. … … 1335 1335 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false; 1336 1336 } 1337 #endif 1337 1338 #endif /* LOG_ENABLED */ 1338 1339 1339 1340 /**