- Timestamp:
- Feb 24, 2022 4:02:00 PM (3 years ago)
- Location:
- trunk
- Files:
-
- 12 edited
-
include/iprt/param.h (modified) (2 diffs)
-
src/VBox/Devices/VMMDev/VMMDevHGCM.cpp (modified) (1 diff)
-
src/VBox/VMM/VMMAll/PGMAll.cpp (modified) (2 diffs)
-
src/VBox/VMM/VMMAll/PGMAllGst.h (modified) (1 diff)
-
src/VBox/VMM/VMMAll/PGMAllPool.cpp (modified) (2 diffs)
-
src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h (modified) (1 diff)
-
src/VBox/VMM/VMMR0/HMSVMR0.cpp (modified) (2 diffs)
-
src/VBox/VMM/VMMR3/CPUM.cpp (modified) (1 diff)
-
src/VBox/VMM/VMMR3/DBGFDisas.cpp (modified) (1 diff)
-
src/VBox/VMM/VMMR3/PGMPool.cpp (modified) (1 diff)
-
src/bldprogs/scm.cpp (modified) (1 diff)
-
src/bldprogs/scmrw.cpp (modified) (2 diffs)
Legend:
- Unmodified
- Added
- Removed
-
trunk/include/iprt/param.h
r93115 r93931 75 75 * i386 Page offset mask. 76 76 * 77 * Do NOT one-complement this for whatever purpose. You may get a 32-bit const when you want a 64-bit one.78 * Use PAGE_BASE_MASK, PAGE_BASE_GC_MASK, PAGE_BASE_HC_MASK, PAGE_ADDRESS() or X86_PTE_PAE_PG_MASK.77 * @note If you do one-complement this, always insert a target type cast after 78 * the operator! Otherwise you may end up with weird results. 79 79 */ 80 80 #if defined(RT_ARCH_SPARC64) … … 87 87 88 88 /** 89 * Page address mask for the guest context POINTERS. 90 * @remark Physical addresses are always masked using X86_PTE_PAE_PG_MASK! 91 */ 92 #define PAGE_BASE_GC_MASK (~(RTGCUINTPTR)PAGE_OFFSET_MASK) 93 94 /** 95 * Page address mask for the host context POINTERS. 96 * @remark Physical addresses are always masked using X86_PTE_PAE_PG_MASK! 97 */ 98 #define PAGE_BASE_HC_MASK (~(RTHCUINTPTR)PAGE_OFFSET_MASK) 99 100 101 * Page address mask for the both context POINTERS. 89 * Page address mask for the uintptr_t sized pointers. 102 90 * 103 91 * Be careful when using this since it may be a size too big! 104 92 * @remark Physical addresses are always masked using X86_PTE_PAE_PG_MASK! 105 93 106 #define PAGE_BASE_MASK (~( RTUINTPTR)PAGE_OFFSET_MASK)94 #define PAGE_BASE_MASK (~(uintptr_t)PAGE_OFFSET_MASK) 107 95 108 96 /** -
trunk/src/VBox/Devices/VMMDev/VMMDevHGCM.cpp
r93115 r93931 1109 1109 1110 1110 /* Convert the guest linear pointers of pages to physical addresses. */ 1111 GCPtr &= PAGE_BASE_GC_MASK;1111 GCPtr &= ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK; 1112 1112 for (uint32_t iPage = 0; iPage < cPages; ++iPage) 1113 1113 { -
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
r93922 r93931 1137 1137 Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT))); 1138 1138 1139 GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */1139 GCPtr &= ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK; /** @todo this ain't necessary, right... */ 1140 1140 1141 1141 PVMCC pVM = pVCpu->CTX_SUFF(pVM); … … 2070 2070 cb += GCPtr & GUEST_PAGE_OFFSET_MASK; 2071 2071 cb = RT_ALIGN_Z(cb, GUEST_PAGE_SIZE); 2072 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);2072 GCPtr &= ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK; 2073 2073 2074 2074 /* -
trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
r93922 r93931 390 390 pWalk->fSucceeded = true; 391 391 pWalk->GCPtr = GCPtr; 392 pWalk->GCPhys = GCPtr & PAGE_BASE_GC_MASK;392 pWalk->GCPhys = GCPtr & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; 393 393 pWalk->fEffective = X86_PTE_P | X86_PTE_RW | X86_PTE_US; 394 394 NOREF(pVCpu); -
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
r93725 r93931 1190 1190 1191 1191 /* Temporarily allow write access to the page table again. */ 1192 rc = PGMHandlerPhysicalPageTempOff(pVM, pPage->GCPhys & PAGE_BASE_GC_MASK, pPage->GCPhys & PAGE_BASE_GC_MASK); 1192 rc = PGMHandlerPhysicalPageTempOff(pVM, 1193 pPage->GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK, 1194 pPage->GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK); 1193 1195 if (rc == VINF_SUCCESS) 1194 1196 { … … 1641 1643 1642 1644 /* First write protect the page again to catch all write accesses. (before checking for changes -> SMP) */ 1643 int rc = PGMHandlerPhysicalReset(pVM, pPage->GCPhys & PAGE_BASE_GC_MASK);1645 int rc = PGMHandlerPhysicalReset(pVM, pPage->GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK); 1644 1646 Assert(rc == VINF_SUCCESS); 1645 1647 pPage->fDirty = false; -
trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h
r93930 r93931 8520 8520 8521 8521 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */ 8522 GCPhys &= PAGE_BASE_GC_MASK;8522 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; 8523 8523 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual); 8524 8524 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys, -
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r93574 r93931 7926 7926 { 7927 7927 RTGCPHYS GCPhysApicBase = APICGetBaseMsrNoCheck(pVCpu); 7928 GCPhysApicBase &= PAGE_BASE_GC_MASK;7928 GCPhysApicBase &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; 7929 7929 7930 7930 if (GCPhysFaultAddr == GCPhysApicBase + XAPIC_OFF_TPR) … … 8281 8281 RTGCPHYS GCPhysApicBase; 8282 8282 GCPhysApicBase = APICGetBaseMsrNoCheck(pVCpu); 8283 GCPhysApicBase &= PAGE_BASE_GC_MASK;8283 GCPhysApicBase &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; 8284 8284 8285 8285 /* Check if the page at the fault-address is the APIC base. */ -
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
r93905 r93931 4186 4186 { 4187 4187 /* translate the address */ 4188 pState->pvPageGC = GCPtr & PAGE_BASE_GC_MASK;4188 pState->pvPageGC = GCPtr & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK; 4189 4189 4190 4190 /* Release mapping lock previously acquired. */ -
trunk/src/VBox/VMM/VMMR3/DBGFDisas.cpp
r93554 r93931 221 221 222 222 /* translate the address */ 223 pState->GCPtrPage = GCPtr & PAGE_BASE_GC_MASK;223 pState->GCPtrPage = GCPtr & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK; 224 224 if (pState->fLocked) 225 225 PGMPhysReleasePageMappingLock(pState->pVM, &pState->PageMapLock); -
trunk/src/VBox/VMM/VMMR3/PGMPool.cpp
r93716 r93931 703 703 704 704 /* First write protect the page again to catch all write accesses. (before checking for changes -> SMP) */ 705 int rc = PGMHandlerPhysicalReset(pVM, pPage->GCPhys & PAGE_BASE_GC_MASK);705 int rc = PGMHandlerPhysicalReset(pVM, pPage->GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK); 706 706 AssertRCSuccess(rc); 707 707 pPage->fDirty = false; -
trunk/src/bldprogs/scm.cpp
r93687 r93931 2919 2919 case SCMOPT_ONLY_GUEST_HOST_PAGE: 2920 2920 RTPrintf(" No PAGE_SIZE, PAGE_SHIFT or PAGE_OFFSET_MASK allowed, must have\n" 2921 " GUEST_ or HOST_ prefix. Default: %RTbool\n", g_Defaults.fOnlyGuestHostPage); 2921 2922 " GUEST_ or HOST_ prefix. Also forbids use of PAGE_BASE_MASK,\n" 2923 " PAGE_BASE_HC_MASK and PAGE_BASE_GC_MASK. Default: %RTbool\n", g_Defaults.fOnlyGuestHostPage); 2922 2923 break; 2923 2924 case SCMOPT_NO_ASM_MEM_PAGE_USE:
trunk/src/bldprogs/scmrw.cpp
r93686 r93931 3254 3254 /** 3255 3255 * Checks for PAGE_SIZE, PAGE_SHIFT and PAGE_OFFSET_MASK w/o a GUEST_ or HOST_ 3256 * prefix. 3256 * prefix as well as banning PAGE_BASE_HC_MASK, PAGE_BASE_GC_MASK and 3257 * PAGE_BASE_MASK. 3257 3258 * 3258 3259 * @returns true if modifications were made, false if not. … … 3272 3273 { RT_STR_TUPLE("PAGE_SHIFT") }, 3273 3274 { RT_STR_TUPLE("PAGE_OFFSET_MASK") }, 3275 { RT_STR_TUPLE("PAGE_BASE_MASK") }, 3276 { RT_STR_TUPLE("PAGE_BASE_GC_MASK") }, 3277 { RT_STR_TUPLE("PAGE_BASE_HC_MASK") }, 3274 3278 { RT_STR_TUPLE("ASMMemIsZeroPage") }, 3275 3279 { RT_STR_TUPLE("ASMMemZeroPage") }, 3276 3280 }; 3277 size_t const iFirstWord = pSettings->fOnlyGuestHostPage ? 0 : 3;3278 size_t const iEndWords = pSettings->fNoASMMemPageUse ? 5 : 3;3281 size_t const iFirstWord = pSettings->fOnlyGuestHostPage ? 0 : 5; 3282 size_t const iEndWords = pSettings->fNoASMMemPageUse ? 7 : 5; 3279 3283 3280 3284 uint32_t iLine = 0;
Note:
See TracChangeset
for help on using the changeset viewer.

