Changeset 92426 in vbox
- Timestamp: Nov 15, 2021 1:25:47 PM
- Location: trunk
- Files: 14 edited
- include/VBox/vmm/pgm.h (modified) (3 diffs)
- src/VBox/VMM/VMMAll/IEMAll.cpp (modified) (14 diffs)
- src/VBox/VMM/VMMAll/PGMAll.cpp (modified) (13 diffs)
- src/VBox/VMM/VMMAll/PGMAllBth.h (modified) (21 diffs)
- src/VBox/VMM/VMMAll/PGMAllGst.h (modified) (24 diffs)
- src/VBox/VMM/VMMAll/PGMAllGstSlatEpt.cpp.h (modified) (12 diffs)
- src/VBox/VMM/VMMAll/PGMAllPhys.cpp (modified) (8 diffs)
- src/VBox/VMM/VMMAll/PGMAllShw.h (modified) (1 diff)
- src/VBox/VMM/VMMR0/HMSVMR0.cpp (modified) (1 diff)
- src/VBox/VMM/VMMR0/PGMR0SharedPage.cpp (modified) (2 diffs)
- src/VBox/VMM/VMMR3/DBGFAddr.cpp (modified) (1 diff)
- src/VBox/VMM/VMMR3/PGMDbg.cpp (modified) (7 diffs)
- src/VBox/VMM/VMMR3/PGMSharedPage.cpp (modified) (1 diff)
- src/VBox/VMM/include/PGMInternal.h (modified) (9 diffs)
trunk/include/VBox/vmm/pgm.h
r92409 r92426 34 34 #include <VBox/vmm/vmapi.h> 35 35 #include <VBox/vmm/gmm.h> /* for PGMMREGISTERSHAREDMODULEREQ */ 36 #include <VBox/vmm/hm_vmx.h> 36 37 #include <iprt/x86.h> 37 38 #include <VBox/param.h> … … 295 296 } PGMSLAT; 296 297 298 299 /** @name PGMPTATTRS - PGM page-table attributes. 300 * 301 * This is VirtualBox's combined page table attributes. It combines regular page 302 * table and Intel EPT attributes. It's 64-bit in size so there's ample room for 303 * bits added in the future to EPT or regular page tables (for e.g. Protection Key). 304 * 305 * The following bits map 1:1 (shifted by PGM_PTATTRS_EPT_SHIFT) to the Intel EPT 306 * attributes as these are unique to EPT and fit within 64-bits despite the shift: 307 * - EPT_R : Read access. 308 * - EPT_W : Write access. 309 * - EPT_X_SUPER : Execute or execute for supervisor-mode linear addr access. 310 * - EPT_MEMTYPE : EPT memory type. 311 * - EPT_IGNORE_PAT: Ignore PAT memory type. 312 * - EPT_X_USER : Execute access for user-mode linear addresses. 313 * 314 * For regular page tables, the R bit is always 1 (same as P bit). 315 * For Intel EPT, the EPT_R and EPT_W bits are copied to R and W bits respectively. 316 * 317 * The following EPT attributes are mapped to the following positions because they 318 * exist in the regular page tables at these positions OR are exclusive to EPT and 319 * have been mapped to arbitrarily chosen positions: 320 * - EPT_A : Accessed (EPT bit 8 maps to bit 5). 321 * - EPT_D : Dirty (EPT bit 9 maps to bit 6). 322 * - EPT_SUPER_SHW_STACK : Supervisor Shadow Stack (EPT bit 60 maps to bit 24). 323 * - EPT_SUPPRESS_VE_XCPT: Suppress \#VE exception (EPT bit 63 maps to bit 25). 324 * 325 * Bits 12, 11:9 and 43 are deliberately kept unused (correspond to bit PS and bits 326 * 11:9 in the regular page-table structures and to bit 11 in the EPT structures 327 * respectively) as bit 12 is the page-size bit and bits 11:9 are reserved for 328 * use by software and we may want to use/preserve them in the future. 329 * 330 * @{ */ 331 typedef uint64_t PGMPTATTRS; 332 /** Pointer to a PGMPTATTRS type. */ 333 typedef PGMPTATTRS *PPGMPTATTRS; 334 335 /** Read bit (always 1 for regular PT, copy of EPT_R for EPT). */ 336 #define PGM_PTATTRS_R_SHIFT 0 337 #define PGM_PTATTRS_R_MASK RT_BIT_64(PGM_PTATTRS_R_SHIFT) 338 /** Write access bit (aka read/write bit for regular PT). */ 339 #define PGM_PTATTRS_W_SHIFT 1 340 #define PGM_PTATTRS_W_MASK RT_BIT_64(PGM_PTATTRS_W_SHIFT) 341 /** User-mode access bit. */ 342 #define PGM_PTATTRS_US_SHIFT 2 343 #define PGM_PTATTRS_US_MASK RT_BIT_64(PGM_PTATTRS_US_SHIFT) 344 /** Write through cache bit. */ 345 #define PGM_PTATTRS_PWT_SHIFT 3 346 #define PGM_PTATTRS_PWT_MASK RT_BIT_64(PGM_PTATTRS_PWT_SHIFT) 347 /** Cache disabled bit. */ 348 #define PGM_PTATTRS_PCD_SHIFT 4 349 #define PGM_PTATTRS_PCD_MASK RT_BIT_64(PGM_PTATTRS_PCD_SHIFT) 350 /** Accessed bit. */ 351 #define PGM_PTATTRS_A_SHIFT 5 352 #define PGM_PTATTRS_A_MASK RT_BIT_64(PGM_PTATTRS_A_SHIFT) 353 /** Dirty bit. */ 354 #define PGM_PTATTRS_D_SHIFT 6 355 #define PGM_PTATTRS_D_MASK RT_BIT_64(PGM_PTATTRS_D_SHIFT) 356 /** The PAT bit. */ 357 #define PGM_PTATTRS_PAT_SHIFT 7 358 #define PGM_PTATTRS_PAT_MASK RT_BIT_64(PGM_PTATTRS_PAT_SHIFT) 359 /** The global bit. */ 360 #define PGM_PTATTRS_G_SHIFT 8 361 #define PGM_PTATTRS_G_MASK RT_BIT_64(PGM_PTATTRS_G_SHIFT) 362 /** Reserved (bits 12:9) unused. 
*/ 363 #define PGM_PTATTRS_RSVD_12_9_SHIFT 9 364 #define PGM_PTATTRS_RSVD_12_9_MASK UINT64_C(0x0000000000001e00) 365 /** Read access bit - EPT only. */ 366 #define PGM_PTATTRS_EPT_R_SHIFT 13 367 #define PGM_PTATTRS_EPT_R_MASK RT_BIT_64(PGM_PTATTRS_EPT_R_SHIFT) 368 /** Write access bit - EPT only. */ 369 #define PGM_PTATTRS_EPT_W_SHIFT 14 370 #define PGM_PTATTRS_EPT_W_MASK RT_BIT_64(PGM_PTATTRS_EPT_W_SHIFT) 371 /** Execute or execute access for supervisor-mode linear addresses - EPT only. */ 372 #define PGM_PTATTRS_EPT_X_SUPER_SHIFT 15 373 #define PGM_PTATTRS_EPT_X_SUPER_MASK RT_BIT_64(PGM_PTATTRS_EPT_X_SUPER_SHIFT) 374 /** EPT memory type - EPT only. */ 375 #define PGM_PTATTRS_EPT_MEMTYPE_SHIFT 16 376 #define PGM_PTATTRS_EPT_MEMTYPE_MASK UINT64_C(0x0000000000070000) 377 /** Ignore PAT memory type - EPT only. */ 378 #define PGM_PTATTRS_EPT_IGNORE_PAT_SHIFT 19 379 #define PGM_PTATTRS_EPT_IGNORE_PAT_MASK RT_BIT_64(PGM_PTATTRS_EPT_IGNORE_PAT_SHIFT) 380 /** Reserved (bits 22:20) unused. */ 381 #define PGM_PTATTRS_RSVD_22_20_SHIFT 20 382 #define PGM_PTATTRS_RSVD_22_20_MASK UINT64_C(0x0000000000700000) 383 /** Execute access for user-mode linear addresses - EPT only. */ 384 #define PGM_PTATTRS_EPT_X_USER_SHIFT 23 385 #define PGM_PTATTRS_EPT_X_USER_MASK RT_BIT_64(PGM_PTATTRS_EPT_X_USER_SHIFT) 386 /** Reserved (bit 23) - unused. */ 387 #define PGM_PTATTRS_RSVD_23_SHIFT 24 388 #define PGM_PTATTRS_RSVD_23_MASK UINT64_C(0x0000000001000000) 389 /** Supervisor shadow stack - EPT only. */ 390 #define PGM_PTATTRS_EPT_SUPER_SHW_STACK_SHIFT 25 391 #define PGM_PTATTRS_EPT_SUPER_SHW_STACK_MASK RT_BIT_64(PGM_PTATTRS_EPT_SUPER_SHW_STACK_SHIFT) 392 /** Suppress \#VE exception - EPT only. */ 393 #define PGM_PTATTRS_EPT_SUPPRESS_VE_XCPT_SHIFT 26 394 #define PGM_PTATTRS_EPT_SUPPRESS_VE_XCPT_MASK RT_BIT_64(PGM_PTATTRS_EPT_SUPPRESS_VE_XCPT_SHIFT) 395 /** Reserved (bits 62:27) - unused. */ 396 #define PGM_PTATTRS_RSVD_62_27_SHIFT 27 397 #define PGM_PTATTRS_RSVD_62_27_MASK UINT64_C(0x7ffffffff8000000) 398 /** No-execute bit. */ 399 #define PGM_PTATTRS_NX_SHIFT 63 400 #define PGM_PTATTRS_NX_MASK RT_BIT_64(PGM_PTATTRS_NX_SHIFT) 401 402 RT_BF_ASSERT_COMPILE_CHECKS(PGM_PTATTRS_, UINT64_C(0), UINT64_MAX, 403 (R, W, US, PWT, PCD, A, D, PAT, G, RSVD_12_9, EPT_R, EPT_W, EPT_X_SUPER, EPT_MEMTYPE, EPT_IGNORE_PAT, 404 RSVD_22_20, EPT_X_USER, RSVD_23, EPT_SUPER_SHW_STACK, EPT_SUPPRESS_VE_XCPT, RSVD_62_27, NX)); 405 406 /** The bit position where the EPT specific attributes begin. */ 407 #define PGM_PTATTRS_EPT_SHIFT PGM_PTATTRS_EPT_R_SHIFT 408 /** The mask of EPT bits (bits 26:ATTR_SHIFT). In the future we might choose to 409 * use higher unused bits for something else, in that case adjust this mask. */ 410 #define PGM_PTATTRS_EPT_MASK UINT64_C(0x0000000007ffe000) 411 412 /** The mask of all PGM page attribute bits for regular page-tables. */ 413 #define PGM_PTATTRS_PT_VALID_MASK ( PGM_PTATTRS_R_MASK \ 414 | PGM_PTATTRS_W_MASK \ 415 | PGM_PTATTRS_US_MASK \ 416 | PGM_PTATTRS_PWT_MASK \ 417 | PGM_PTATTRS_PCD_MASK \ 418 | PGM_PTATTRS_A_MASK \ 419 | PGM_PTATTRS_D_MASK \ 420 | PGM_PTATTRS_PAT_MASK \ 421 | PGM_PTATTRS_G_MASK \ 422 | PGM_PTATTRS_NX_MASK) 423 424 /** The mask of all PGM page attribute bits for EPT. 
*/ 425 #define PGM_PTATTRS_EPT_VALID_MASK ( PGM_PTATTRS_R_MASK \ 426 | PGM_PTATTRS_W_MASK \ 427 | PGM_PTATTRS_A_MASK \ 428 | PGM_PTATTRS_D_MASK \ 429 | PGM_PTATTRS_EPT_R_MASK \ 430 | PGM_PTATTRS_EPT_W_MASK \ 431 | PGM_PTATTRS_EPT_X_SUPER \ 432 | PGM_PTATTRS_EPT_MEMTYPE \ 433 | PGM_PTATTRS_EPT_IGNORE_PAT \ 434 | PGM_PTATTRS_EPT_X_USER \ 435 | PGM_PTATTRS_EPT_SUPER_SHW_STACK \ 436 | PGM_PTATTRS_EPT_SUPPRESS_VE_XCPT) 437 438 /* The mask of all PGM page attribute bits (combined). */ 439 #define PGM_PTATTRS_VALID_MASK (PGM_PTATTRS_PT_VALID_MASK | PGM_PTATTRS_PT_VALID_MASK) 440 441 /* Verify bits match the regular PT bits. */ 442 AssertCompile(PGM_PTATTRS_W_SHIFT == X86_PTE_BIT_RW); 443 AssertCompile(PGM_PTATTRS_US_SHIFT == X86_PTE_BIT_US); 444 AssertCompile(PGM_PTATTRS_PWT_SHIFT == X86_PTE_BIT_PWT); 445 AssertCompile(PGM_PTATTRS_PCD_SHIFT == X86_PTE_BIT_PCD); 446 AssertCompile(PGM_PTATTRS_A_SHIFT == X86_PTE_BIT_A); 447 AssertCompile(PGM_PTATTRS_D_SHIFT == X86_PTE_BIT_D); 448 AssertCompile(PGM_PTATTRS_PAT_SHIFT == X86_PTE_BIT_PAT); 449 AssertCompile(PGM_PTATTRS_G_SHIFT == X86_PTE_BIT_G); 450 AssertCompile(PGM_PTATTRS_W_MASK == X86_PTE_RW); 451 AssertCompile(PGM_PTATTRS_US_MASK == X86_PTE_US); 452 AssertCompile(PGM_PTATTRS_PWT_MASK == X86_PTE_PWT); 453 AssertCompile(PGM_PTATTRS_PCD_MASK == X86_PTE_PCD); 454 AssertCompile(PGM_PTATTRS_A_MASK == X86_PTE_A); 455 AssertCompile(PGM_PTATTRS_D_MASK == X86_PTE_D); 456 AssertCompile(PGM_PTATTRS_PAT_MASK == X86_PTE_PAT); 457 AssertCompile(PGM_PTATTRS_G_MASK == X86_PTE_G); 458 AssertCompile(PGM_PTATTRS_NX_MASK == X86_PTE_PAE_NX); 459 460 /* Verify those EPT bits that must map 1:1 (after shifting). */ 461 AssertCompile(PGM_PTATTRS_EPT_R_SHIFT - PGM_PTATTRS_EPT_SHIFT == EPT_E_BIT_READ); 462 AssertCompile(PGM_PTATTRS_EPT_W_SHIFT - PGM_PTATTRS_EPT_SHIFT == EPT_E_BIT_WRITE); 463 AssertCompile(PGM_PTATTRS_EPT_X_SUPER_SHIFT - PGM_PTATTRS_EPT_SHIFT == EPT_E_BIT_EXECUTE); 464 AssertCompile(PGM_PTATTRS_EPT_IGNORE_PAT_SHIFT - PGM_PTATTRS_EPT_SHIFT == EPT_E_BIT_IGNORE_PAT); 465 AssertCompile(PGM_PTATTRS_EPT_X_USER_SHIFT - PGM_PTATTRS_EPT_SHIFT == EPT_E_BIT_USER_EXECUTE); 466 /** @} */ 467 468 469 /** 470 * Page table walk information. 471 * 472 * This provides extensive information regarding page faults (or EPT 473 * violations/misconfigurations) while traversing page tables. 474 */ 475 typedef struct PGMPTWALK 476 { 477 /** The linear address that is being resolved (input). */ 478 RTGCPTR GCPtr; 479 480 /** The second-level physical address (input/output). 481 * @remarks only valid if fIsSlat is set. */ 482 RTGCPHYS GCPhysNested; 483 484 /** The physical address that is the result of the walk (output). 485 * @remarks This is page aligned and only valid if fSucceeded is set. */ 486 RTGCPHYS GCPhys; 487 488 /** Set if the walk succeeded. */ 489 bool fSucceeded; 490 /** Whether this is a second-level address translation. */ 491 bool fIsSlat; 492 /** Whether the linear address (GCPtr) caused the second-level 493 * address translation. */ 494 bool fIsLinearAddrValid; 495 /** The level problem arrised at. 496 * PTE is level 1, PDE is level 2, PDPE is level 3, PML4 is level 4, CR3 is 497 * level 8. This is 0 on success. */ 498 uint8_t uLevel; 499 /** Set if the page isn't present. */ 500 bool fNotPresent; 501 /** Encountered a bad physical address. */ 502 bool fBadPhysAddr; 503 /** Set if there was reserved bit violations. */ 504 bool fRsvdError; 505 /** Set if it involves a big page (2/4 MB). */ 506 bool fBigPage; 507 /** Set if it involves a gigantic page (1 GB). 
*/ 508 bool fGigantPage; 509 /** Set if the second-level fault was caused by an EPT misconfiguration. */ 510 bool fEptMisconfig; 511 bool afPadding[6]; 512 513 /** The effective attributes, PGM_PTATTRS_XXX. */ 514 PGMPTATTRS fEffective; 515 } PGMPTWALK; 516 /** Pointer to page walk information. */ 517 typedef PGMPTWALK *PPGMPTWALK; 518 /** Pointer to const page walk information. */ 519 typedef PGMPTWALK const *PCPGMPTWALK; 520 521 297 522 /** Macro for checking if the guest is using paging. 298 523 * @param enmMode PGMMODE_*. … … 351 576 #define PGM_MK_PG_IS_MMIO2 RT_BIT(1) 352 577 /** @}*/ 353 VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys); 354 VMMDECL(bool) PGMGstIsPagePresent(PVMCPUCC pVCpu, RTGCPTR GCPtr); 578 VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk); 355 579 VMMDECL(int) PGMGstSetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags); 356 580 VMMDECL(int) PGMGstModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask); -
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r91580 r92426 1426 1426 } 1427 1427 1428 RTGCPHYS GCPhys;1429 uint64_t fFlags;1430 i nt rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);1431 if (RT_SUCCESS(rc)) { /* probable */ }1428 PGMPTWALK Walk; 1429 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk); 1430 if (RT_SUCCESS(rc)) 1431 Assert(Walk.fSucceeded); /* probable. */ 1432 1432 else 1433 1433 { … … 1435 1435 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc); 1436 1436 } 1437 if (( fFlags& X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }1437 if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ } 1438 1438 else 1439 1439 { … … 1441 1441 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED); 1442 1442 } 1443 if (!( fFlags& X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }1443 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ } 1444 1444 else 1445 1445 { … … 1447 1447 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED); 1448 1448 } 1449 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;1449 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & PAGE_OFFSET_MASK); 1450 1450 /** @todo Check reserved bits and such stuff. PGM is better at doing 1451 1451 * that, so do it when implementing the guest virtual address … … 1744 1744 { 1745 1745 pVCpu->iem.s.CodeTlb.cTlbMisses++; 1746 RTGCPHYS GCPhys; 1747 uint64_t fFlags; 1748 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys); 1746 PGMPTWALK Walk; 1747 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk); 1749 1748 if (RT_FAILURE(rc)) 1750 1749 { … … 1754 1753 1755 1754 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1); 1755 Assert(Walk.fSucceeded); 1756 1756 pTlbe->uTag = uTag; 1757 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX); 1758 pTlbe->GCPhys = GCPhys; 1757 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) 1758 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX); 1759 pTlbe->GCPhys = Walk.GCPhys; 1759 1760 pTlbe->pbMappingR3 = NULL; 1760 1761 } … … 1961 1962 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. 
*/ 1962 1963 1963 RTGCPHYS GCPhys; 1964 uint64_t fFlags; 1965 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys); 1964 PGMPTWALK Walk; 1965 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk); 1966 1966 if (RT_FAILURE(rc)) 1967 1967 { … … 1969 1969 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc); 1970 1970 } 1971 if (!( fFlags& X86_PTE_US) && pVCpu->iem.s.uCpl == 3)1971 if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3) 1972 1972 { 1973 1973 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext)); 1974 1974 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED); 1975 1975 } 1976 if (( fFlags& X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))1976 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) 1977 1977 { 1978 1978 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext)); 1979 1979 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED); 1980 1980 } 1981 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;1981 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & PAGE_OFFSET_MASK); 1982 1982 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode)); 1983 1983 /** @todo Check reserved bits and such stuff. PGM is better at doing … … 8137 8137 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault 8138 8138 * here. */ 8139 RTGCPHYS GCPhys; 8140 uint64_t fFlags; 8141 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys); 8139 PGMPTWALK Walk; 8140 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk); 8142 8141 if (RT_FAILURE(rc)) 8143 8142 { … … 8151 8150 /* If the page is writable and does not have the no-exec bit set, all 8152 8151 access is allowed. Otherwise we'll have to check more carefully... */ 8153 if (( fFlags& (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))8152 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US)) 8154 8153 { 8155 8154 /* Write to read only memory? */ 8156 8155 if ( (fAccess & IEM_ACCESS_TYPE_WRITE) 8157 && !( fFlags& X86_PTE_RW)8156 && !(Walk.fEffective & X86_PTE_RW) 8158 8157 && ( ( pVCpu->iem.s.uCpl == 3 8159 8158 && !(fAccess & IEM_ACCESS_WHAT_SYS)) … … 8166 8165 8167 8166 /* Kernel memory accessed by userland? */ 8168 if ( !( fFlags& X86_PTE_US)8167 if ( !(Walk.fEffective & X86_PTE_US) 8169 8168 && pVCpu->iem.s.uCpl == 3 8170 8169 && !(fAccess & IEM_ACCESS_WHAT_SYS)) … … 8177 8176 /* Executing non-executable memory? */ 8178 8177 if ( (fAccess & IEM_ACCESS_TYPE_EXEC) 8179 && ( fFlags& X86_PTE_PAE_NX)8178 && (Walk.fEffective & X86_PTE_PAE_NX) 8180 8179 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) 8181 8180 { … … 8193 8192 /** @todo testcase: check when A and D bits are actually set by the CPU. */ 8194 8193 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A; 8195 if (( fFlags& fAccessedDirty) != fAccessedDirty)8194 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty) 8196 8195 { 8197 8196 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty); … … 8199 8198 } 8200 8199 8201 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;8200 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & PAGE_OFFSET_MASK); 8202 8201 *pGCPhysMem = GCPhys; 8203 8202 return VINF_SUCCESS; -
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
r92344 r92426 51 51 DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde); 52 52 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 53 static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALKGST pWalk); 53 static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk, 54 PPGMPTWALKGST pGstWalk); 54 55 #endif 55 56 static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD); … … 1723 1724 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 1724 1725 * @param GCPtr Guest Context virtual address of the page. 1725 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages. 1726 * @param pGCPhys Where to store the GC physical address of the page. 1727 * This is page aligned. The fact that the 1728 */ 1729 VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys) 1726 * @param pWalk Where to store the page walk information. 1727 */ 1728 VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk) 1730 1729 { 1731 1730 VMCPU_ASSERT_EMT(pVCpu); 1731 Assert(pWalk); 1732 RT_BZERO(pWalk, sizeof(*pWalk)); 1732 1733 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData; 1733 1734 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE); 1734 1735 AssertReturn(g_aPgmGuestModeData[idx].pfnGetPage, VERR_PGM_MODE_IPE); 1735 return g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, p fFlags, pGCPhys);1736 return g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, pWalk); 1736 1737 } 1737 1738 … … 1753 1754 * @param pWalk Where to return the walk result. This is valid for some 1754 1755 * error codes as well. 1755 */ 1756 int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk) 1756 * @param pGstWalk The guest mode specific page walk information. 1757 */ 1758 int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk) 1757 1759 { 1758 1760 VMCPU_ASSERT_EMT(pVCpu); … … 1760 1762 { 1761 1763 case PGMMODE_32_BIT: 1762 p Walk->enmType = PGMPTWALKGSTTYPE_32BIT;1763 return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, &pWalk->u.Legacy);1764 pGstWalk->enmType = PGMPTWALKGSTTYPE_32BIT; 1765 return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Legacy); 1764 1766 1765 1767 case PGMMODE_PAE: 1766 1768 case PGMMODE_PAE_NX: 1767 p Walk->enmType = PGMPTWALKGSTTYPE_PAE;1768 return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, &pWalk->u.Pae);1769 pGstWalk->enmType = PGMPTWALKGSTTYPE_PAE; 1770 return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Pae); 1769 1771 1770 1772 case PGMMODE_AMD64: 1771 1773 case PGMMODE_AMD64_NX: 1772 p Walk->enmType = PGMPTWALKGSTTYPE_AMD64;1773 return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, &pWalk->u.Amd64);1774 pGstWalk->enmType = PGMPTWALKGSTTYPE_AMD64; 1775 return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Amd64); 1774 1776 1775 1777 case PGMMODE_REAL: 1776 1778 case PGMMODE_PROTECTED: 1777 p Walk->enmType = PGMPTWALKGSTTYPE_INVALID;1779 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID; 1778 1780 return VERR_PGM_NOT_USED_IN_MODE; 1779 1781 … … 1784 1786 default: 1785 1787 AssertFailed(); 1786 p Walk->enmType = PGMPTWALKGSTTYPE_INVALID;1788 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID; 1787 1789 return VERR_PGM_NOT_USED_IN_MODE; 1788 1790 } … … 1813 1815 * @param pWalk Where to return the walk result. 
This is valid for 1814 1816 * some error codes as well. 1817 * @param pGstWalk The second-level paging-mode specific walk 1818 * information. 1815 1819 */ 1816 1820 static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested, 1817 PPGMPTWALKGST pWalk) 1818 { 1819 Assert(pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT); 1821 PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk) 1822 { 1823 Assert( pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT 1824 && pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_INVALID); 1820 1825 switch (pVCpu->pgm.s.enmGuestSlatMode) 1821 1826 { 1822 1827 case PGMSLAT_EPT: 1823 p Walk->enmType = PGMPTWALKGSTTYPE_EPT;1824 return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, fIsLinearAddrValid, GCPtrNested, &pWalk->u.Ept);1828 pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT; 1829 return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, fIsLinearAddrValid, GCPtrNested, pWalk, &pGstWalk->u.Ept); 1825 1830 1826 1831 default: 1827 1832 AssertFailed(); 1828 p Walk->enmType = PGMPTWALKGSTTYPE_INVALID;1833 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID; 1829 1834 return VERR_PGM_NOT_USED_IN_MODE; 1830 1835 } … … 1851 1856 * the result of this walk. This is valid for some error 1852 1857 * codes as well. 1853 */ 1854 int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk) 1858 * @param pGstWalk The guest-mode specific walk information. 1859 */ 1860 int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk) 1855 1861 { 1856 1862 /* … … 1858 1864 * We also limit ourselves to the next page. 1859 1865 */ 1860 if ( pWalk-> u.Core.fSucceeded1861 && GCPtr - pWalk-> u.Core.GCPtr == PAGE_SIZE)1862 { 1863 Assert(pWalk->u .Core.uLevel == 0);1864 if (p Walk->enmType == PGMPTWALKGSTTYPE_AMD64)1866 if ( pWalk->fSucceeded 1867 && GCPtr - pWalk->GCPtr == PAGE_SIZE) 1868 { 1869 Assert(pWalk->uLevel == 0); 1870 if (pGstWalk->enmType == PGMPTWALKGSTTYPE_AMD64) 1865 1871 { 1866 1872 /* 1867 1873 * AMD64 1868 1874 */ 1869 if (!pWalk-> u.Core.fGigantPage && !pWalk->u.Core.fBigPage)1875 if (!pWalk->fGigantPage && !pWalk->fBigPage) 1870 1876 { 1871 1877 /* … … 1878 1884 | X86_PDE_PCD | X86_PDE_A | X86_PDE_PAE_NX | X86_PDE_PS; 1879 1885 1880 if ((GCPtr >> X86_PD_PAE_SHIFT) == (pWalk-> u.Core.GCPtr >> X86_PD_PAE_SHIFT))1886 if ((GCPtr >> X86_PD_PAE_SHIFT) == (pWalk->GCPtr >> X86_PD_PAE_SHIFT)) 1881 1887 { 1882 if (p Walk->u.Amd64.pPte)1888 if (pGstWalk->u.Amd64.pPte) 1883 1889 { 1884 1890 X86PTEPAE Pte; 1885 Pte.u = p Walk->u.Amd64.pPte[1].u;1886 if ( (Pte.u & fPteSame) == (p Walk->u.Amd64.Pte.u & fPteSame)1891 Pte.u = pGstWalk->u.Amd64.pPte[1].u; 1892 if ( (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame) 1887 1893 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask)) 1888 1894 { 1889 1890 pWalk->u.Core.GCPtr = GCPtr; 1891 pWalk->u.Core.GCPhys = Pte.u & X86_PTE_PAE_PG_MASK; 1892 pWalk->u.Amd64.Pte.u = Pte.u; 1893 pWalk->u.Amd64.pPte++; 1895 pWalk->GCPtr = GCPtr; 1896 pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK; 1897 pGstWalk->u.Amd64.Pte.u = Pte.u; 1898 pGstWalk->u.Amd64.pPte++; 1894 1899 return VINF_SUCCESS; 1895 1900 } 1896 1901 } 1897 1902 } 1898 else if ((GCPtr >> X86_PDPT_SHIFT) == (pWalk-> u.Core.GCPtr >> X86_PDPT_SHIFT))1903 else if ((GCPtr >> X86_PDPT_SHIFT) == (pWalk->GCPtr >> X86_PDPT_SHIFT)) 1899 1904 { 1900 1905 Assert(!((GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK)); /* Must be first PT entry. 
*/ 1901 if (p Walk->u.Amd64.pPde)1906 if (pGstWalk->u.Amd64.pPde) 1902 1907 { 1903 1908 X86PDEPAE Pde; 1904 Pde.u = p Walk->u.Amd64.pPde[1].u;1905 if ( (Pde.u & fPdeSame) == (p Walk->u.Amd64.Pde.u & fPdeSame)1909 Pde.u = pGstWalk->u.Amd64.pPde[1].u; 1910 if ( (Pde.u & fPdeSame) == (pGstWalk->u.Amd64.Pde.u & fPdeSame) 1906 1911 && !(Pde.u & (pVCpu)->pgm.s.fGstAmd64MbzPdeMask)) 1907 1912 { 1908 1913 /* Get the new PTE and check out the first entry. */ 1909 1914 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, PGM_A20_APPLY(pVCpu, (Pde.u & X86_PDE_PAE_PG_MASK)), 1910 &p Walk->u.Amd64.pPt);1915 &pGstWalk->u.Amd64.pPt); 1911 1916 if (RT_SUCCESS(rc)) 1912 1917 { 1913 p Walk->u.Amd64.pPte = &pWalk->u.Amd64.pPt->a[0];1918 pGstWalk->u.Amd64.pPte = &pGstWalk->u.Amd64.pPt->a[0]; 1914 1919 X86PTEPAE Pte; 1915 Pte.u = p Walk->u.Amd64.pPte->u;1916 if ( (Pte.u & fPteSame) == (p Walk->u.Amd64.Pte.u & fPteSame)1920 Pte.u = pGstWalk->u.Amd64.pPte->u; 1921 if ( (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame) 1917 1922 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask)) 1918 1923 { 1919 pWalk-> u.Core.GCPtr = GCPtr;1920 pWalk-> u.Core.GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;1921 p Walk->u.Amd64.Pte.u = Pte.u;1922 p Walk->u.Amd64.Pde.u = Pde.u;1923 p Walk->u.Amd64.pPde++;1924 pWalk->GCPtr = GCPtr; 1925 pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK; 1926 pGstWalk->u.Amd64.Pte.u = Pte.u; 1927 pGstWalk->u.Amd64.Pde.u = Pde.u; 1928 pGstWalk->u.Amd64.pPde++; 1924 1929 return VINF_SUCCESS; 1925 1930 } … … 1929 1934 } 1930 1935 } 1931 else if (!pWalk-> u.Core.fGigantPage)1936 else if (!pWalk->fGigantPage) 1932 1937 { 1933 if ((GCPtr & X86_PAGE_2M_BASE_MASK) == (pWalk-> u.Core.GCPtr & X86_PAGE_2M_BASE_MASK))1938 if ((GCPtr & X86_PAGE_2M_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_2M_BASE_MASK)) 1934 1939 { 1935 pWalk-> u.Core.GCPtr = GCPtr;1936 pWalk-> u.Core.GCPhys += PAGE_SIZE;1940 pWalk->GCPtr = GCPtr; 1941 pWalk->GCPhys += PAGE_SIZE; 1937 1942 return VINF_SUCCESS; 1938 1943 } … … 1940 1945 else 1941 1946 { 1942 if ((GCPtr & X86_PAGE_1G_BASE_MASK) == (pWalk-> u.Core.GCPtr & X86_PAGE_1G_BASE_MASK))1947 if ((GCPtr & X86_PAGE_1G_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_1G_BASE_MASK)) 1943 1948 { 1944 pWalk-> u.Core.GCPtr = GCPtr;1945 pWalk-> u.Core.GCPhys += PAGE_SIZE;1949 pWalk->GCPtr = GCPtr; 1950 pWalk->GCPhys += PAGE_SIZE; 1946 1951 return VINF_SUCCESS; 1947 1952 } … … 1950 1955 } 1951 1956 /* Case we don't handle. Do full walk. */ 1952 return pgmGstPtWalk(pVCpu, GCPtr, pWalk); 1953 } 1954 1955 1956 /** 1957 * Checks if the page is present. 1958 * 1959 * @returns true if the page is present. 1960 * @returns false if the page is not present. 1961 * @param pVCpu The cross context virtual CPU structure. 1962 * @param GCPtr Address within the page. 1963 */ 1964 VMMDECL(bool) PGMGstIsPagePresent(PVMCPUCC pVCpu, RTGCPTR GCPtr) 1965 { 1966 VMCPU_ASSERT_EMT(pVCpu); 1967 int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL); 1968 return RT_SUCCESS(rc); 1957 return pgmGstPtWalk(pVCpu, GCPtr, pWalk, pGstWalk); 1969 1958 } 1970 1959 … … 3179 3168 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 3180 3169 /* Update the guest SLAT mode if it's a nested-guest. 
*/ 3181 if (CPUMIsGuestVmxEptPagingEnabled(pVCpu)) 3182 { 3183 if (PGMMODE_WITH_PAGING(enmGuestMode)) 3184 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT; 3185 else 3186 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_DIRECT; 3187 } 3170 if ( CPUMIsGuestVmxEptPagingEnabled(pVCpu) 3171 && PGMMODE_WITH_PAGING(enmGuestMode)) 3172 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT; 3188 3173 else 3189 Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_DIRECT);3174 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_DIRECT; 3190 3175 #endif 3191 3176 -
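The internal walker follows the same split: the mode-independent result now lives in PGMPTWALK, while the raw guest paging-structure entries stay in the PGMPTWALKGST union passed alongside it. A hedged usage sketch (illustrative only; it mirrors the PGMAllBth.h hunks below and assumes pVCpu and GCPtr are in scope):

    /* Illustrative sketch only -- not part of this changeset. */
    PGMPTWALK    Walk;
    PGMPTWALKGST GstWalk;
    int rc = pgmGstPtWalk(pVCpu, GCPtr, &Walk, &GstWalk);
    if (RT_SUCCESS(rc) && Walk.fSucceeded)
    {
        RTGCPHYS const GCPhysPage = Walk.GCPhys;        /* common, mode-independent result */
        if (GstWalk.enmType == PGMPTWALKGSTTYPE_AMD64)  /* mode-specific entries when needed */
            LogFlow(("PDE=%RX64 PTE=%RX64\n", GstWalk.u.Amd64.Pde.u, GstWalk.u.Amd64.Pte.u));
        NOREF(GCPhysPage);
    }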
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r92381 r92426 170 170 * 171 171 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 172 * @param p GstWalkThe guest page table walk result.172 * @param pWalk The guest page table walk result. 173 173 * @param uErr The error code. 174 174 */ 175 PGM_BTH_DECL(VBOXSTRICTRC, Trap0eHandlerGuestFault)(PVMCPUCC pVCpu, P GSTPTWALK pGstWalk, RTGCUINT uErr)175 PGM_BTH_DECL(VBOXSTRICTRC, Trap0eHandlerGuestFault)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, RTGCUINT uErr) 176 176 { 177 177 /* … … 181 181 ? uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_US | X86_TRAP_PF_ID) 182 182 : uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_US); 183 if ( p GstWalk->Core.fRsvdError184 || p GstWalk->Core.fBadPhysAddr)183 if ( pWalk->fRsvdError 184 || pWalk->fBadPhysAddr) 185 185 { 186 186 uNewErr |= X86_TRAP_PF_RSVD | X86_TRAP_PF_P; 187 Assert(!p GstWalk->Core.fNotPresent);188 } 189 else if (!p GstWalk->Core.fNotPresent)187 Assert(!pWalk->fNotPresent); 188 } 189 else if (!pWalk->fNotPresent) 190 190 uNewErr |= X86_TRAP_PF_P; 191 191 TRPMSetErrorCode(pVCpu, uNewErr); 192 192 193 LogFlow(("Guest trap; cr2=%RGv uErr=%RGv lvl=%d\n", p GstWalk->Core.GCPtr, uErr, pGstWalk->Core.uLevel));193 LogFlow(("Guest trap; cr2=%RGv uErr=%RGv lvl=%d\n", pWalk->GCPtr, uErr, pWalk->uLevel)); 194 194 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2GuestTrap; }); 195 195 return VINF_EM_RAW_GUEST_TRAP; … … 211 211 * @param pvFault The fault address. 212 212 * @param pPage The guest page at @a pvFault. 213 * @param pGstWalk The guest page table walk result. 213 * @param pWalk The guest page table walk result. 214 * @param pGstWalk The guest paging-mode specific walk information. 214 215 * @param pfLockTaken PGM lock taken here or not (out). This is true 215 216 * when we're called. … … 218 219 RTGCPTR pvFault, PPGMPAGE pPage, bool *pfLockTaken 219 220 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) || defined(DOXYGEN_RUNNING) 221 , PPGMPTWALK pWalk 220 222 , PGSTPTWALK pGstWalk 221 223 # endif … … 234 236 */ 235 237 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) 236 const RTGCPHYS GCPhysFault = p GstWalk->Core.GCPhys;238 const RTGCPHYS GCPhysFault = pWalk->GCPhys; 237 239 # else 238 240 const RTGCPHYS GCPhysFault = PGM_A20_APPLY(pVCpu, (RTGCPHYS)pvFault); … … 277 279 && pCurType->enmKind != PGMPHYSHANDLERKIND_WRITE 278 280 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) 279 && (p GstWalk->Core.fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK))280 == PGM_PTATTRS_W_MASK /** @todo Remove pGstWalk->Core.fEffectiveUS and X86_PTE_US further down in the sync code. */281 && (pWalk->fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK)) 282 == PGM_PTATTRS_W_MASK /** @todo Remove pGstWalk->Core.fEffectiveUS and X86_PTE_US further down in the sync code. */ 281 283 # endif 282 284 ) … … 418 420 * Walk the guest page translation tables and check if it's a guest fault. 419 421 */ 422 PGMPTWALK Walk; 420 423 GSTPTWALK GstWalk; 421 rc = PGM_GST_NAME(Walk)(pVCpu, pvFault, & GstWalk);424 rc = PGM_GST_NAME(Walk)(pVCpu, pvFault, &Walk, &GstWalk); 422 425 if (RT_FAILURE_NP(rc)) 423 return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerGuestFault)(pVCpu, & GstWalk, uErr));426 return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerGuestFault)(pVCpu, &Walk, uErr)); 424 427 425 428 /* assert some GstWalk sanity. 
*/ … … 432 435 /*AssertMsg(GstWalk.Pde.u == GstWalk.pPde->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pde.u, (uint64_t)GstWalk.pPde->u)); - ditto */ 433 436 /*AssertMsg(GstWalk.Core.fBigPage || GstWalk.Pte.u == GstWalk.pPte->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pte.u, (uint64_t)GstWalk.pPte->u)); - ditto */ 434 Assert( GstWalk.Core.fSucceeded);437 Assert(Walk.fSucceeded); 435 438 436 439 if (uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_US | X86_TRAP_PF_ID)) 437 440 { 438 441 if ( ( (uErr & X86_TRAP_PF_RW) 439 && !( GstWalk.Core.fEffective & PGM_PTATTRS_W_MASK)442 && !(Walk.fEffective & PGM_PTATTRS_W_MASK) 440 443 && ( (uErr & X86_TRAP_PF_US) 441 444 || CPUMIsGuestR0WriteProtEnabled(pVCpu)) ) 442 || ((uErr & X86_TRAP_PF_US) && !( GstWalk.Core.fEffective & PGM_PTATTRS_US_MASK))443 || ((uErr & X86_TRAP_PF_ID) && (GstWalk.Core.fEffective & PGM_PTATTRS_NX_MASK))445 || ((uErr & X86_TRAP_PF_US) && !(Walk.fEffective & PGM_PTATTRS_US_MASK)) 446 || ((uErr & X86_TRAP_PF_ID) && (Walk.fEffective & PGM_PTATTRS_NX_MASK)) 444 447 ) 445 return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerGuestFault)(pVCpu, & GstWalk, uErr));448 return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerGuestFault)(pVCpu, &Walk, uErr)); 446 449 } 447 450 … … 468 471 } 469 472 # endif 470 if ( GstWalk.Core.fBigPage)473 if (Walk.fBigPage) 471 474 { 472 475 Assert(GstWalk.Pde.u & X86_PDE_PS); … … 521 524 Assert(GstWalk.Pte.u == GstWalk.pPte->u); 522 525 } 526 #if 0 527 /* Disabling this since it's not reliable for SMP, see @bugref{10092#c22}. */ 523 528 AssertMsg(GstWalk.Pde.u == GstWalk.pPde->u || GstWalk.pPte->u == GstWalk.pPde->u, 524 529 ("%RX64 %RX64 pPte=%p pPde=%p Pte=%RX64\n", (uint64_t)GstWalk.Pde.u, (uint64_t)GstWalk.pPde->u, GstWalk.pPte, GstWalk.pPde, (uint64_t)GstWalk.pPte->u)); 530 #endif 531 525 532 # else /* !PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */ 526 533 GSTPDE const PdeSrcDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A}; /** @todo eliminate this */ … … 541 548 PPGMPAGE pPage; 542 549 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) 543 rc = pgmPhysGetPageEx(pVM, GstWalk.Core.GCPhys, &pPage);550 rc = pgmPhysGetPageEx(pVM, Walk.GCPhys, &pPage); 544 551 if (RT_SUCCESS(rc) && PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) 545 552 return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage, 546 pfLockTaken, & GstWalk));553 pfLockTaken, &Walk, &GstWalk)); 547 554 rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, 1, uErr); 548 555 # else … … 618 625 #ifdef DEBUG_bird 619 626 AssertMsg(GstWalk.Pde.u == GstWalk.pPde->u || GstWalk.pPte->u == GstWalk.pPde->u || pVM->cCpus > 1, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pde.u, (uint64_t)GstWalk.pPde->u)); // - triggers with smp w7 guests. 620 AssertMsg( GstWalk.Core.fBigPage || GstWalk.Pte.u == GstWalk.pPte->u || pVM->cCpus > 1, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pte.u, (uint64_t)GstWalk.pPte->u)); // - ditto.627 AssertMsg(Walk.fBigPage || GstWalk.Pte.u == GstWalk.pPte->u || pVM->cCpus > 1, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pte.u, (uint64_t)GstWalk.pPte->u)); // - ditto. 
621 628 #endif 622 629 } … … 669 676 */ 670 677 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) 671 RTGCPHYS GCPhys = GstWalk.Core.GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;678 RTGCPHYS GCPhys = Walk.GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK; 672 679 # else 673 680 RTGCPHYS GCPhys = PGM_A20_APPLY(pVCpu, (RTGCPHYS)pvFault & ~(RTGCPHYS)PAGE_OFFSET_MASK); … … 694 701 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) 695 702 return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage, pfLockTaken, 696 & GstWalk));703 &Walk, &GstWalk)); 697 704 # else 698 705 return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage, pfLockTaken)); … … 778 785 * Check to see if we need to emulate the instruction if CR0.WP=0. 779 786 */ 780 if ( !( GstWalk.Core.fEffective & PGM_PTATTRS_W_MASK)787 if ( !(Walk.fEffective & PGM_PTATTRS_W_MASK) 781 788 && (CPUMGetGuestCR0(pVCpu) & (X86_CR0_WP | X86_CR0_PG)) == X86_CR0_PG 782 789 && CPUMGetGuestCPL(pVCpu) < 3) … … 797 804 */ 798 805 # if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE) && 1 799 if ( ( GstWalk.Core.fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK)) == PGM_PTATTRS_US_MASK800 && ( GstWalk.Core.fBigPage || (GstWalk.Pde.u & X86_PDE_RW))806 if ( (Walk.fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK)) == PGM_PTATTRS_US_MASK 807 && (Walk.fBigPage || (GstWalk.Pde.u & X86_PDE_RW)) 801 808 && pVM->cCpus == 1 /* Sorry, no go on SMP. Add CFGM option? */) 802 809 { 803 Log(("PGM #PF: Netware WP0+RO+US hack: pvFault=%RGp uErr=%#x (big=%d)\n", pvFault, uErr, GstWalk.Core.fBigPage));804 rc = pgmShwMakePageSupervisorAndWritable(pVCpu, pvFault, GstWalk.Core.fBigPage, PGM_MK_PG_IS_WRITE_FAULT);810 Log(("PGM #PF: Netware WP0+RO+US hack: pvFault=%RGp uErr=%#x (big=%d)\n", pvFault, uErr, Walk.fBigPage)); 811 rc = pgmShwMakePageSupervisorAndWritable(pVCpu, pvFault, Walk.fBigPage, PGM_MK_PG_IS_WRITE_FAULT); 805 812 if (rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3) 806 813 { … … 817 824 /* Interpret the access. 
*/ 818 825 rc = VBOXSTRICTRC_TODO(PGMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault)); 819 Log(("PGM #PF: WP0 emulation (pvFault=%RGp uErr=%#x cpl=%d fBig=%d fEffUs=%d)\n", pvFault, uErr, CPUMGetGuestCPL(pVCpu), GstWalk.Core.fBigPage, !!(GstWalk.Core.fEffective & PGM_PTATTRS_US_MASK)));826 Log(("PGM #PF: WP0 emulation (pvFault=%RGp uErr=%#x cpl=%d fBig=%d fEffUs=%d)\n", pvFault, uErr, CPUMGetGuestCPL(pVCpu), Walk.fBigPage, !!(Walk.fEffective & PGM_PTATTRS_US_MASK))); 820 827 if (RT_SUCCESS(rc)) 821 828 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eWPEmulInRZ); … … 855 862 # endif 856 863 # ifdef VBOX_STRICT 857 RTGCPHYS GCPhys2 = RTGCPHYS_MAX;858 uint64_t fPageGst = UINT64_MAX;864 PGMPTWALK GstPageWalk; 865 GstPageWalk.GCPhys = RTGCPHYS_MAX; 859 866 if (!pVM->pgm.s.fNestedPaging) 860 867 { 861 rc = PGMGstGetPage(pVCpu, pvFault, & fPageGst, &GCPhys2);862 AssertMsg(RT_SUCCESS(rc) && (( fPageGst & X86_PTE_RW) || ((CPUMGetGuestCR0(pVCpu) & (X86_CR0_WP | X86_CR0_PG)) == X86_CR0_PG && CPUMGetGuestCPL(pVCpu) < 3)), ("rc=%Rrc fPageGst=%RX64\n", rc, fPageGst));863 LogFlow(("Obsolete physical monitor page out of sync %RGv - phys %RGp flags=%08llx\n", pvFault, G CPhys2, (uint64_t)fPageGst));868 rc = PGMGstGetPage(pVCpu, pvFault, &GstPageWalk); 869 AssertMsg(RT_SUCCESS(rc) && ((GstPageWalk.fEffective & X86_PTE_RW) || ((CPUMGetGuestCR0(pVCpu) & (X86_CR0_WP | X86_CR0_PG)) == X86_CR0_PG && CPUMGetGuestCPL(pVCpu) < 3)), ("rc=%Rrc fPageGst=%RX64\n", rc, GstPageWalk.fEffective)); 870 LogFlow(("Obsolete physical monitor page out of sync %RGv - phys %RGp flags=%08llx\n", pvFault, GstPageWalk.GCPhys, GstPageWalk.fEffective)); 864 871 } 865 872 # if 0 /* Bogus! Triggers incorrectly with w7-64 and later for the SyncPage case: "Pde at %RGv changed behind our back?" */ … … 867 874 rc = PGMShwGetPage(pVCpu, pvFault, &fPageShw, NULL); 868 875 AssertMsg((RT_SUCCESS(rc) && (fPageShw & X86_PTE_RW)) || pVM->cCpus > 1 /* new monitor can be installed/page table flushed between the trap exit and PGMTrap0eHandler */, 869 ("rc=%Rrc fPageShw=%RX64 GCPhys2=%RGp fPageGst=%RX64 pvFault=%RGv\n", rc, fPageShw, G CPhys2, fPageGst, pvFault));876 ("rc=%Rrc fPageShw=%RX64 GCPhys2=%RGp fPageGst=%RX64 pvFault=%RGv\n", rc, fPageShw, GstPageWalk.GCPhys, fPageGst, pvFault)); 870 877 # endif 871 878 # endif /* VBOX_STRICT */ … … 879 886 * mode accesses the page again. 880 887 */ 881 else if ( ( GstWalk.Core.fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK)) == PGM_PTATTRS_US_MASK882 && ( GstWalk.Core.fBigPage || (GstWalk.Pde.u & X86_PDE_RW))888 else if ( (Walk.fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK)) == PGM_PTATTRS_US_MASK 889 && (Walk.fBigPage || (GstWalk.Pde.u & X86_PDE_RW)) 883 890 && pVCpu->pgm.s.cNetwareWp0Hacks > 0 884 891 && (CPUMGetGuestCR0(pVCpu) & (X86_CR0_WP | X86_CR0_PG)) == X86_CR0_PG … … 909 916 { 910 917 /* Get guest page flags. */ 911 uint64_t fPageGst;912 int rc2 = PGMGstGetPage(pVCpu, pvFault, & fPageGst, NULL);918 PGMPTWALK GstPageWalk; 919 int rc2 = PGMGstGetPage(pVCpu, pvFault, &GstPageWalk); 913 920 if (RT_SUCCESS(rc2)) 914 921 { -
trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
r92336 r92426 24 24 || PGM_GST_TYPE == PGM_TYPE_PAE \ 25 25 || PGM_GST_TYPE == PGM_TYPE_AMD64 26 DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, P GSTPTWALK pWalk);27 #endif 28 PGM_GST_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);26 DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PGSTPTWALK pGstWalk); 27 #endif 28 PGM_GST_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk); 29 29 PGM_GST_DECL(int, ModifyPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask); 30 30 … … 76 76 77 77 78 DECLINLINE(int) PGM_GST_NAME(WalkReturnNotPresent)(PVMCPUCC pVCpu, P GSTPTWALK pWalk, int iLevel)78 DECLINLINE(int) PGM_GST_NAME(WalkReturnNotPresent)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel) 79 79 { 80 80 NOREF(iLevel); NOREF(pVCpu); 81 pWalk-> Core.fNotPresent = true;82 pWalk-> Core.uLevel = (uint8_t)iLevel;81 pWalk->fNotPresent = true; 82 pWalk->uLevel = (uint8_t)iLevel; 83 83 return VERR_PAGE_TABLE_NOT_PRESENT; 84 84 } 85 85 86 DECLINLINE(int) PGM_GST_NAME(WalkReturnBadPhysAddr)(PVMCPUCC pVCpu, P GSTPTWALK pWalk, int iLevel, int rc)86 DECLINLINE(int) PGM_GST_NAME(WalkReturnBadPhysAddr)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel, int rc) 87 87 { 88 88 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc); NOREF(pVCpu); 89 pWalk-> Core.fBadPhysAddr = true;90 pWalk-> Core.uLevel = (uint8_t)iLevel;89 pWalk->fBadPhysAddr = true; 90 pWalk->uLevel = (uint8_t)iLevel; 91 91 return VERR_PAGE_TABLE_NOT_PRESENT; 92 92 } 93 93 94 DECLINLINE(int) PGM_GST_NAME(WalkReturnRsvdError)(PVMCPUCC pVCpu, P GSTPTWALK pWalk, int iLevel)94 DECLINLINE(int) PGM_GST_NAME(WalkReturnRsvdError)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel) 95 95 { 96 96 NOREF(pVCpu); 97 pWalk-> Core.fRsvdError = true;98 pWalk-> Core.uLevel = (uint8_t)iLevel;97 pWalk->fRsvdError = true; 98 pWalk->uLevel = (uint8_t)iLevel; 99 99 return VERR_PAGE_TABLE_NOT_PRESENT; 100 100 } … … 110 110 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 111 111 * @param GCPtr The guest virtual address to walk by. 112 * @param pWalk Where to return the walk result. This is always set. 113 */ 114 DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk) 112 * @param pWalk The common page walk information. 113 * @param pGstWalk The guest mode specific page walk information. 114 * 115 * @warning Callers must initialize @a pWalk and @a pGstWalk before calling this 116 * function. 117 */ 118 DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PGSTPTWALK pGstWalk) 115 119 { 116 120 int rc; 117 121 118 122 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 123 /** @def PGM_GST_SLAT_WALK 124 * Macro to perform guest second-level address translation (EPT or Nested). 125 * 126 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 127 * @param a_GCPtrNested The nested-guest linear address that caused the 128 * second-level translation. 129 * @param a_GCPhysNested The nested-guest physical address to translate. 130 * @param a_GCPhysOut Where to store the guest-physical address (result). 
131 */ 119 132 # define PGM_GST_SLAT_WALK(a_pVCpu, a_GCPtrNested, a_GCPhysNested, a_GCPhysOut, a_pWalk) \ 120 133 do { \ 121 134 if ((a_pVCpu)->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT) \ 122 135 { \ 123 PGMPTWALKGST SlatWalk; \ 124 int const rcX = pgmGstSlatWalk(a_pVCpu, a_GCPhysNested, true /* fIsLinearAddrValid */, a_GCPtrNested, &SlatWalk); \ 136 PGMPTWALK SlatWalk; \ 137 PGMPTWALKGST SlatGstWalk; \ 138 int const rcX = pgmGstSlatWalk(a_pVCpu, a_GCPhysNested, true /* fIsLinearAddrValid */, a_GCPtrNested, &SlatWalk, \ 139 &SlatGstWalk); \ 125 140 if (RT_SUCCESS(rcX)) \ 126 (a_GCPhysOut) = SlatWalk. u.Core.GCPhys; \141 (a_GCPhysOut) = SlatWalk.GCPhys; \ 127 142 else \ 128 143 { \ 129 (a_pWalk)->Core = SlatWalk.u.Core; \144 *(a_pWalk) = SlatWalk; \ 130 145 return rcX; \ 131 146 } \ … … 135 150 136 151 /* 137 * Init the walking structure .152 * Init the walking structures. 138 153 */ 139 154 RT_ZERO(*pWalk); 140 pWalk->Core.GCPtr = GCPtr; 155 RT_ZERO(*pGstWalk); 156 pWalk->GCPtr = GCPtr; 141 157 142 158 # if PGM_GST_TYPE == PGM_TYPE_32BIT \ … … 155 171 * The PML4 table. 156 172 */ 157 rc = pgmGstGetLongModePML4PtrEx(pVCpu, &p Walk->pPml4);173 rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGstWalk->pPml4); 158 174 if (RT_SUCCESS(rc)) { /* probable */ } 159 175 else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc); 160 176 161 177 PX86PML4E pPml4e; 162 p Walk->pPml4e = pPml4e = &pWalk->pPml4->a[(GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK];178 pGstWalk->pPml4e = pPml4e = &pGstWalk->pPml4->a[(GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK]; 163 179 X86PML4E Pml4e; 164 p Walk->Pml4e.u = Pml4e.u = pPml4e->u;180 pGstWalk->Pml4e.u = Pml4e.u = pPml4e->u; 165 181 166 182 if (GST_IS_PGENTRY_PRESENT(pVCpu, Pml4e)) { /* probable */ } … … 170 186 else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 4); 171 187 172 pWalk-> Core.fEffective = fEffective = Pml4e.u & ( X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_PWT173 | X86_PML4E_PCD | X86_PML4E_A | X86_PML4E_NX);188 pWalk->fEffective = fEffective = Pml4e.u & ( X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_PWT 189 | X86_PML4E_PCD | X86_PML4E_A | X86_PML4E_NX); 174 190 175 191 /* … … 180 196 PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPdpt, GCPhysPdpt, pWalk); 181 197 #endif 182 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPdpt, &p Walk->pPdpt);198 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPdpt, &pGstWalk->pPdpt); 183 199 if (RT_SUCCESS(rc)) { /* probable */ } 184 200 else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc); 185 201 186 202 # elif PGM_GST_TYPE == PGM_TYPE_PAE 187 rc = pgmGstGetPaePDPTPtrEx(pVCpu, &p Walk->pPdpt);203 rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGstWalk->pPdpt); 188 204 if (RT_SUCCESS(rc)) { /* probable */ } 189 205 else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc); … … 193 209 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE 194 210 PX86PDPE pPdpe; 195 p Walk->pPdpe = pPdpe = &pWalk->pPdpt->a[(GCPtr >> GST_PDPT_SHIFT) & GST_PDPT_MASK];211 pGstWalk->pPdpe = pPdpe = &pGstWalk->pPdpt->a[(GCPtr >> GST_PDPT_SHIFT) & GST_PDPT_MASK]; 196 212 X86PDPE Pdpe; 197 p Walk->Pdpe.u = Pdpe.u = pPdpe->u;213 pGstWalk->Pdpe.u = Pdpe.u = pPdpe->u; 198 214 199 215 if (GST_IS_PGENTRY_PRESENT(pVCpu, Pdpe)) { /* probable */ } … … 204 220 205 221 # if PGM_GST_TYPE == PGM_TYPE_AMD64 206 pWalk-> Core.fEffective = fEffective &= (Pdpe.u & ( X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US207 | X86_PDPE_PWT | X86_PDPE_PCD | X86_PDPE_A))208 | (Pdpe.u & X86_PDPE_LM_NX);222 pWalk->fEffective = fEffective &= 
(Pdpe.u & ( X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US 223 | X86_PDPE_PWT | X86_PDPE_PCD | X86_PDPE_A)) 224 | (Pdpe.u & X86_PDPE_LM_NX); 209 225 # else 210 226 /* NX in the legacy-mode PAE PDPE is reserved. The valid check above ensures the NX bit is not set. */ 211 pWalk-> Core.fEffective = fEffective = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A212 | (Pdpe.u & (X86_PDPE_PWT | X86_PDPE_PCD));227 pWalk->fEffective = fEffective = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A 228 | (Pdpe.u & (X86_PDPE_PWT | X86_PDPE_PCD)); 213 229 # endif 214 230 … … 220 236 PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPd, GCPhysPd, pWalk); 221 237 # endif 222 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPd, &p Walk->pPd);238 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPd, &pGstWalk->pPd); 223 239 if (RT_SUCCESS(rc)) { /* probable */ } 224 240 else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 2, rc); 225 241 226 242 # elif PGM_GST_TYPE == PGM_TYPE_32BIT 227 rc = pgmGstGet32bitPDPtrEx(pVCpu, &p Walk->pPd);243 rc = pgmGstGet32bitPDPtrEx(pVCpu, &pGstWalk->pPd); 228 244 if (RT_SUCCESS(rc)) { /* probable */ } 229 245 else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc); … … 232 248 { 233 249 PGSTPDE pPde; 234 p Walk->pPde = pPde = &pWalk->pPd->a[(GCPtr >> GST_PD_SHIFT) & GST_PD_MASK];250 pGstWalk->pPde = pPde = &pGstWalk->pPd->a[(GCPtr >> GST_PD_SHIFT) & GST_PD_MASK]; 235 251 GSTPDE Pde; 236 p Walk->Pde.u = Pde.u = pPde->u;252 pGstWalk->Pde.u = Pde.u = pPde->u; 237 253 if (GST_IS_PGENTRY_PRESENT(pVCpu, Pde)) { /* probable */ } 238 254 else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 2); … … 253 269 fEffective |= Pde.u & (X86_PDE4M_D | X86_PDE4M_G); 254 270 fEffective |= (Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT; 255 pWalk-> Core.fEffective = fEffective;271 pWalk->fEffective = fEffective; 256 272 Assert(GST_IS_NX_ACTIVE(pVCpu) || !(fEffective & PGM_PTATTRS_NX_MASK)); 257 273 Assert(fEffective & PGM_PTATTRS_R_MASK); 258 274 259 pWalk-> Core.fBigPage = true;260 pWalk-> Core.fSucceeded = true;275 pWalk->fBigPage = true; 276 pWalk->fSucceeded = true; 261 277 RTGCPHYS GCPhysPde = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde) 262 278 | (GCPtr & GST_BIG_PAGE_OFFSET_MASK); … … 264 280 PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPde, GCPhysPde, pWalk); 265 281 # endif 266 pWalk-> Core.GCPhys = GCPhysPde;267 PGM_A20_APPLY_TO_VAR(pVCpu, pWalk-> Core.GCPhys);282 pWalk->GCPhys = GCPhysPde; 283 PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->GCPhys); 268 284 return VINF_SUCCESS; 269 285 } … … 272 288 return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2); 273 289 # if PGM_GST_TYPE == PGM_TYPE_32BIT 274 pWalk-> Core.fEffective = fEffective = Pde.u & ( X86_PDE_P | X86_PDE_RW | X86_PDE_US275 | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A);290 pWalk->fEffective = fEffective = Pde.u & ( X86_PDE_P | X86_PDE_RW | X86_PDE_US 291 | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A); 276 292 # else 277 pWalk-> Core.fEffective = fEffective &= (Pde.u & ( X86_PDE_P | X86_PDE_RW | X86_PDE_US278 | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A))279 | (Pde.u & X86_PDE_PAE_NX);293 pWalk->fEffective = fEffective &= (Pde.u & ( X86_PDE_P | X86_PDE_RW | X86_PDE_US 294 | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A)) 295 | (Pde.u & X86_PDE_PAE_NX); 280 296 # endif 281 297 … … 287 303 PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPt, GCPhysPt, pWalk); 288 304 # endif 289 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &p Walk->pPt);305 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pGstWalk->pPt); 290 306 if (RT_SUCCESS(rc)) { /* probable */ } 291 307 else 
return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 1, rc); … … 293 309 { 294 310 PGSTPTE pPte; 295 p Walk->pPte = pPte = &pWalk->pPt->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];311 pGstWalk->pPte = pPte = &pGstWalk->pPt->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK]; 296 312 GSTPTE Pte; 297 p Walk->Pte.u = Pte.u = pPte->u;313 pGstWalk->Pte.u = Pte.u = pPte->u; 298 314 299 315 if (GST_IS_PGENTRY_PRESENT(pVCpu, Pte)) { /* probable */ } … … 313 329 # endif 314 330 fEffective |= Pte.u & (X86_PTE_D | X86_PTE_PAT | X86_PTE_G); 315 pWalk-> Core.fEffective = fEffective;331 pWalk->fEffective = fEffective; 316 332 Assert(GST_IS_NX_ACTIVE(pVCpu) || !(fEffective & PGM_PTATTRS_NX_MASK)); 317 333 Assert(fEffective & PGM_PTATTRS_R_MASK); 318 334 319 pWalk-> Core.fSucceeded = true;335 pWalk->fSucceeded = true; 320 336 RTGCPHYS GCPhysPte = GST_GET_PTE_GCPHYS(Pte) 321 337 | (GCPtr & PAGE_OFFSET_MASK); … … 323 339 PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPte, GCPhysPte, pWalk); 324 340 # endif 325 pWalk-> Core.GCPhys = GCPhysPte;341 pWalk->GCPhys = GCPhysPte; 326 342 return VINF_SUCCESS; 327 343 } … … 341 357 * @param pVCpu The cross context virtual CPU structure. 342 358 * @param GCPtr Guest Context virtual address of the page. 343 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages. 344 * @param pGCPhys Where to store the GC physical address of the page. 345 * This is page aligned! 346 */ 347 PGM_GST_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys) 359 * @param pWalk Where to store the page walk info. 360 */ 361 PGM_GST_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk) 348 362 { 349 363 #if PGM_GST_TYPE == PGM_TYPE_REAL \ … … 352 366 * Fake it. 353 367 */ 354 if (pfFlags) 355 *pfFlags = X86_PTE_P | X86_PTE_RW | X86_PTE_US; 356 if (pGCPhys) 357 *pGCPhys = GCPtr & PAGE_BASE_GC_MASK; 368 RT_ZERO(*pWalk); 369 pWalk->fSucceeded = true; 370 pWalk->GCPtr = GCPtr; 371 pWalk->GCPhys = GCPtr & PAGE_BASE_GC_MASK; 372 pWalk->fEffective = X86_PTE_P | X86_PTE_RW | X86_PTE_US; 373 pWalk->GCPhys = GCPtr & PAGE_BASE_GC_MASK; 358 374 NOREF(pVCpu); 359 375 return VINF_SUCCESS; … … 363 379 || PGM_GST_TYPE == PGM_TYPE_AMD64 364 380 365 GSTPTWALK Walk; 366 int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk); 381 PGMPTWALK Walk; 382 GSTPTWALK GstWalk; 383 RT_ZERO(Walk); 384 RT_ZERO(GstWalk); 385 int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk, &GstWalk); 367 386 if (RT_FAILURE(rc)) 368 387 return rc; 369 388 370 if (pGCPhys) 371 *pGCPhys = Walk.Core.GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK; 372 373 if (pfFlags) 389 uint64_t fFlags; 390 if (!Walk.fBigPage) 391 fFlags = (GstWalk.Pte.u & ~(GST_PTE_PG_MASK | X86_PTE_RW | X86_PTE_US)) /* NX not needed */ 392 | (Walk.fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK)) 393 # if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE) 394 | (Walk.fEffective & PGM_PTATTRS_NX_MASK) 395 # endif 396 ; 397 else 374 398 { 375 if (!Walk.Core.fBigPage) 376 *pfFlags = (Walk.Pte.u & ~(GST_PTE_PG_MASK | X86_PTE_RW | X86_PTE_US)) /* NX not needed */ 377 | (Walk.Core.fEffective & ( PGM_PTATTRS_W_MASK 378 | PGM_PTATTRS_US_MASK)) 399 fFlags = (GstWalk.Pde.u & ~(GST_PTE_PG_MASK | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PS)) /* NX not needed */ 400 | (Walk.fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK | PGM_PTATTRS_PAT_MASK)) 379 401 # if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE) 380 | (Walk.Core.fEffective & PGM_PTATTRS_NX_MASK) 381 # endif 382 ; 383 else 384 { 385 *pfFlags = (Walk.Pde.u & ~(GST_PTE_PG_MASK | X86_PDE4M_RW | 
X86_PDE4M_US | X86_PDE4M_PS)) /* NX not needed */ 386 | (Walk.Core.fEffective & ( PGM_PTATTRS_W_MASK 387 | PGM_PTATTRS_US_MASK 388 | PGM_PTATTRS_PAT_MASK)) 389 # if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE) 390 | (Walk.Core.fEffective & PGM_PTATTRS_NX_MASK) 391 # endif 392 ; 393 } 402 | (Walk.fEffective & PGM_PTATTRS_NX_MASK) 403 # endif 404 ; 394 405 } 395 406 407 pWalk->fSucceeded = true; 408 pWalk->GCPtr = GCPtr; 409 pWalk->GCPhys = Walk.GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK; 410 pWalk->fEffective = fFlags; 396 411 return VINF_SUCCESS; 397 412 … … 425 440 for (;;) 426 441 { 427 GSTPTWALK Walk; 428 int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk); 442 PGMPTWALK Walk; 443 GSTPTWALK GstWalk; 444 int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk, &GstWalk); 429 445 if (RT_FAILURE(rc)) 430 446 return rc; 431 447 432 if (!Walk. Core.fBigPage)448 if (!Walk.fBigPage) 433 449 { 434 450 /* … … 438 454 */ 439 455 unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK; 440 while (iPTE < RT_ELEMENTS( Walk.pPt->a))456 while (iPTE < RT_ELEMENTS(GstWalk.pPt->a)) 441 457 { 442 GSTPTE Pte = Walk.pPt->a[iPTE];458 GSTPTE Pte = GstWalk.pPt->a[iPTE]; 443 459 Pte.u = (Pte.u & (fMask | X86_PTE_PAE_PG_MASK)) 444 460 | (fFlags & ~GST_PTE_PG_MASK); 445 Walk.pPt->a[iPTE] = Pte;461 GstWalk.pPt->a[iPTE] = Pte; 446 462 447 463 /* next page */ … … 460 476 GSTPDE PdeNew; 461 477 # if PGM_GST_TYPE == PGM_TYPE_32BIT 462 PdeNew.u = ( Walk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PG_HIGH_MASK | X86_PDE4M_PS))478 PdeNew.u = (GstWalk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PG_HIGH_MASK | X86_PDE4M_PS)) 463 479 # else 464 PdeNew.u = ( Walk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS))480 PdeNew.u = (GstWalk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS)) 465 481 # endif 466 482 | (fFlags & ~GST_PTE_PG_MASK) 467 483 | ((fFlags & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT); 468 * Walk.pPde = PdeNew;484 *GstWalk.pPde = PdeNew; 469 485 470 486 /* advance */ -
trunk/src/VBox/VMM/VMMAll/PGMAllGstSlatEpt.cpp.h
r92336 r92426 17 17 18 18 #if PGM_GST_TYPE == PGM_TYPE_EPT 19 DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(PVMCPUCC pVCpu, P GSTPTWALK pWalk, int iLevel)19 DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel) 20 20 { 21 21 NOREF(pVCpu); 22 pWalk-> Core.fNotPresent = true;23 pWalk-> Core.uLevel = (uint8_t)iLevel;22 pWalk->fNotPresent = true; 23 pWalk->uLevel = (uint8_t)iLevel; 24 24 return VERR_PAGE_TABLE_NOT_PRESENT; 25 25 } 26 26 27 27 28 DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(PVMCPUCC pVCpu, P GSTPTWALK pWalk, int iLevel, int rc)28 DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel, int rc) 29 29 { 30 30 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc); NOREF(pVCpu); 31 pWalk-> Core.fBadPhysAddr = true;32 pWalk-> Core.uLevel = (uint8_t)iLevel;31 pWalk->fBadPhysAddr = true; 32 pWalk->uLevel = (uint8_t)iLevel; 33 33 return VERR_PAGE_TABLE_NOT_PRESENT; 34 34 } 35 35 36 36 37 DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(PVMCPUCC pVCpu, P GSTPTWALK pWalk, int iLevel)37 DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel) 38 38 { 39 39 NOREF(pVCpu); 40 pWalk-> Core.fRsvdError = true;41 pWalk-> Core.uLevel = (uint8_t)iLevel;40 pWalk->fRsvdError = true; 41 pWalk->uLevel = (uint8_t)iLevel; 42 42 return VERR_PAGE_TABLE_NOT_PRESENT; 43 43 } … … 45 45 46 46 DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(Walk)(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested, 47 PGSTPTWALK pWalk) 48 { 47 PPGMPTWALK pWalk, PGSTPTWALK pGstWalk) 48 { 49 /** @todo implement figuring out fEptMisconfig. */ 49 50 /* 50 * Init walk structure .51 * Init walk structures. 
51 52 */ 52 int rc;53 53 RT_ZERO(*pWalk); 54 pWalk->Core.GCPtr = GCPtrNested; 55 pWalk->Core.GCPhysNested = GCPhysNested; 56 pWalk->Core.fIsSlat = true; 57 pWalk->Core.fIsLinearAddrValid = fIsLinearAddrValid; 54 RT_ZERO(*pGstWalk); 55 56 pWalk->GCPtr = GCPtrNested; 57 pWalk->GCPhysNested = GCPhysNested; 58 pWalk->fIsLinearAddrValid = fIsLinearAddrValid; 59 pWalk->fIsSlat = true; 58 60 59 61 /* … … 82 84 uint64_t fEffective; 83 85 { 84 rc = pgmGstGetEptPML4PtrEx(pVCpu, &pWalk->pPml4);86 int rc = pgmGstGetEptPML4PtrEx(pVCpu, &pGstWalk->pPml4); 85 87 if (RT_SUCCESS(rc)) { /* probable */ } 86 88 else return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc); 87 89 88 90 PEPTPML4E pPml4e; 89 p Walk->pPml4e = pPml4e = &pWalk->pPml4->a[(GCPhysNested >> EPT_PML4_SHIFT) & EPT_PML4_MASK];91 pGstWalk->pPml4e = pPml4e = &pGstWalk->pPml4->a[(GCPhysNested >> EPT_PML4_SHIFT) & EPT_PML4_MASK]; 90 92 EPTPML4E Pml4e; 91 p Walk->Pml4e.u = Pml4e.u = pPml4e->u;93 pGstWalk->Pml4e.u = Pml4e.u = pPml4e->u; 92 94 93 95 if (GST_IS_PGENTRY_PRESENT(pVCpu, Pml4e)) { /* probable */ } … … 107 109 | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed) 108 110 | fEffectiveEpt; 109 pWalk-> Core.fEffective = fEffective;110 111 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pml4e.u & EPT_PML4E_PG_MASK, &p Walk->pPdpt);111 pWalk->fEffective = fEffective; 112 113 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pml4e.u & EPT_PML4E_PG_MASK, &pGstWalk->pPdpt); 112 114 if (RT_SUCCESS(rc)) { /* probable */ } 113 115 else return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc); … … 115 117 { 116 118 PEPTPDPTE pPdpte; 117 p Walk->pPdpte = pPdpte = &pWalk->pPdpt->a[(GCPhysNested >> GST_PDPT_SHIFT) & GST_PDPT_MASK];119 pGstWalk->pPdpte = pPdpte = &pGstWalk->pPdpt->a[(GCPhysNested >> GST_PDPT_SHIFT) & GST_PDPT_MASK]; 118 120 EPTPDPTE Pdpte; 119 p Walk->Pdpte.u = Pdpte.u = pPdpte->u;121 pGstWalk->Pdpte.u = Pdpte.u = pPdpte->u; 120 122 121 123 if (GST_IS_PGENTRY_PRESENT(pVCpu, Pdpte)) { /* probable */ } … … 134 136 | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed) 135 137 | (fEffectiveEpt & fCumulativeEpt); 136 pWalk-> Core.fEffective = fEffective;138 pWalk->fEffective = fEffective; 137 139 } 138 140 else if (GST_IS_BIG_PDPE_VALID(pVCpu, Pdpte)) … … 151 153 fEffective |= RT_BF_MAKE(PGM_PTATTRS_D, fDirty) 152 154 | RT_BF_MAKE(PGM_PTATTRS_EPT_MEMTYPE, fMemType); 153 pWalk-> Core.fEffective = fEffective;154 155 pWalk-> Core.fGigantPage = true;156 pWalk-> Core.fSucceeded = true;157 pWalk-> Core.GCPhys = GST_GET_BIG_PDPE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pdpte)155 pWalk->fEffective = fEffective; 156 157 pWalk->fGigantPage = true; 158 pWalk->fSucceeded = true; 159 pWalk->GCPhys = GST_GET_BIG_PDPE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pdpte) 158 160 | (GCPhysNested & GST_GIGANT_PAGE_OFFSET_MASK); 159 PGM_A20_APPLY_TO_VAR(pVCpu, pWalk-> Core.GCPhys);161 PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->GCPhys); 160 162 return VINF_SUCCESS; 161 163 } … … 164 166 { 165 167 PGSTPDE pPde; 166 p Walk->pPde = pPde = &pWalk->pPd->a[(GCPhysNested >> GST_PD_SHIFT) & GST_PD_MASK];168 pGstWalk->pPde = pPde = &pGstWalk->pPd->a[(GCPhysNested >> GST_PD_SHIFT) & GST_PD_MASK]; 167 169 GSTPDE Pde; 168 pWalk->Pde.u = Pde.u = pPde->u; 170 pGstWalk->Pde.u = Pde.u = pPde->u; 171 169 172 if (GST_IS_PGENTRY_PRESENT(pVCpu, Pde)) { /* probable */ } 170 173 else return PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(pVCpu, pWalk, 2); 174 171 175 if ((Pde.u & X86_PDE_PS) && GST_IS_PSE_ACTIVE(pVCpu)) 172 176 { … … 187 191 fEffective |= RT_BF_MAKE(PGM_PTATTRS_D, fDirty) 188 192 | RT_BF_MAKE(PGM_PTATTRS_EPT_MEMTYPE, 
fMemType); 189 pWalk-> Core.fEffective = fEffective;190 191 pWalk-> Core.fBigPage = true;192 pWalk-> Core.fSucceeded = true;193 pWalk-> Core.GCPhys = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)193 pWalk->fEffective = fEffective; 194 195 pWalk->fBigPage = true; 196 pWalk->fSucceeded = true; 197 pWalk->GCPhys = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde) 194 198 | (GCPhysNested & GST_BIG_PAGE_OFFSET_MASK); 195 PGM_A20_APPLY_TO_VAR(pVCpu, pWalk-> Core.GCPhys);199 PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->GCPhys); 196 200 return VINF_SUCCESS; 197 201 } … … 209 213 | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed) 210 214 | (fEffectiveEpt & fCumulativeEpt); 211 pWalk-> Core.fEffective = fEffective;212 213 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GST_GET_PDE_GCPHYS(Pde), &pWalk->pPt);215 pWalk->fEffective = fEffective; 216 217 int const rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GST_GET_PDE_GCPHYS(Pde), &pGstWalk->pPt); 214 218 if (RT_SUCCESS(rc)) { /* probable */ } 215 219 else return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 1, rc); … … 217 221 { 218 222 PGSTPTE pPte; 219 p Walk->pPte = pPte = &pWalk->pPt->a[(GCPhysNested >> GST_PT_SHIFT) & GST_PT_MASK];223 pGstWalk->pPte = pPte = &pGstWalk->pPt->a[(GCPhysNested >> GST_PT_SHIFT) & GST_PT_MASK]; 220 224 GSTPTE Pte; 221 p Walk->Pte.u = Pte.u = pPte->u;225 pGstWalk->Pte.u = Pte.u = pPte->u; 222 226 223 227 if (GST_IS_PGENTRY_PRESENT(pVCpu, Pte)) { /* probable */ } … … 240 244 fEffective |= RT_BF_MAKE(PGM_PTATTRS_D, fDirty) 241 245 | RT_BF_MAKE(PGM_PTATTRS_EPT_MEMTYPE, fMemType); 242 pWalk->Core.fEffective = fEffective; 243 244 pWalk->Core.fSucceeded = true; 245 pWalk->Core.GCPhys = GST_GET_PTE_GCPHYS(Pte) 246 | (GCPhysNested & PAGE_OFFSET_MASK); 246 pWalk->fEffective = fEffective; 247 248 pWalk->fSucceeded = true; 249 pWalk->GCPhys = GST_GET_PTE_GCPHYS(Pte) | (GCPhysNested & PAGE_OFFSET_MASK); 247 250 return VINF_SUCCESS; 248 251 } -
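In PGMAllGstSlatEpt.cpp.h the EPT second-level (SLAT) walker gets the same treatment: the generic results go into a PPGMPTWALK and the per-level EPT entries into a separate PGSTPTWALK. A minimal sketch of invoking it for a nested-guest physical address follows; it assumes the EPT instantiation of the template (PGM_GST_TYPE == PGM_TYPE_EPT), and eptSlatExample is a hypothetical caller, not part of the changeset.

/* Sketch only: valid inside the EPT SLAT instantiation of this template. */
static int eptSlatExample(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, RTGCPTR GCPtrNested)
{
    PGMPTWALK Walk;      /* generic results: fSucceeded, fIsSlat, GCPhys, fEffective */
    GSTPTWALK GstWalk;   /* EPT-specific: pPml4e, pPdpte, pPde, pPte, ... */
    int rc = PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, true /*fIsLinearAddrValid*/,
                                         GCPtrNested, &Walk, &GstWalk);
    if (RT_SUCCESS(rc) && Walk.fSucceeded)
        Log(("nested %RGp translates to %RGp (attrs %#RX64)\n",
             GCPhysNested, Walk.GCPhys, (uint64_t)Walk.fEffective));
    else
        Log(("SLAT walk failed at level %u, rc=%Rrc\n", Walk.uLevel, rc));
    return rc;
}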
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
r92391 r92426 2308 2308 VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys) 2309 2309 { 2310 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys); 2310 PGMPTWALK Walk; 2311 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk); 2311 2312 if (pGCPhys && RT_SUCCESS(rc)) 2312 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;2313 *pGCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK); 2313 2314 return rc; 2314 2315 } … … 2327 2328 VMM_INT_DECL(int) PGMPhysGCPtr2HCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys) 2328 2329 { 2329 PVMCC pVM = pVCpu->CTX_SUFF(pVM);2330 RTGCPHYS GCPhys;2331 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);2330 PVMCC pVM = pVCpu->CTX_SUFF(pVM); 2331 PGMPTWALK Walk; 2332 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk); 2332 2333 if (RT_SUCCESS(rc)) 2333 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);2334 rc = PGMPhysGCPhys2HCPhys(pVM, Walk.GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys); 2334 2335 return rc; 2335 2336 } … … 3429 3430 VMMDECL(VBOXSTRICTRC) PGMPhysReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, PGMACCESSORIGIN enmOrigin) 3430 3431 { 3431 RTGCPHYS GCPhys;3432 uint64_t fFlags;3433 3432 int rc; 3434 3433 PVMCC pVM = pVCpu->CTX_SUFF(pVM); … … 3449 3448 { 3450 3449 /* Convert virtual to physical address + flags */ 3451 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys); 3450 PGMPTWALK Walk; 3451 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk); 3452 3452 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc); 3453 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;3453 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK); 3454 3454 3455 3455 /* mark the guest page as accessed. */ 3456 if (!( fFlags& X86_PTE_A))3456 if (!(Walk.fEffective & X86_PTE_A)) 3457 3457 { 3458 3458 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A)); … … 3469 3469 { 3470 3470 /* Convert virtual to physical address + flags */ 3471 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys); 3471 PGMPTWALK Walk; 3472 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk); 3472 3473 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc); 3473 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;3474 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK); 3474 3475 3475 3476 /* mark the guest page as accessed. */ 3476 if (!( fFlags& X86_PTE_A))3477 if (!(Walk.fEffective & X86_PTE_A)) 3477 3478 { 3478 3479 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A)); … … 3520 3521 VMMDECL(VBOXSTRICTRC) PGMPhysWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, PGMACCESSORIGIN enmOrigin) 3521 3522 { 3522 RTGCPHYS GCPhys;3523 uint64_t fFlags;3524 3523 int rc; 3525 3524 PVMCC pVM = pVCpu->CTX_SUFF(pVM); … … 3540 3539 { 3541 3540 /* Convert virtual to physical address + flags */ 3542 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys); 3541 PGMPTWALK Walk; 3542 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk); 3543 3543 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc); 3544 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;3544 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK); 3545 3545 3546 3546 /* Mention when we ignore X86_PTE_RW... 
*/ 3547 if (!( fFlags& X86_PTE_RW))3547 if (!(Walk.fEffective & X86_PTE_RW)) 3548 3548 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb)); 3549 3549 3550 3550 /* Mark the guest page as accessed and dirty if necessary. */ 3551 if (( fFlags& (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))3551 if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D)) 3552 3552 { 3553 3553 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); … … 3564 3564 { 3565 3565 /* Convert virtual to physical address + flags */ 3566 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys); 3566 PGMPTWALK Walk; 3567 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk); 3567 3568 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc); 3568 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;3569 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK); 3569 3570 3570 3571 /* Mention when we ignore X86_PTE_RW... */ 3571 if (!( fFlags& X86_PTE_RW))3572 if (!(Walk.fEffective & X86_PTE_RW)) 3572 3573 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb)); 3573 3574 3574 3575 /* Mark the guest page as accessed and dirty if necessary. */ 3575 if (( fFlags& (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))3576 if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D)) 3576 3577 { 3577 3578 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); -
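All the PGMAllPhys.cpp call sites above follow the same migration: the old fFlags/GCPhys out-parameters of PGMGstGetPage are replaced by a single PGMPTWALK whose GCPhys and fEffective fields are read afterwards. A minimal sketch of the new translate-and-mark-accessed pattern, assuming the usual VMM includes; the helper name exampleGstPtrToPhys is made up.

#include <VBox/vmm/pgm.h>    /* PGMPTWALK, PGMGstGetPage, PGMGstModifyPage */
#include <iprt/x86.h>        /* X86_PTE_A */

/* Sketch of the post-r92426 calling convention: one walk structure instead of
 * separate fFlags/GCPhys out-parameters.  exampleGstPtrToPhys is hypothetical. */
static int exampleGstPtrToPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
{
    PGMPTWALK Walk;
    int rc = PGMGstGetPage(pVCpu, GCPtr, &Walk);
    if (RT_FAILURE(rc))
        return rc;

    /* Page frame from the walk plus the byte offset within the page. */
    *pGCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK);

    /* Mark the guest page as accessed, like the read/write paths above. */
    if (!(Walk.fEffective & X86_PTE_A))
        rc = PGMGstModifyPage(pVCpu, GCPtr, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
    return rc;
}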
trunk/src/VBox/VMM/VMMAll/PGMAllShw.h
r91854 r92426 569 569 * set instead of resolving the guest physical 570 570 * address yet again. */ 571 RTGCPHYS GCPhys; 572 uint64_t fGstPte; 573 rc = PGMGstGetPage(pVCpu, GCPtr, &fGstPte, &GCPhys); 571 PGMPTWALK GstWalk; 572 rc = PGMGstGetPage(pVCpu, GCPtr, &GstWalk); 574 573 AssertRC(rc); 575 574 if (RT_SUCCESS(rc)) 576 575 { 577 Assert(( fGstPte & X86_PTE_RW) || !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP /* allow netware hack */));578 PPGMPAGE pPage = pgmPhysGetPage(pVM, G CPhys);576 Assert((GstWalk.fEffective & X86_PTE_RW) || !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP /* allow netware hack */)); 577 PPGMPAGE pPage = pgmPhysGetPage(pVM, GstWalk.GCPhys); 579 578 Assert(pPage); 580 579 if (pPage) 581 580 { 582 rc = pgmPhysPageMakeWritable(pVM, pPage, G CPhys);581 rc = pgmPhysPageMakeWritable(pVM, pPage, GstWalk.GCPhys); 583 582 AssertRCReturn(rc, rc); 584 Log(("%s: pgmPhysPageMakeWritable on %RGv / %RGp %R[pgmpage]\n", __PRETTY_FUNCTION__, GCPtr, G CPhys, pPage));583 Log(("%s: pgmPhysPageMakeWritable on %RGv / %RGp %R[pgmpage]\n", __PRETTY_FUNCTION__, GCPtr, GstWalk.GCPhys, pPage)); 585 584 } 586 585 } -
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r92392 r92426 7193 7193 7194 7194 /* Check if the page at the fault-address is the APIC base. */ 7195 RTGCPHYS GCPhysPage;7196 int rc2 = PGMGstGetPage(pVCpu, (RTGCPTR)uFaultAddress, NULL /* pfFlags */, &GCPhysPage);7195 PGMPTWALK Walk; 7196 int rc2 = PGMGstGetPage(pVCpu, (RTGCPTR)uFaultAddress, &Walk); 7197 7197 if ( rc2 == VINF_SUCCESS 7198 && GCPhysPage== GCPhysApicBase)7198 && Walk.GCPhys == GCPhysApicBase) 7199 7199 { 7200 7200 /* Only attempt to patch the instruction once. */ -
trunk/src/VBox/VMM/VMMR0/PGMR0SharedPage.cpp
r86473 r92426 74 74 { 75 75 /** @todo inefficient to fetch each guest page like this... */ 76 RTGCPHYS GCPhys; 77 uint64_t fFlags; 78 rc = PGMGstGetPage(pVCpu, GCPtrPage, &fFlags, &GCPhys); 76 PGMPTWALK Walk; 77 rc = PGMGstGetPage(pVCpu, GCPtrPage, &Walk); 79 78 if ( rc == VINF_SUCCESS 80 && !( fFlags& X86_PTE_RW)) /* important as we make assumptions about this below! */79 && !(Walk.fEffective & X86_PTE_RW)) /* important as we make assumptions about this below! */ 81 80 { 82 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);81 PPGMPAGE pPage = pgmPhysGetPage(pVM, Walk.GCPhys); 83 82 Assert(!pPage || !PGM_PAGE_IS_BALLOONED(pPage)); 84 83 if ( pPage … … 89 88 PageDesc.idPage = PGM_PAGE_GET_PAGEID(pPage); 90 89 PageDesc.HCPhys = PGM_PAGE_GET_HCPHYS(pPage); 91 PageDesc.GCPhys = GCPhys;90 PageDesc.GCPhys = Walk.GCPhys; 92 91 93 92 rc = GMMR0SharedModuleCheckPage(pGVM, pModule, idxRegion, idxPage, &PageDesc); -
trunk/src/VBox/VMM/VMMR3/DBGFAddr.cpp
r90784 r92426 231 231 VMCPU_ASSERT_EMT(pVCpu); 232 232 /* This is just a wrapper because we cannot pass FlatPtr thru VMR3ReqCall directly. */ 233 return PGMGstGetPage(pVCpu, pAddress->FlatPtr, NULL, pGCPhys); 233 PGMPTWALK Walk; 234 int const rc = PGMGstGetPage(pVCpu, pAddress->FlatPtr, &Walk); 235 *pGCPhys = Walk.GCPhys; 236 return rc; 234 237 } 235 238 -
trunk/src/VBox/VMM/VMMR3/PGMDbg.cpp
r91904 r92426 869 869 RTGCPHYS GCPhysPrev = NIL_RTGCPHYS; 870 870 bool fFullWalk = true; 871 PGMPTWALK GSTWalk;872 RT_ZERO(Walk);871 PGMPTWALK Walk; 872 PGMPTWALKGST WalkGst; 873 873 874 874 PGM_LOCK_VOID(pVM); … … 877 877 int rc; 878 878 if (fFullWalk) 879 rc = pgmGstPtWalk(pVCpu, GCPtr, &Walk );879 rc = pgmGstPtWalk(pVCpu, GCPtr, &Walk, &WalkGst); 880 880 else 881 rc = pgmGstPtWalkNext(pVCpu, GCPtr, &Walk );882 if (RT_SUCCESS(rc) && Walk. u.Core.fSucceeded)881 rc = pgmGstPtWalkNext(pVCpu, GCPtr, &Walk, &WalkGst); 882 if (RT_SUCCESS(rc) && Walk.fSucceeded) 883 883 { 884 884 fFullWalk = false; 885 885 886 886 /* Skip if same page as previous one (W10 optimization). */ 887 if ( Walk. u.Core.GCPhys != GCPhysPrev887 if ( Walk.GCPhys != GCPhysPrev 888 888 || cbPrev != 0) 889 889 { 890 PPGMPAGE pPage = pgmPhysGetPage(pVM, Walk. u.Core.GCPhys);890 PPGMPAGE pPage = pgmPhysGetPage(pVM, Walk.GCPhys); 891 891 if ( pPage 892 892 && ( !PGM_PAGE_IS_ZERO(pPage) … … 895 895 && !PGM_PAGE_IS_BALLOONED(pPage)) 896 896 { 897 GCPhysPrev = Walk. u.Core.GCPhys;897 GCPhysPrev = Walk.GCPhys; 898 898 void const *pvPage; 899 899 PGMPAGEMAPLOCK Lock; 900 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, Walk. u.Core.GCPhys, &pvPage, &Lock);900 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, Walk.GCPhys, &pvPage, &Lock); 901 901 if (RT_SUCCESS(rc)) 902 902 { … … 933 933 else 934 934 { 935 Assert(Walk .enmType != PGMPTWALKGSTTYPE_INVALID);936 Assert(!Walk. u.Core.fSucceeded);935 Assert(WalkGst.enmType != PGMPTWALKGSTTYPE_INVALID); 936 Assert(!Walk.fSucceeded); 937 937 cbPrev = 0; /* ignore error. */ 938 938 … … 942 942 */ 943 943 uint64_t cPagesCanSkip; 944 switch (Walk.u .Core.uLevel)944 switch (Walk.uLevel) 945 945 { 946 946 case 1: … … 949 949 break; 950 950 case 2: 951 if (Walk .enmType == PGMPTWALKGSTTYPE_32BIT)951 if (WalkGst.enmType == PGMPTWALKGSTTYPE_32BIT) 952 952 { 953 953 cPagesCanSkip = X86_PG_ENTRIES - ((GCPtr >> X86_PT_SHIFT) & X86_PT_MASK); … … 977 977 break; 978 978 default: 979 AssertMsgFailed(("%d\n", Walk.u .Core.uLevel));979 AssertMsgFailed(("%d\n", Walk.uLevel)); 980 980 cPagesCanSkip = 0; 981 981 break; -
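PGMDbg.cpp now keeps two structures per scan: a PGMPTWALK for the generic results and a PGMPTWALKGST for the mode-specific union (with its enmType discriminator), passing both to pgmGstPtWalk and pgmGstPtWalkNext. A minimal sketch of that loop shape follows; pgmGstPtWalk/pgmGstPtWalkNext are VMM-internal (declared in PGMInternal.h, see the bottom of this changeset), so this only illustrates the calling convention, and examplePtWalkScan is a made-up name.

/* Sketch only: assumes the VMM-internal declarations from PGMInternal.h. */
static void examplePtWalkScan(PVMCPUCC pVCpu, RTGCPTR GCPtrFirst, uint32_t cPages)
{
    PGMPTWALK    Walk;      /* generic results */
    PGMPTWALKGST WalkGst;   /* mode-specific union + enmType */
    bool         fFullWalk = true;

    for (uint32_t i = 0; i < cPages; i++)
    {
        RTGCPTR const GCPtr = GCPtrFirst + ((RTGCPTR)i << PAGE_SHIFT);
        int rc = fFullWalk
               ? pgmGstPtWalk    (pVCpu, GCPtr, &Walk, &WalkGst)
               : pgmGstPtWalkNext(pVCpu, GCPtr, &Walk, &WalkGst);
        if (RT_SUCCESS(rc) && Walk.fSucceeded)
        {
            fFullWalk = false;
            Log(("%RGv -> %RGp\n", GCPtr, Walk.GCPhys));
        }
        else
        {
            /* The union says which walker ran; the generic part has the failure level. */
            Assert(WalkGst.enmType != PGMPTWALKGSTTYPE_INVALID);
            Log(("%RGv not mapped (level %u)\n", GCPtr, Walk.uLevel));
            fFullWalk = true;   /* restart with a full walk on the next page */
        }
    }
}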
trunk/src/VBox/VMM/VMMR3/PGMSharedPage.cpp
r90439 r92426 288 288 { 289 289 /* Debug only API for the page fusion testcase. */ 290 RTGCPHYS GCPhys; 291 uint64_t fFlags; 290 PGMPTWALK Walk; 292 291 293 292 PGM_LOCK_VOID(pVM); 294 293 295 int rc = PGMGstGetPage(VMMGetCpu(pVM), GCPtrPage, & fFlags, &GCPhys);294 int rc = PGMGstGetPage(VMMGetCpu(pVM), GCPtrPage, &Walk); 296 295 switch (rc) 297 296 { 298 297 case VINF_SUCCESS: 299 298 { 300 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);299 PPGMPAGE pPage = pgmPhysGetPage(pVM, Walk.GCPhys); 301 300 if (pPage) 302 301 { 303 302 *pfShared = PGM_PAGE_IS_SHARED(pPage); 304 *pfPageFlags = fFlags;303 *pfPageFlags = Walk.fEffective; 305 304 } 306 305 else -
trunk/src/VBox/VMM/include/PGMInternal.h
r92420 r92426 37 37 #include <VBox/vmm/gmm.h> 38 38 #include <VBox/vmm/hm.h> 39 #include <VBox/vmm/hm_vmx.h>40 39 #include <iprt/asm.h> 41 40 #include <iprt/assert.h> … … 2331 2330 2332 2331 2333 /** @name PGMPTATTRS2334 *2335 * PGM page-table attributes.2336 *2337 * This is VirtualBox's combined page table attributes. It combines regular page2338 * table and Intel EPT attributes. It's 64-bit in size so there's ample room for2339 * bits added in the future to EPT or regular page tables (for e.g. Protection Key).2340 *2341 * The following bits map 1:1 (shifted by PGM_PTATTRS_EPT_SHIFT) to the Intel EPT2342 * attributes as these are unique to EPT and fit within 64-bits despite the shift:2343 * - EPT_R : Read access.2344 * - EPT_W : Write access.2345 * - EPT_X_SUPER : Execute or execute for supervisor-mode linear addr access.2346 * - EPT_MEMTYPE : EPT memory type.2347 * - EPT_IGNORE_PAT: Ignore PAT memory type.2348 * - EPT_X_USER : Execute access for user-mode linear addresses.2349 *2350 * For regular page tables, the R bit is always 1 (same as P bit).2351 * For Intel EPT, the EPT_R and EPT_W bits are copied to R and W bits respectively.2352 *2353 * The following EPT attributes are mapped to the following positions because they2354 * exist in the regular page tables at these positions OR are exclusive to EPT and2355 * have been mapped to arbitrarily chosen positions:2356 * - EPT_A : Accessed (EPT bit 8 maps to bit 5).2357 * - EPT_D : Dirty (EPT bit 9 maps to bit 6).2358 * - EPT_SUPER_SHW_STACK : Supervisor Shadow Stack (EPT bit 60 maps to bit 24).2359 * - EPT_SUPPRESS_VE_XCPT: Suppress \#VE exception (EPT bit 63 maps to bit 25).2360 *2361 * Bits 12, 11:9 and 43 are deliberately kept unused (correspond to bit PS and bits2362 * 11:9 in the regular page-table structures and to bit 11 in the EPT structures2363 * respectively) as bit 12 is the page-size bit and bits 11:9 are reserved for2364 * use by software and we may want to use/preserve them in the future.2365 *2366 * @{ */2367 typedef uint64_t PGMPTATTRS;2368 /** Pointer to a PGMPTATTRS type. */2369 typedef PGMPTATTRS *PPGMPTATTRS;2370 2371 /** Read bit (always 1 for regular PT, copy of EPT_R for EPT). */2372 #define PGM_PTATTRS_R_SHIFT 02373 #define PGM_PTATTRS_R_MASK RT_BIT_64(PGM_PTATTRS_R_SHIFT)2374 /** Write access bit (aka read/write bit for regular PT). */2375 #define PGM_PTATTRS_W_SHIFT 12376 #define PGM_PTATTRS_W_MASK RT_BIT_64(PGM_PTATTRS_W_SHIFT)2377 /** User-mode access bit. */2378 #define PGM_PTATTRS_US_SHIFT 22379 #define PGM_PTATTRS_US_MASK RT_BIT_64(PGM_PTATTRS_US_SHIFT)2380 /** Write through cache bit. */2381 #define PGM_PTATTRS_PWT_SHIFT 32382 #define PGM_PTATTRS_PWT_MASK RT_BIT_64(PGM_PTATTRS_PWT_SHIFT)2383 /** Cache disabled bit. */2384 #define PGM_PTATTRS_PCD_SHIFT 42385 #define PGM_PTATTRS_PCD_MASK RT_BIT_64(PGM_PTATTRS_PCD_SHIFT)2386 /** Accessed bit. */2387 #define PGM_PTATTRS_A_SHIFT 52388 #define PGM_PTATTRS_A_MASK RT_BIT_64(PGM_PTATTRS_A_SHIFT)2389 /** Dirty bit. */2390 #define PGM_PTATTRS_D_SHIFT 62391 #define PGM_PTATTRS_D_MASK RT_BIT_64(PGM_PTATTRS_D_SHIFT)2392 /** The PAT bit. */2393 #define PGM_PTATTRS_PAT_SHIFT 72394 #define PGM_PTATTRS_PAT_MASK RT_BIT_64(PGM_PTATTRS_PAT_SHIFT)2395 /** The global bit. */2396 #define PGM_PTATTRS_G_SHIFT 82397 #define PGM_PTATTRS_G_MASK RT_BIT_64(PGM_PTATTRS_G_SHIFT)2398 /** Reserved (bits 12:9) unused. */2399 #define PGM_PTATTRS_RSVD_12_9_SHIFT 92400 #define PGM_PTATTRS_RSVD_12_9_MASK UINT64_C(0x0000000000001e00)2401 /** Read access bit - EPT only. 
*/2402 #define PGM_PTATTRS_EPT_R_SHIFT 132403 #define PGM_PTATTRS_EPT_R_MASK RT_BIT_64(PGM_PTATTRS_EPT_R_SHIFT)2404 /** Write access bit - EPT only. */2405 #define PGM_PTATTRS_EPT_W_SHIFT 142406 #define PGM_PTATTRS_EPT_W_MASK RT_BIT_64(PGM_PTATTRS_EPT_W_SHIFT)2407 /** Execute or execute access for supervisor-mode linear addresses - EPT only. */2408 #define PGM_PTATTRS_EPT_X_SUPER_SHIFT 152409 #define PGM_PTATTRS_EPT_X_SUPER_MASK RT_BIT_64(PGM_PTATTRS_EPT_X_SUPER_SHIFT)2410 /** EPT memory type - EPT only. */2411 #define PGM_PTATTRS_EPT_MEMTYPE_SHIFT 162412 #define PGM_PTATTRS_EPT_MEMTYPE_MASK UINT64_C(0x0000000000070000)2413 /** Ignore PAT memory type - EPT only. */2414 #define PGM_PTATTRS_EPT_IGNORE_PAT_SHIFT 192415 #define PGM_PTATTRS_EPT_IGNORE_PAT_MASK RT_BIT_64(PGM_PTATTRS_EPT_IGNORE_PAT_SHIFT)2416 /** Reserved (bits 22:20) unused. */2417 #define PGM_PTATTRS_RSVD_22_20_SHIFT 202418 #define PGM_PTATTRS_RSVD_22_20_MASK UINT64_C(0x0000000000700000)2419 /** Execute access for user-mode linear addresses - EPT only. */2420 #define PGM_PTATTRS_EPT_X_USER_SHIFT 232421 #define PGM_PTATTRS_EPT_X_USER_MASK RT_BIT_64(PGM_PTATTRS_EPT_X_USER_SHIFT)2422 /** Reserved (bit 23) - unused. */2423 #define PGM_PTATTRS_RSVD_23_SHIFT 242424 #define PGM_PTATTRS_RSVD_23_MASK UINT64_C(0x0000000001000000)2425 /** Supervisor shadow stack - EPT only. */2426 #define PGM_PTATTRS_EPT_SUPER_SHW_STACK_SHIFT 252427 #define PGM_PTATTRS_EPT_SUPER_SHW_STACK_MASK RT_BIT_64(PGM_PTATTRS_EPT_SUPER_SHW_STACK_SHIFT)2428 /** Suppress \#VE exception - EPT only. */2429 #define PGM_PTATTRS_EPT_SUPPRESS_VE_XCPT_SHIFT 262430 #define PGM_PTATTRS_EPT_SUPPRESS_VE_XCPT_MASK RT_BIT_64(PGM_PTATTRS_EPT_SUPPRESS_VE_XCPT_SHIFT)2431 /** Reserved (bits 62:27) - unused. */2432 #define PGM_PTATTRS_RSVD_62_27_SHIFT 272433 #define PGM_PTATTRS_RSVD_62_27_MASK UINT64_C(0x7ffffffff8000000)2434 /** No-execute bit. */2435 #define PGM_PTATTRS_NX_SHIFT 632436 #define PGM_PTATTRS_NX_MASK RT_BIT_64(PGM_PTATTRS_NX_SHIFT)2437 2438 RT_BF_ASSERT_COMPILE_CHECKS(PGM_PTATTRS_, UINT64_C(0), UINT64_MAX,2439 (R, W, US, PWT, PCD, A, D, PAT, G, RSVD_12_9, EPT_R, EPT_W, EPT_X_SUPER, EPT_MEMTYPE, EPT_IGNORE_PAT,2440 RSVD_22_20, EPT_X_USER, RSVD_23, EPT_SUPER_SHW_STACK, EPT_SUPPRESS_VE_XCPT, RSVD_62_27, NX));2441 2442 /** The bit position where the EPT specific attributes begin. */2443 #define PGM_PTATTRS_EPT_SHIFT PGM_PTATTRS_EPT_R_SHIFT2444 /** The mask of EPT bits (bits 26:ATTR_SHIFT). In the future we might choose to2445 * use higher unused bits for something else, in that case adjust this mask. */2446 #define PGM_PTATTRS_EPT_MASK UINT64_C(0x0000000007ffe000)2447 2448 /** The mask of all PGM page attribute bits for regular page-tables. */2449 #define PGM_PTATTRS_PT_VALID_MASK ( PGM_PTATTRS_R_MASK \2450 | PGM_PTATTRS_W_MASK \2451 | PGM_PTATTRS_US_MASK \2452 | PGM_PTATTRS_PWT_MASK \2453 | PGM_PTATTRS_PCD_MASK \2454 | PGM_PTATTRS_A_MASK \2455 | PGM_PTATTRS_D_MASK \2456 | PGM_PTATTRS_PAT_MASK \2457 | PGM_PTATTRS_G_MASK \2458 | PGM_PTATTRS_NX_MASK)2459 2460 /** The mask of all PGM page attribute bits for EPT. 
*/2461 #define PGM_PTATTRS_EPT_VALID_MASK ( PGM_PTATTRS_R_MASK \2462 | PGM_PTATTRS_W_MASK \2463 | PGM_PTATTRS_A_MASK \2464 | PGM_PTATTRS_D_MASK \2465 | PGM_PTATTRS_EPT_R_MASK \2466 | PGM_PTATTRS_EPT_W_MASK \2467 | PGM_PTATTRS_EPT_X_SUPER \2468 | PGM_PTATTRS_EPT_MEMTYPE \2469 | PGM_PTATTRS_EPT_IGNORE_PAT \2470 | PGM_PTATTRS_EPT_X_USER \2471 | PGM_PTATTRS_EPT_SUPER_SHW_STACK \2472 | PGM_PTATTRS_EPT_SUPPRESS_VE_XCPT)2473 2474 /* The mask of all PGM page attribute bits (combined). */2475 #define PGM_PTATTRS_VALID_MASK (PGM_PTATTRS_PT_VALID_MASK | PGM_PTATTRS_PT_VALID_MASK)2476 2477 /* Verify bits match the regular PT bits. */2478 AssertCompile(PGM_PTATTRS_W_SHIFT == X86_PTE_BIT_RW);2479 AssertCompile(PGM_PTATTRS_US_SHIFT == X86_PTE_BIT_US);2480 AssertCompile(PGM_PTATTRS_PWT_SHIFT == X86_PTE_BIT_PWT);2481 AssertCompile(PGM_PTATTRS_PCD_SHIFT == X86_PTE_BIT_PCD);2482 AssertCompile(PGM_PTATTRS_A_SHIFT == X86_PTE_BIT_A);2483 AssertCompile(PGM_PTATTRS_D_SHIFT == X86_PTE_BIT_D);2484 AssertCompile(PGM_PTATTRS_PAT_SHIFT == X86_PTE_BIT_PAT);2485 AssertCompile(PGM_PTATTRS_G_SHIFT == X86_PTE_BIT_G);2486 AssertCompile(PGM_PTATTRS_W_MASK == X86_PTE_RW);2487 AssertCompile(PGM_PTATTRS_US_MASK == X86_PTE_US);2488 AssertCompile(PGM_PTATTRS_PWT_MASK == X86_PTE_PWT);2489 AssertCompile(PGM_PTATTRS_PCD_MASK == X86_PTE_PCD);2490 AssertCompile(PGM_PTATTRS_A_MASK == X86_PTE_A);2491 AssertCompile(PGM_PTATTRS_D_MASK == X86_PTE_D);2492 AssertCompile(PGM_PTATTRS_PAT_MASK == X86_PTE_PAT);2493 AssertCompile(PGM_PTATTRS_G_MASK == X86_PTE_G);2494 2495 /* Verify those EPT bits that must map 1:1 (after shifting). */2496 AssertCompile(PGM_PTATTRS_EPT_R_SHIFT - PGM_PTATTRS_EPT_SHIFT == EPT_E_BIT_READ);2497 AssertCompile(PGM_PTATTRS_EPT_W_SHIFT - PGM_PTATTRS_EPT_SHIFT == EPT_E_BIT_WRITE);2498 AssertCompile(PGM_PTATTRS_EPT_X_SUPER_SHIFT - PGM_PTATTRS_EPT_SHIFT == EPT_E_BIT_EXECUTE);2499 AssertCompile(PGM_PTATTRS_EPT_IGNORE_PAT_SHIFT - PGM_PTATTRS_EPT_SHIFT == EPT_E_BIT_IGNORE_PAT);2500 AssertCompile(PGM_PTATTRS_EPT_X_USER_SHIFT - PGM_PTATTRS_EPT_SHIFT == EPT_E_BIT_USER_EXECUTE);2501 /** @} */2502 2503 2504 /**2505 * Page fault guest state for the AMD64 paging mode.2506 */2507 typedef struct PGMPTWALKCORE2508 {2509 /** The guest virtual address that is being resolved by the walk2510 * (input). */2511 RTGCPTR GCPtr;2512 2513 /** The nested-guest physical address that is being resolved if this is a2514 * second-level walk (input).2515 * @remarks only valid if fIsSlat is set. */2516 RTGCPHYS GCPhysNested;2517 2518 /** The guest physical address that is the result of the walk.2519 * @remarks only valid if fSucceeded is set. */2520 RTGCPHYS GCPhys;2521 2522 /** Set if the walk succeeded, i.d. GCPhys is valid. */2523 bool fSucceeded;2524 /** Whether this is a second-level translation. */2525 bool fIsSlat;2526 /** Whether the linear address (GCPtr) is valid and thus the cause for the2527 * second-level translation. */2528 bool fIsLinearAddrValid;2529 /** The level problem arrised at.2530 * PTE is level 1, PDE is level 2, PDPE is level 3, PML4 is level 4, CR3 is2531 * level 8. This is 0 on success. */2532 uint8_t uLevel;2533 /** Set if the page isn't present. */2534 bool fNotPresent;2535 /** Encountered a bad physical address. */2536 bool fBadPhysAddr;2537 /** Set if there was reserved bit violations. */2538 bool fRsvdError;2539 /** Set if it involves a big page (2/4 MB). */2540 bool fBigPage;2541 /** Set if it involves a gigantic page (1 GB). */2542 bool fGigantPage;2543 bool afPadding[7];2544 /** The effective attributes, PGM_PTATTRS_XXX. 
*/2545 PGMPTATTRS fEffective;2546 } PGMPTWALKCORE;2547 2548 2332 /** 2549 2333 * Guest page table walk for the AMD64 mode. … … 2551 2335 typedef struct PGMPTWALKGSTAMD64 2552 2336 { 2553 /** The common core. */2554 PGMPTWALKCORE Core;2555 2556 2337 PX86PML4 pPml4; 2557 2338 PX86PML4E pPml4e; … … 2580 2361 typedef struct PGMPTWALKGSTEPT 2581 2362 { 2582 /** The common core. */2583 PGMPTWALKCORE Core;2584 2585 2363 PEPTPML4 pPml4; 2586 2364 PEPTPML4E pPml4e; … … 2609 2387 typedef struct PGMPTWALKGSTPAE 2610 2388 { 2611 /** The common core. */2612 PGMPTWALKCORE Core;2613 2614 2389 PX86PDPT pPdpt; 2615 2390 PX86PDPE pPdpe; … … 2634 2409 typedef struct PGMPTWALKGST32BIT 2635 2410 { 2636 /** The common core. */2637 PGMPTWALKCORE Core;2638 2639 2411 PX86PD pPd; 2640 2412 PX86PDE pPde; … … 2676 2448 union 2677 2449 { 2678 /** The page walker core - always valid. */2679 PGMPTWALKCORE Core;2680 2450 /** The page walker for AMD64. */ 2681 2451 PGMPTWALKGSTAMD64 Amd64; … … 2866 2636 /** The guest mode type. */ 2867 2637 uint32_t uType; 2868 DECLCALLBACKMEMBER(int, pfnGetPage,(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));2638 DECLCALLBACKMEMBER(int, pfnGetPage,(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)); 2869 2639 DECLCALLBACKMEMBER(int, pfnModifyPage,(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask)); 2870 2640 DECLCALLBACKMEMBER(int, pfnEnter,(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3)); … … 3915 3685 int pgmGstLazyMapEptPml4(PVMCPUCC pVCpu, PEPTPML4 *ppPml4); 3916 3686 #endif 3917 int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK GST pWalk);3918 int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK GST pWalk);3687 int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk); 3688 int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk); 3919 3689 3920 3690 # if defined(VBOX_STRICT) && HC_ARCH_BITS == 64 && defined(IN_RING3)
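With the PGMPTATTRS definitions moved from PGMInternal.h to pgm.h (see the top of this changeset) and PGMPTWALKCORE folded into the public walk structure, the guest-mode pfnGetPage callback now fills a PGMPTWALK whose fEffective field is tested with the PGM_PTATTRS_* masks. A minimal sketch of such attribute checks; exampleDumpWalkAttrs is a hypothetical helper, and which bits are meaningful depends on whether the walk came from a regular page-table or an EPT walker.

#include <VBox/vmm/pgm.h>   /* PGMPTWALK, PGMPTATTRS, PGM_PTATTRS_* masks */
#include <VBox/log.h>

/* Sketch: interpreting the combined attributes of a successful walk. */
static void exampleDumpWalkAttrs(PPGMPTWALK pWalk)
{
    PGMPTATTRS const fAttrs = pWalk->fEffective;
    bool const fWritable = RT_BOOL(fAttrs & PGM_PTATTRS_W_MASK);
    bool const fUser     = RT_BOOL(fAttrs & PGM_PTATTRS_US_MASK);
    bool const fNoExec   = RT_BOOL(fAttrs & PGM_PTATTRS_NX_MASK);
    bool const fEptRead  = RT_BOOL(fAttrs & PGM_PTATTRS_EPT_R_MASK); /* EPT walks only */
    Log(("GCPhys=%RGp W=%d US=%d NX=%d EPT_R=%d\n",
         pWalk->GCPhys, fWritable, fUser, fNoExec, fEptRead));
}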