Changeset 104933 in vbox
- Timestamp:
- Jun 15, 2024 12:44:02 AM (3 months ago)
- Location:
- trunk
- Files:
- 3 edited
include/VBox/vmm/pgm.h (modified) (1 diff)
-
src/VBox/VMM/VMMAll/IEMAll.cpp (modified) (3 diffs)
-
src/VBox/VMM/VMMAll/PGMAllGst.h (modified) (4 diffs)
Legend:
- Unmodified
- Added
- Removed
-
trunk/include/VBox/vmm/pgm.h
r104932 r104933 348 348 * level 8. This is 0 on success. */ 349 349 #define PGM_WALKFAIL_LEVEL_MASK UINT32_C(0x0000f100) 350 /** Level shift (see PGM_WALK INFO_LEVEL_MASK). */350 /** Level shift (see PGM_WALKFAIL_LEVEL_MASK). */ 351 351 #define PGM_WALKFAIL_LEVEL_SHIFT 11 352 352 -
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r104932 r104933 554 554 } 555 555 #else 556 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) );556 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3); 557 557 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)); 558 558 #endif 559 RTGCPHYS const GCPhys = Walk.GCPhys; 560 /** @todo Check reserved bits and such stuff. PGM is better at doing 561 * that, so do it when implementing the guest virtual address 562 * TLB... */ 559 RTGCPHYS const GCPhys = WalkFast.GCPhys; 563 560 564 561 /* … … 1250 1247 } 1251 1248 1252 PGMPTWALK Walk; 1253 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk); 1254 if (RT_FAILURE(rc)) 1249 PGMPTWALKFAST WalkFast; 1250 int rc = PGMGstQueryPageFast(pVCpu, GCPtrNext, 1251 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE, 1252 &WalkFast); 1253 if (RT_SUCCESS(rc)) 1254 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS); 1255 else 1255 1256 { 1256 1257 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc)); 1257 1258 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 1258 if (Walk .fFailed & PGM_WALKFAIL_EPT)1259 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk , IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);1259 if (WalkFast.fFailed & PGM_WALKFAIL_EPT) 1260 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */); 1260 1261 #endif 1261 1262 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc); 1262 1263 } 1263 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3) 1264 { 1265 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext)); 1266 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 1267 if (Walk.fFailed & PGM_WALKFAIL_EPT) 1268 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */); 1269 #endif 1270 return 
iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED); 1271 } 1272 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) 1273 { 1274 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext)); 1275 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 1276 if (Walk.fFailed & PGM_WALKFAIL_EPT) 1277 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */); 1278 #endif 1279 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED); 1280 } 1281 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK); 1282 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode)); 1283 /** @todo Check reserved bits and such stuff. PGM is better at doing 1284 * that, so do it when implementing the guest virtual address 1285 * TLB... */ 1264 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3); 1265 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)); 1266 1267 RTGCPHYS const GCPhys = WalkFast.GCPhys; 1268 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode)); 1286 1269 1287 1270 /* … … 5710 5693 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 5711 5694 if (WalkFast.fFailed & PGM_WALKFAIL_EPT) 5712 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk , fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);5695 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */); 5713 5696 #endif 5714 5697 *pGCPhysMem = NIL_RTGCPHYS; -
trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
r104932 r104933 455 455 #if defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) || defined(VBOX_WITH_NESTED_HWVIRT_SVM_XXX) || defined(DOXYGEN_RUNNING) 456 456 /** Converts regular style walk info to fast style. */ 457 DECL_FORCE_INLINE(void) PGM_GST_NAME(ConvertPtWalkToFast)(PGMPTWALK const *pSrc, PPGMPTWALKFAST *pDst)457 DECL_FORCE_INLINE(void) PGM_GST_NAME(ConvertPtWalkToFast)(PGMPTWALK const *pSrc, PPGMPTWALKFAST pDst) 458 458 { 459 459 pDst->GCPtr = pSrc->GCPtr; … … 462 462 pDst->fInfo = (pSrc->fSucceeded ? PGM_WALKINFO_SUCCEEDED : 0) 463 463 | (pSrc->fIsSlat ? PGM_WALKINFO_IS_SLAT : 0) 464 | (pSrc->fIsLinearAddrValid ? PGM_WALKINFO_IS_LINEAR_ADDR_VALID : 0) 465 | ((uint32_t)pSrc->uLevel << PGM_WALKINFO_LEVEL_SHIFT); 466 pDst->fFailed = pSrc->fFailed; 464 | (pSrc->fIsLinearAddrValid ? PGM_WALKINFO_IS_LINEAR_ADDR_VALID : 0); 465 pDst->fFailed = pSrc->fFailed | ((uint32_t)pSrc->uLevel << PGM_WALKFAIL_LEVEL_SHIFT); 467 466 pDst->fEffective = pSrc->fEffective; 468 467 } … … 553 552 else \ 554 553 { \ 555 PGM_ NAME(ConvertPtWalkToFast)(&WalkSlat, pWalk); \554 PGM_GST_NAME(ConvertPtWalkToFast)(&WalkSlat, pWalk); \ 556 555 return rcX; \ 557 556 } \ … … 907 906 pWalk->GCPhys = WalkSlat.GCPhys; 908 907 pWalk->GCPhysNested = 0; 909 pWalk-> u64Union = 0;910 pWalk->f Succeeded = true;908 pWalk->fInfo = PGM_WALKINFO_SUCCEEDED; 909 pWalk->fFailed = PGM_WALKFAIL_SUCCESS; 911 910 pWalk->fEffective = X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_A | X86_PTE_D; 912 911 } 913 912 else 914 PGM_ NAME(ConvertPtWalkToFast)(&WalkSlat, pWalk);913 PGM_GST_NAME(ConvertPtWalkToFast)(&WalkSlat, pWalk); 915 914 return rc; 916 915 }
Note: See TracChangeset for help on using the changeset viewer.

