Changeset 72178 in vbox
- Timestamp: May 9, 2018 4:18:56 PM (6 years ago)
- Location: trunk
- Files: 6 edited
  - include/VBox/vmm/cpumctx.h (modified) (1 diff)
  - src/VBox/VMM/VMMR0/HMSVMR0.cpp (modified) (6 diffs)
  - src/VBox/VMM/VMMR3/CPUM.cpp (modified) (4 diffs)
  - src/VBox/VMM/VMMR3/HM.cpp (modified) (4 diffs)
  - src/VBox/VMM/include/CPUMInternal.h (modified) (1 diff)
  - src/VBox/VMM/include/HMInternal.h (modified) (2 diffs)
trunk/include/VBox/vmm/cpumctx.h
r71833 → r72178

@@ -288 +288 @@
 /**
  * SVM Host-state area (Nested Hw.virt - VirtualBox's layout).
+ *
+ * @warning Exercise caution while modifying the layout of this struct. It's
+ *          part of VM saved states.
  */
 #pragma pack(1)
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r72085 → r72178

@@ -5381 +5381 @@
         }

-        /** @todo Needed when restoring saved-state when saved state support wasn't yet
-         *        added. Perhaps it won't be required later. */
-#if 0
         case SVM_EXIT_NPF:
         {
             Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
-            if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_PF))
-                return HM_SVM_VMEXIT_NESTED(pVCpu, SVM_EXIT_XCPT_14, RT_LO_U32(uExitInfo1), uExitInfo2);
-            hmR0SvmSetPendingXcptPF(pVCpu, pCtx, RT_LO_U32(uExitInfo1), uExitInfo2);
-            return VINF_SUCCESS;
+            return hmR0SvmExitNestedPF(pVCpu, pCtx, pSvmTransient);
         }
-#else
-        case SVM_EXIT_NPF:
-#endif
+
         case SVM_EXIT_INIT:  /* We shouldn't get INIT signals while executing a nested-guest. */
-        {
             return hmR0SvmExitUnexpected(pVCpu, pCtx, pSvmTransient);
-        }

         default:
 …
@@ -7017 +7007 @@
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);

     PVM pVM = pVCpu->CTX_SUFF(pVM);
 …
     /* See AMD spec. 15.25.6 "Nested versus Guest Page Faults, Fault Ordering" for VMCB details for #NPF. */
-    PSVMVMCB pVmcb           = pVCpu->hm.s.svm.pVmcb;
-    uint32_t u32ErrCode      = pVmcb->ctrl.u64ExitInfo1;    /** @todo Make it more explicit that high bits can be non-zero. */
+    PSVMVMCB pVmcb           = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
     RTGCPHYS GCPhysFaultAddr = pVmcb->ctrl.u64ExitInfo2;
+    uint32_t u32ErrCode      = pVmcb->ctrl.u64ExitInfo1;    /* Note! High bits in EXITINFO1 may contain additional info and are
+                                                               thus intentionally not copied into u32ErrCode. */

     Log4(("#NPF at CS:RIP=%04x:%#RX64 faultaddr=%RGp errcode=%#x \n", pCtx->cs.Sel, pCtx->rip, GCPhysFaultAddr, u32ErrCode));

-#ifdef VBOX_HM_WITH_GUEST_PATCHING
-    /* TPR patching for 32-bit guests, using the reserved bit in the page tables for MMIO regions. */
+    /*
+     * TPR patching for 32-bit guests, using the reserved bit in the page tables for MMIO regions.
+     */
     if (   pVM->hm.s.fTprPatchingAllowed
         && (GCPhysFaultAddr & PAGE_OFFSET_MASK) == XAPIC_OFF_TPR
 …
         }
     }
-#endif

     /*
 …
@@ -7292 +7282 @@
     Assert(!pVM->hm.s.fNestedPaging);

-#ifdef VBOX_HM_WITH_GUEST_PATCHING
-    /* Shortcut for APIC TPR reads and writes; only applicable to 32-bit guests. */
+    /*
+     * TPR patching shortcut for APIC TPR reads and writes; only applicable to 32-bit guests.
+     */
     if (   pVM->hm.s.fTprPatchingAllowed
         && (uFaultAddress & 0xfff) == XAPIC_OFF_TPR
 …
         }
     }
-#endif

     Log4(("#PF: uFaultAddress=%#RX64 CS:RIP=%#04x:%#RX64 u32ErrCode %#RX32 cr3=%#RX64\n", uFaultAddress, pCtx->cs.Sel,
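Two things are worth pulling out of the hunks above. First, the nested-guest #NPF case no longer open-codes the intercept check; it defers to hmR0SvmExitNestedPF(). Second, the #NPF handler now obtains its VMCB via hmR0SvmGetCurrentVmcb() instead of hard-coding the outer guest's pVCpu->hm.s.svm.pVmcb, so the same handler serves both ordinary and nested-guest exits. The helper's body is not part of this changeset; the following is only a hedged sketch of the shape such a selector plausibly has, assembled from names visible in these diffs plus an assumed mode query (CPUMIsGuestInSvmNestedHwVirtMode):

    /* Hypothetical sketch; the real hmR0SvmGetCurrentVmcb() lives elsewhere in HMSVMR0.cpp. */
    DECLINLINE(PSVMVMCB) exampleGetCurrentVmcb(PVMCPU pVCpu, PCPUMCTX pCtx)
    {
        /* Exits taken while a nested-guest is running must be handled against
           the nested-guest VMCB, not the outer guest's. */
        if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
            return pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
        return pVCpu->hm.s.svm.pVmcb;
    }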
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
r71859 → r72178

@@ -282 +282 @@
 };

+/** Saved state field descriptors for SVM nested hardware-virtualization
+ *  Host State. */
+static const SSMFIELD g_aSvmHwvirtHostState[] =
+{
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, uEferMsr),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, uCr0),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, uCr4),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, uCr3),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, uRip),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, uRsp),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, uRax),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, rflags),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, es.Sel),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, es.ValidSel),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, es.fFlags),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, es.u64Base),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, es.u32Limit),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, es.Attr),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, cs.Sel),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, cs.ValidSel),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, cs.fFlags),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, cs.u64Base),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, cs.u32Limit),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, cs.Attr),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, ss.Sel),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, ss.ValidSel),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, ss.fFlags),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, ss.u64Base),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, ss.u32Limit),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, ss.Attr),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, ds.Sel),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, ds.ValidSel),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, ds.fFlags),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, ds.u64Base),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, ds.u32Limit),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, ds.Attr),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, gdtr.cbGdt),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, gdtr.pGdt),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, idtr.cbIdt),
+    SSMFIELD_ENTRY(       SVMHOSTSTATE, idtr.pIdt),
+    SSMFIELD_ENTRY_IGNORE(SVMHOSTSTATE, abPadding),
+    SSMFIELD_ENTRY_TERM()
+};
+
 /** Saved state field descriptors for CPUMCTX. */
 static const SSMFIELD g_aCpumX87Fields[] =
 …
@@ -1380 +1424 @@
         SSMR3PutStructEx(pSSM, pZmm16Hi, sizeof(*pZmm16Hi), SSMSTRUCT_FLAGS_FULL_STRUCT, g_aCpumZmm16HiFields, NULL);
     }
     if (pVM->cpum.ro.GuestFeatures.fSvm)
+    {
+        Assert(pGstCtx->hwvirt.svm.CTX_SUFF(pVmcb));
+        SSMR3PutU64(pSSM,    pGstCtx->hwvirt.svm.uMsrHSavePa);
+        SSMR3PutGCPhys(pSSM, pGstCtx->hwvirt.svm.GCPhysVmcb);
+        SSMR3PutU64(pSSM,    pGstCtx->hwvirt.svm.uPrevPauseTick);
+        SSMR3PutU16(pSSM,    pGstCtx->hwvirt.svm.cPauseFilter);
+        SSMR3PutU16(pSSM,    pGstCtx->hwvirt.svm.cPauseFilterThreshold);
+        SSMR3PutBool(pSSM,   pGstCtx->hwvirt.svm.fInterceptEvents);
+        SSMR3PutBool(pSSM,   pGstCtx->hwvirt.svm.fHMCachedVmcb);
+        SSMR3PutStructEx(pSSM, &pGstCtx->hwvirt.svm.HostState, sizeof(pGstCtx->hwvirt.svm.HostState), 0 /* fFlags */,
+                         g_aSvmHwvirtHostState, NULL /* pvUser */);
+        SSMR3PutMem(pSSM,    pGstCtx->hwvirt.svm.pVmcbR3,       SVM_VMCB_PAGES  << X86_PAGE_4K_SHIFT);
+        SSMR3PutMem(pSSM,    pGstCtx->hwvirt.svm.pvMsrBitmapR3, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
+        SSMR3PutMem(pSSM,    pGstCtx->hwvirt.svm.pvIoBitmapR3,  SVM_IOPM_PAGES  << X86_PAGE_4K_SHIFT);
+        SSMR3PutU32(pSSM,    pGstCtx->hwvirt.fLocalForcedActions);
+        SSMR3PutBool(pSSM,   pGstCtx->hwvirt.fGif);
+    }
     SSMR3PutU32(pSSM, pVCpu->cpum.s.fUseFlags);
     SSMR3PutU32(pSSM, pVCpu->cpum.s.fChanged);
 …
@@ -1413 +1474 @@
      * Validate version.
      */
-    if (   uVersion != CPUM_SAVED_STATE_VERSION_XSAVE
+    if (   uVersion != CPUM_SAVED_STATE_VERSION_HWVIRT_SVM
+        && uVersion != CPUM_SAVED_STATE_VERSION_XSAVE
         && uVersion != CPUM_SAVED_STATE_VERSION_GOOD_CPUID_COUNT
         && uVersion != CPUM_SAVED_STATE_VERSION_BAD_CPUID_COUNT
 …
@@ -1600 +1662 @@
             PX86XSAVEZMM16HI pZmm16Hi = CPUMCTX_XSAVE_C_PTR(pGstCtx, XSAVE_C_ZMM_16HI_BIT, PX86XSAVEZMM16HI);
             SSMR3GetStructEx(pSSM, pZmm16Hi, sizeof(*pZmm16Hi), SSMSTRUCT_FLAGS_FULL_STRUCT, g_aCpumZmm16HiFields, NULL);
+        }
+        if (uVersion >= CPUM_SAVED_STATE_VERSION_HWVIRT_SVM)
+        {
+            if (pVM->cpum.ro.GuestFeatures.fSvm)
+            {
+                Assert(pGstCtx->hwvirt.svm.CTX_SUFF(pVmcb));
+                SSMR3GetU64(pSSM,    &pGstCtx->hwvirt.svm.uMsrHSavePa);
+                SSMR3GetGCPhys(pSSM, &pGstCtx->hwvirt.svm.GCPhysVmcb);
+                SSMR3GetU64(pSSM,    &pGstCtx->hwvirt.svm.uPrevPauseTick);
+                SSMR3GetU16(pSSM,    &pGstCtx->hwvirt.svm.cPauseFilter);
+                SSMR3GetU16(pSSM,    &pGstCtx->hwvirt.svm.cPauseFilterThreshold);
+                SSMR3GetBool(pSSM,   &pGstCtx->hwvirt.svm.fInterceptEvents);
+                SSMR3GetBool(pSSM,   &pGstCtx->hwvirt.svm.fHMCachedVmcb);
+                SSMR3GetStructEx(pSSM, &pGstCtx->hwvirt.svm.HostState, sizeof(pGstCtx->hwvirt.svm.HostState),
+                                 0 /* fFlags */, g_aSvmHwvirtHostState, NULL /* pvUser */);
+                SSMR3GetMem(pSSM,    pGstCtx->hwvirt.svm.pVmcbR3,       SVM_VMCB_PAGES  << X86_PAGE_4K_SHIFT);
+                SSMR3GetMem(pSSM,    pGstCtx->hwvirt.svm.pvMsrBitmapR3, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
+                SSMR3GetMem(pSSM,    pGstCtx->hwvirt.svm.pvIoBitmapR3,  SVM_IOPM_PAGES  << X86_PAGE_4K_SHIFT);
+                SSMR3GetU32(pSSM,    &pGstCtx->hwvirt.fLocalForcedActions);
+                SSMR3GetBool(pSSM,   &pGstCtx->hwvirt.fGif);
+            }
         }
     }
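The save/load pair above follows the standard SSM versioning discipline: the saver always writes the newest layout, and the loader reads the new fields only when the unit version says they are present, so pre-r72178 saved states still load. Below is a minimal sketch of that discipline using a made-up state struct and version constant; only the SSMR3Put/Get calls are the real API used above:

    #include <VBox/vmm/ssm.h>
    #include <VBox/err.h>
    #include <iprt/assert.h>

    #define EXAMPLE_SAVED_STATE_VERSION_NEW  2   /* hypothetical; plays the role of CPUM_SAVED_STATE_VERSION_HWVIRT_SVM */

    typedef struct EXAMPLESTATE
    {
        uint64_t u64OldField;   /* present in every version */
        uint64_t u64NewField;   /* introduced in version 2 */
    } EXAMPLESTATE;

    /* Saver: unconditionally writes the current layout. */
    static int exampleSave(PSSMHANDLE pSSM, EXAMPLESTATE const *pState)
    {
        SSMR3PutU64(pSSM, pState->u64OldField);
        return SSMR3PutU64(pSSM, pState->u64NewField);
    }

    /* Loader: consumes only the fields the stream actually contains. */
    static int exampleLoad(PSSMHANDLE pSSM, EXAMPLESTATE *pState, uint32_t uVersion)
    {
        int rc = SSMR3GetU64(pSSM, &pState->u64OldField);
        if (uVersion >= EXAMPLE_SAVED_STATE_VERSION_NEW)
            rc = SSMR3GetU64(pSSM, &pState->u64NewField);   /* older streams keep the default */
        AssertRCReturn(rc, rc);
        return VINF_SUCCESS;
    }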
trunk/src/VBox/VMM/VMMR3/HM.cpp
r71933 → r72178

@@ -3457 +3457 @@
  * Execute state save operation.
  *
+ * Save only data that cannot be re-loaded while entering HM ring-0 code. This
+ * is because we always save the VM state from ring-3 and thus most HM state
+ * will be re-synced dynamically at runtime and don't need to be part of the VM
+ * saved state.
+ *
  * @returns VBox status code.
  * @param   pVM     The cross context VM structure.
 …
@@ -3469 +3474 @@
     for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
-        /*
-         * Save the basic bits - fortunately all the other things can be resynced on load.
-         */
-        rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.Event.fPending);
-        AssertRCReturn(rc, rc);
-        rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.Event.u32ErrCode);
-        AssertRCReturn(rc, rc);
-        rc = SSMR3PutU64(pSSM, pVM->aCpus[i].hm.s.Event.u64IntInfo);
-        AssertRCReturn(rc, rc);
-        /** @todo Shouldn't we be saving GCPtrFaultAddress too? */
-
-        /** @todo We only need to save pVM->aCpus[i].hm.s.vmx.fWasInRealMode and
-         *        perhaps not even that (the initial value of @c true is safe. */
-        uint32_t u32Dummy = PGMMODE_REAL;
-        rc = SSMR3PutU32(pSSM, u32Dummy);
-        AssertRCReturn(rc, rc);
-        rc = SSMR3PutU32(pSSM, u32Dummy);
-        AssertRCReturn(rc, rc);
-        rc = SSMR3PutU32(pSSM, u32Dummy);
-        AssertRCReturn(rc, rc);
-    }
-
-#ifdef VBOX_HM_WITH_GUEST_PATCHING
-    rc = SSMR3PutGCPtr(pSSM, pVM->hm.s.pGuestPatchMem);
-    AssertRCReturn(rc, rc);
-    rc = SSMR3PutGCPtr(pSSM, pVM->hm.s.pFreeGuestPatchMem);
-    AssertRCReturn(rc, rc);
-    rc = SSMR3PutU32(pSSM, pVM->hm.s.cbGuestPatchMem);
-    AssertRCReturn(rc, rc);
-
-    /* Store all the guest patch records too. */
-    rc = SSMR3PutU32(pSSM, pVM->hm.s.cPatches);
-    AssertRCReturn(rc, rc);
-
-    for (unsigned i = 0; i < pVM->hm.s.cPatches; i++)
-    {
-        PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
-
-        rc = SSMR3PutU32(pSSM, pPatch->Core.Key);
-        AssertRCReturn(rc, rc);
-
-        rc = SSMR3PutMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
-        AssertRCReturn(rc, rc);
-
-        rc = SSMR3PutU32(pSSM, pPatch->cbOp);
-        AssertRCReturn(rc, rc);
-
-        rc = SSMR3PutMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
-        AssertRCReturn(rc, rc);
-
-        rc = SSMR3PutU32(pSSM, pPatch->cbNewOp);
-        AssertRCReturn(rc, rc);
-
-        AssertCompileSize(HMTPRINSTR, 4);
-        rc = SSMR3PutU32(pSSM, (uint32_t)pPatch->enmType);
-        AssertRCReturn(rc, rc);
-
-        rc = SSMR3PutU32(pSSM, pPatch->uSrcOperand);
-        AssertRCReturn(rc, rc);
-
-        rc = SSMR3PutU32(pSSM, pPatch->uDstOperand);
-        AssertRCReturn(rc, rc);
-
-        rc = SSMR3PutU32(pSSM, pPatch->pJumpTarget);
-        AssertRCReturn(rc, rc);
-
-        rc = SSMR3PutU32(pSSM, pPatch->cFaults);
-        AssertRCReturn(rc, rc);
-        /** @todo We need to save SVMNESTEDVMCBCACHE (if pCtx fHMCached is true as we
-         *        are in nested-geust execution and the cache contains pristine
-         *        fields that we only restore on \#VMEXIT and not on
-         *        every exit-to-ring 3. */
-    }
-#endif
+        Assert(!pVM->aCpus[i].hm.s.Event.fPending);
+        if (pVM->cpum.ro.GuestFeatures.fSvm)
+        {
+            PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVM->aCpus[i].hm.s.svm.NstGstVmcbCache;
+            rc  = SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptRdCRx);
+            rc |= SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptWrCRx);
+            rc |= SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptRdDRx);
+            rc |= SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptWrDRx);
+            rc |= SSMR3PutU16(pSSM, pVmcbNstGstCache->u16PauseFilterThreshold);
+            rc |= SSMR3PutU16(pSSM, pVmcbNstGstCache->u16PauseFilterCount);
+            rc |= SSMR3PutU32(pSSM, pVmcbNstGstCache->u32InterceptXcpt);
+            rc |= SSMR3PutU64(pSSM, pVmcbNstGstCache->u64InterceptCtrl);
+            rc |= SSMR3PutU64(pSSM, pVmcbNstGstCache->u64TSCOffset);
+            rc |= SSMR3PutBool(pSSM, pVmcbNstGstCache->fVIntrMasking);
+            rc |= SSMR3PutBool(pSSM, pVmcbNstGstCache->fNestedPaging);
+            rc |= SSMR3PutBool(pSSM, pVmcbNstGstCache->fLbrVirt);
+            AssertRCReturn(rc, rc);
+        }
+    }
+
+    /* Save the guest patch data. */
+    rc  = SSMR3PutGCPtr(pSSM, pVM->hm.s.pGuestPatchMem);
+    rc |= SSMR3PutGCPtr(pSSM, pVM->hm.s.pFreeGuestPatchMem);
+    rc |= SSMR3PutU32(pSSM, pVM->hm.s.cbGuestPatchMem);
+
+    /* Store all the guest patch records too. */
+    rc |= SSMR3PutU32(pSSM, pVM->hm.s.cPatches);
+    AssertRCReturn(rc, rc);
+
+    for (uint32_t i = 0; i < pVM->hm.s.cPatches; i++)
+    {
+        AssertCompileSize(HMTPRINSTR, 4);
+        PCHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
+        rc  = SSMR3PutU32(pSSM, pPatch->Core.Key);
+        rc |= SSMR3PutMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
+        rc |= SSMR3PutU32(pSSM, pPatch->cbOp);
+        rc |= SSMR3PutMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
+        rc |= SSMR3PutU32(pSSM, pPatch->cbNewOp);
+        rc |= SSMR3PutU32(pSSM, (uint32_t)pPatch->enmType);
+        rc |= SSMR3PutU32(pSSM, pPatch->uSrcOperand);
+        rc |= SSMR3PutU32(pSSM, pPatch->uDstOperand);
+        rc |= SSMR3PutU32(pSSM, pPatch->pJumpTarget);
+        rc |= SSMR3PutU32(pSSM, pPatch->cFaults);
+        AssertRCReturn(rc, rc);
+    }

     return VINF_SUCCESS;
 }
 …
@@ -3566 +3543 @@
      * Validate version.
      */
-    if (   uVersion != HM_SAVED_STATE_VERSION
-        && uVersion != HM_SAVED_STATE_VERSION_NO_PATCHING
+    if (   uVersion != HM_SAVED_STATE_VERSION_SVM_NESTED_HWVIRT
+        && uVersion != HM_SAVED_STATE_VERSION_TPR_PATCHING
+        && uVersion != HM_SAVED_STATE_VERSION_NO_TPR_PATCHING
         && uVersion != HM_SAVED_STATE_VERSION_2_0_X)
     {
 …
         return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
     }
+
+    /*
+     * Load per-VCPU state.
+     */
     for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
-        rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hm.s.Event.fPending);
-        AssertRCReturn(rc, rc);
-        rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hm.s.Event.u32ErrCode);
-        AssertRCReturn(rc, rc);
-        rc = SSMR3GetU64(pSSM, &pVM->aCpus[i].hm.s.Event.u64IntInfo);
-        AssertRCReturn(rc, rc);
-
-        if (uVersion >= HM_SAVED_STATE_VERSION_NO_PATCHING)
-        {
-            uint32_t val;
-            /** @todo See note in hmR3Save(). */
-            rc = SSMR3GetU32(pSSM, &val);
-            AssertRCReturn(rc, rc);
-            rc = SSMR3GetU32(pSSM, &val);
-            AssertRCReturn(rc, rc);
-            rc = SSMR3GetU32(pSSM, &val);
-            AssertRCReturn(rc, rc);
-        }
-    }
-#ifdef VBOX_HM_WITH_GUEST_PATCHING
-    if (uVersion > HM_SAVED_STATE_VERSION_NO_PATCHING)
-    {
-        rc = SSMR3GetGCPtr(pSSM, &pVM->hm.s.pGuestPatchMem);
-        AssertRCReturn(rc, rc);
-        rc = SSMR3GetGCPtr(pSSM, &pVM->hm.s.pFreeGuestPatchMem);
-        AssertRCReturn(rc, rc);
-        rc = SSMR3GetU32(pSSM, &pVM->hm.s.cbGuestPatchMem);
-        AssertRCReturn(rc, rc);
-
-        /* Fetch all TPR patch records. */
-        rc = SSMR3GetU32(pSSM, &pVM->hm.s.cPatches);
-        AssertRCReturn(rc, rc);
-
-        for (unsigned i = 0; i < pVM->hm.s.cPatches; i++)
-        {
-            PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
-
-            rc = SSMR3GetU32(pSSM, &pPatch->Core.Key);
-            AssertRCReturn(rc, rc);
-
-            rc = SSMR3GetMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
-            AssertRCReturn(rc, rc);
-
-            rc = SSMR3GetU32(pSSM, &pPatch->cbOp);
-            AssertRCReturn(rc, rc);
-
-            rc = SSMR3GetMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
-            AssertRCReturn(rc, rc);
-
-            rc = SSMR3GetU32(pSSM, &pPatch->cbNewOp);
-            AssertRCReturn(rc, rc);
-
-            rc = SSMR3GetU32(pSSM, (uint32_t *)&pPatch->enmType);
-            AssertRCReturn(rc, rc);
-
-            if (pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT)
-                pVM->hm.s.fTPRPatchingActive = true;
-
-            Assert(pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT || pVM->hm.s.fTPRPatchingActive == false);
-
-            rc = SSMR3GetU32(pSSM, &pPatch->uSrcOperand);
-            AssertRCReturn(rc, rc);
-
-            rc = SSMR3GetU32(pSSM, &pPatch->uDstOperand);
-            AssertRCReturn(rc, rc);
-
-            rc = SSMR3GetU32(pSSM, &pPatch->cFaults);
-            AssertRCReturn(rc, rc);
-
-            rc = SSMR3GetU32(pSSM, &pPatch->pJumpTarget);
-            AssertRCReturn(rc, rc);
-
-            Log(("hmR3Load: patch %d\n", i));
-            Log(("Key = %x\n", pPatch->Core.Key));
-            Log(("cbOp = %d\n", pPatch->cbOp));
-            Log(("cbNewOp = %d\n", pPatch->cbNewOp));
-            Log(("type = %d\n", pPatch->enmType));
-            Log(("srcop = %d\n", pPatch->uSrcOperand));
-            Log(("dstop = %d\n", pPatch->uDstOperand));
-            Log(("cFaults = %d\n", pPatch->cFaults));
-            Log(("target = %x\n", pPatch->pJumpTarget));
-            rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
-            AssertRC(rc);
-        }
-    }
-#endif
+        if (uVersion >= HM_SAVED_STATE_VERSION_SVM_NESTED_HWVIRT)
+        {
+            /* Load the SVM nested hw.virt state if the VM is configured for it. */
+            if (pVM->cpum.ro.GuestFeatures.fSvm)
+            {
+                PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVM->aCpus[i].hm.s.svm.NstGstVmcbCache;
+                rc  = SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptRdCRx);
+                rc |= SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptWrCRx);
+                rc |= SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptRdDRx);
+                rc |= SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptWrDRx);
+                rc |= SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16PauseFilterThreshold);
+                rc |= SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16PauseFilterCount);
+                rc |= SSMR3GetU32(pSSM, &pVmcbNstGstCache->u32InterceptXcpt);
+                rc |= SSMR3GetU64(pSSM, &pVmcbNstGstCache->u64InterceptCtrl);
+                rc |= SSMR3GetU64(pSSM, &pVmcbNstGstCache->u64TSCOffset);
+                rc |= SSMR3GetBool(pSSM, &pVmcbNstGstCache->fVIntrMasking);
+                rc |= SSMR3GetBool(pSSM, &pVmcbNstGstCache->fNestedPaging);
+                rc |= SSMR3GetBool(pSSM, &pVmcbNstGstCache->fLbrVirt);
+                AssertRCReturn(rc, rc);
+            }
+        }
+        else
+        {
+            /* Pending HM event (obsolete for a long time since TPRM holds the info.) */
+            rc  = SSMR3GetU32(pSSM, &pVM->aCpus[i].hm.s.Event.fPending);
+            rc |= SSMR3GetU32(pSSM, &pVM->aCpus[i].hm.s.Event.u32ErrCode);
+            rc |= SSMR3GetU64(pSSM, &pVM->aCpus[i].hm.s.Event.u64IntInfo);
+
+            /* VMX fWasInRealMode related data. */
+            uint32_t uDummy;
+            rc |= SSMR3GetU32(pSSM, &uDummy);    AssertRCReturn(rc, rc);
+            rc |= SSMR3GetU32(pSSM, &uDummy);    AssertRCReturn(rc, rc);
+            rc |= SSMR3GetU32(pSSM, &uDummy);    AssertRCReturn(rc, rc);
+            AssertRCReturn(rc, rc);
+        }
+    }
+
+    /*
+     * Load TPR patching data.
+     */
+    if (uVersion >= HM_SAVED_STATE_VERSION_TPR_PATCHING)
+    {
+        rc  = SSMR3GetGCPtr(pSSM, &pVM->hm.s.pGuestPatchMem);
+        rc |= SSMR3GetGCPtr(pSSM, &pVM->hm.s.pFreeGuestPatchMem);
+        rc |= SSMR3GetU32(pSSM, &pVM->hm.s.cbGuestPatchMem);
+
+        /* Fetch all TPR patch records. */
+        rc |= SSMR3GetU32(pSSM, &pVM->hm.s.cPatches);
+        AssertRCReturn(rc, rc);
+        for (uint32_t i = 0; i < pVM->hm.s.cPatches; i++)
+        {
+            PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
+            rc  = SSMR3GetU32(pSSM, &pPatch->Core.Key);
+            rc |= SSMR3GetMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
+            rc |= SSMR3GetU32(pSSM, &pPatch->cbOp);
+            rc |= SSMR3GetMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
+            rc |= SSMR3GetU32(pSSM, &pPatch->cbNewOp);
+            rc |= SSMR3GetU32(pSSM, (uint32_t *)&pPatch->enmType);
+
+            if (pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT)
+                pVM->hm.s.fTPRPatchingActive = true;
+            Assert(pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT || pVM->hm.s.fTPRPatchingActive == false);
+
+            rc |= SSMR3GetU32(pSSM, &pPatch->uSrcOperand);
+            rc |= SSMR3GetU32(pSSM, &pPatch->uDstOperand);
+            rc |= SSMR3GetU32(pSSM, &pPatch->cFaults);
+            rc |= SSMR3GetU32(pSSM, &pPatch->pJumpTarget);
+            AssertRCReturn(rc, rc);
+
+            LogFlow(("hmR3Load: patch %d\n", i));
+            LogFlow(("Key     = %x\n", pPatch->Core.Key));
+            LogFlow(("cbOp    = %d\n", pPatch->cbOp));
+            LogFlow(("cbNewOp = %d\n", pPatch->cbNewOp));
+            LogFlow(("type    = %d\n", pPatch->enmType));
+            LogFlow(("srcop   = %d\n", pPatch->uSrcOperand));
+            LogFlow(("dstop   = %d\n", pPatch->uDstOperand));
+            LogFlow(("cFaults = %d\n", pPatch->cFaults));
+            LogFlow(("target  = %x\n", pPatch->pJumpTarget));
+
+            rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
+            AssertRCReturn(rc, rc);
+        }
+    }

     return VINF_SUCCESS;
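A detail of the load path worth noting: the TPR patch records are serialized as a flat array, while the runtime lookup structure (pVM->hm.s.PatchTree) is rebuilt on load by re-inserting each record via RTAvloU32Insert(). A hedged sketch of that flatten-on-save / rebuild-on-load idiom with a hypothetical record type (the IPRT AVL types and calls are real; everything else is illustrative):

    #include <iprt/avl.h>
    #include <iprt/assert.h>
    #include <VBox/err.h>

    /* Hypothetical record: an AVL node core plus payload, mirroring HMTPRPATCH,
       where Core.Key is also the first field written to the saved state. */
    typedef struct EXAMPLERECORD
    {
        AVLOU32NODECORE Core;
        uint32_t        uData;
    } EXAMPLERECORD;

    /* Rebuild the lookup tree from the flat array, as hmR3Load does for aPatches. */
    static int exampleRebuildTree(PAVLOU32TREE pTree, EXAMPLERECORD *paRecs, uint32_t cRecs)
    {
        for (uint32_t i = 0; i < cRecs; i++)
        {
            bool fRc = RTAvloU32Insert(pTree, &paRecs[i].Core);
            AssertReturn(fRc, VERR_INTERNAL_ERROR);  /* duplicate key implies a corrupt saved state */
        }
        return VINF_SUCCESS;
    }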
trunk/src/VBox/VMM/include/CPUMInternal.h
r69474 → r72178

@@ -117 +117 @@
  * @{ */
 /** The current saved state version. */
-#define CPUM_SAVED_STATE_VERSION                CPUM_SAVED_STATE_VERSION_XSAVE
+#define CPUM_SAVED_STATE_VERSION                CPUM_SAVED_STATE_VERSION_HWVIRT_SVM
+/** The saved state version including SVM hardware virtualization state. */
+#define CPUM_SAVED_STATE_VERSION_HWVIRT_SVM     18
 /** The saved state version including XSAVE state. */
 #define CPUM_SAVED_STATE_VERSION_XSAVE          17
trunk/src/VBox/VMM/include/HMInternal.h
r71910 → r72178

@@ -249 +249 @@


-/** Enable for TPR guest patching. */
-#define VBOX_HM_WITH_GUEST_PATCHING
-
-/** @name HM saved state versions
+/** @name HM saved state versions.
  * @{
  */
-#ifdef VBOX_HM_WITH_GUEST_PATCHING
-# define HM_SAVED_STATE_VERSION                 5
-# define HM_SAVED_STATE_VERSION_NO_PATCHING     4
-#else
-# define HM_SAVED_STATE_VERSION                 4
-# define HM_SAVED_STATE_VERSION_NO_PATCHING     4
-#endif
-#define HM_SAVED_STATE_VERSION_2_0_X            3
+#define HM_SAVED_STATE_VERSION                   HM_SAVED_STATE_VERSION_SVM_NESTED_HWVIRT
+#define HM_SAVED_STATE_VERSION_SVM_NESTED_HWVIRT 6
+#define HM_SAVED_STATE_VERSION_TPR_PATCHING      5
+#define HM_SAVED_STATE_VERSION_NO_TPR_PATCHING   4
+#define HM_SAVED_STATE_VERSION_2_0_X             3
 /** @} */
+

 /**
 …
@@ -361 +356 @@
 /** Pointer to HMTPRPATCH. */
 typedef HMTPRPATCH *PHMTPRPATCH;
+/** Pointer to a const HMTPRPATCH. */
+typedef const HMTPRPATCH *PCHMTPRPATCH;
