Changeset 92465 in vbox
- Timestamp: Nov 17, 2021 3:01:09 AM
- Location: trunk
- Files: 10 edited
- include/VBox/vmm/nem.h (modified) (3 diffs)
- include/VBox/vmm/vm.h (modified) (1 diff)
- include/VBox/vmm/vm.mac (modified) (1 diff)
- src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h (modified) (5 diffs)
- src/VBox/VMM/VMMR0/NEMR0Native-win.cpp (modified) (2 diffs)
- src/VBox/VMM/VMMR3/NEMR3Native-darwin.cpp (modified) (4 diffs)
- src/VBox/VMM/VMMR3/NEMR3Native-linux.cpp (modified) (9 diffs)
- src/VBox/VMM/VMMR3/NEMR3Native-win.cpp (modified) (4 diffs)
- src/VBox/VMM/VMMR3/PGMPhys.cpp (modified) (7 diffs)
- src/VBox/VMM/include/NEMInternal.h (modified) (2 diffs)
trunk/include/VBox/vmm/nem.h
r92449 → r92465 (hunks at lines 100-103, 127-133, 145-152)

                                                 void *pvRam, void *pvMmio2, uint32_t *puNemRange);
   VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
 -                                                void *pvRam, void *pvMmio2, uint8_t *pu2State);
 +                                                void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange);
   /** @name Flags for NEMR3NotifyPhysMmioExMap and NEMR3NotifyPhysMmioExUnmap.
    * @{ */
   …
    * @param   fFlags          NEM_NOTIFY_PHYS_ROM_F_XXX.
    * @param   pu2State        New page state or UINT8_MAX to leave as-is.
 +  * @param   puNemRange      Access to the relevant PGMRAMRANGE::uNemRange field.
    */
   VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
 -                                                     uint32_t fFlags, uint8_t *pu2State);
 +                                                     uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange);

   /**
   …
    * @param   pu2State        Where to return the new NEM page state, UINT8_MAX
    *                          for unchanged.
 +  * @param   puNemRange      Access to the relevant PGMRAMRANGE::uNemRange field.
    */
   VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
 -                                                    uint32_t fFlags, uint8_t *pu2State);
 +                                                    uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange);

   /** @name Flags for NEMR3NotifyPhysRomRegisterEarly and NEMR3NotifyPhysRomRegisterLate.
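These prototypes now thread a puNemRange pointer through the MMIO/ROM notification callbacks so the NEM backend can record a per-range handle (for the new Linux backend this ends up being a KVM memory-slot ID) directly in the caller's PGMRAMRANGE::uNemRange field and read it back on unmap. A minimal sketch of that out-parameter round trip, with simplified stand-in types and a pretend slot allocator rather than the real PGM/NEM code:

    /* Hypothetical, simplified illustration of the puNemRange round trip. */
    #include <stdint.h>
    #include <stdio.h>

    typedef struct RAMRANGE
    {
        uint64_t GCPhys;
        uint64_t cb;
        uint32_t uNemRange;   /* backend-private handle, e.g. a memory-slot ID */
    } RAMRANGE;

    /* Backend registration: stores its slot ID via the out parameter. */
    static int backendRegisterEarly(uint64_t GCPhys, uint64_t cb, uint32_t *puNemRange)
    {
        static uint32_t s_idNext = 1;                /* pretend slot allocator */
        *puNemRange = s_idNext++;
        printf("registered %#llx LB %#llx as slot %u\n",
               (unsigned long long)GCPhys, (unsigned long long)cb, *puNemRange);
        return 0;
    }

    /* Backend unmap: reads the handle back and releases it. */
    static int backendUnmap(uint32_t *puNemRange)
    {
        printf("releasing slot %u\n", *puNemRange);
        *puNemRange = UINT32_MAX;
        return 0;
    }

    int main(void)
    {
        RAMRANGE Range = { 0xfffc0000, 0x40000, 0 };
        backendRegisterEarly(Range.GCPhys, Range.cb, &Range.uNemRange);
        backendUnmap(&Range.uNemRange);
        return 0;
    }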
trunk/include/VBox/vmm/vm.h
r92408 → r92465 (lines 1346-1350)

           struct NEM  s;
   #endif
 -         uint8_t     padding[512];       /* multiple of 64 */
 +         uint8_t     padding[4608];      /* multiple of 64 */
       } nem;
trunk/include/VBox/vmm/vm.mac
r92408 → r92465 (lines 149-153)

       .em     resb    256
       alignb  64
 -     .nem    resb    512
 +     .nem    resb    4608
       alignb  64
       .tm     resb    10112
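The vm.h padding for the NEM union grows from 512 to 4608 bytes (the Linux backend's per-VM state, including the 32K-bit slot-ID bitmap, no longer fits in 512), and the assembly mirror in vm.mac must grow in lock-step since it describes the same layout to .asm code. A hedged sketch of the kind of compile-time guard that keeps such a padded union honest on the C side; NEMSTATE and the 4608-byte figure mirror the changeset, everything else is illustrative and not the tree's real AssertCompile machinery:

    #include <stdint.h>

    typedef struct NEMSTATE { uint8_t abStuff[4600]; } NEMSTATE;   /* stand-in */

    typedef union
    {
        NEMSTATE s;
        uint8_t  padding[4608];              /* must be a multiple of 64 */
    } NEMUNION;

    /* The union may never be smaller than the state it wraps, and the padding
     * must keep the 64-byte multiple promised in the comment. */
    _Static_assert(sizeof(NEMSTATE) <= sizeof(((NEMUNION *)0)->padding), "padding too small");
    _Static_assert(sizeof(NEMUNION) % 64 == 0, "not a multiple of 64");

    int main(void) { return 0; }

The assembly side (the `.nem resb 4608` above) has no such automatic check and simply has to match the header by hand.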
trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h
r92449 r92465 372 372 ADD_REG64(WHvX64RegisterSfmask, pVCpu->cpum.GstCtx.msrSFMASK); 373 373 } 374 if (fWhat & CPUMCTX_EXTRN_TSC_AUX) 375 ADD_REG64(WHvX64RegisterTscAux, pCtxMsrs->msr.TscAux); 374 376 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS) 375 377 { … … 392 394 ADD_REG64(WHvX64RegisterMsrMtrrFix4kF0000, pCtxMsrs->msr.MtrrFix4K_F0000); 393 395 ADD_REG64(WHvX64RegisterMsrMtrrFix4kF8000, pCtxMsrs->msr.MtrrFix4K_F8000); 394 ADD_REG64(WHvX64RegisterTscAux, pCtxMsrs->msr.TscAux);395 396 #if 0 /** @todo these registers aren't available? Might explain something.. .*/ 396 397 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pVM); … … 667 668 // const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pVM); 668 669 //#endif 670 if (fWhat & CPUMCTX_EXTRN_TSC_AUX) 671 aenmNames[iReg++] = WHvX64RegisterTscAux; 669 672 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS) 670 673 { … … 686 689 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kF0000; 687 690 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kF8000; 688 aenmNames[iReg++] = WHvX64RegisterTscAux;689 691 /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */ 690 692 //#ifdef LOG_ENABLED … … 1011 1013 GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrSFMASK, WHvX64RegisterSfmask, "MSR SFMASK"); 1012 1014 } 1013 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS) 1014 { 1015 Assert(aenmNames[iReg] == WHvX64RegisterApicBase); 1016 const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu); 1017 if (aValues[iReg].Reg64 != uOldBase) 1018 { 1019 Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n", 1020 pVCpu->idCpu, uOldBase, aValues[iReg].Reg64, aValues[iReg].Reg64 ^ uOldBase)); 1021 int rc2 = APICSetBaseMsr(pVCpu, aValues[iReg].Reg64); 1022 AssertLogRelMsg(rc2 == VINF_SUCCESS, ("%Rrc %RX64\n", rc2, aValues[iReg].Reg64)); 1023 } 1024 iReg++; 1025 1026 GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrPAT, WHvX64RegisterPat, "MSR PAT"); 1015 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS)) 1016 { 1017 PCPUMCTXMSRS const pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu); 1018 if (fWhat & CPUMCTX_EXTRN_TSC_AUX) 1019 GET_REG64_LOG7(pCtxMsrs->msr.TscAux, WHvX64RegisterTscAux, "MSR TSC_AUX"); 1020 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS) 1021 { 1022 Assert(aenmNames[iReg] == WHvX64RegisterApicBase); 1023 const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu); 1024 if (aValues[iReg].Reg64 != uOldBase) 1025 { 1026 Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n", 1027 pVCpu->idCpu, uOldBase, aValues[iReg].Reg64, aValues[iReg].Reg64 ^ uOldBase)); 1028 int rc2 = APICSetBaseMsr(pVCpu, aValues[iReg].Reg64); 1029 AssertLogRelMsg(rc2 == VINF_SUCCESS, ("%Rrc %RX64\n", rc2, aValues[iReg].Reg64)); 1030 } 1031 iReg++; 1032 1033 GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrPAT, WHvX64RegisterPat, "MSR PAT"); 1027 1034 #if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? 
(AMD) */ 1028 GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrPAT, WHvX64RegisterMsrMtrrCap);1035 GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrPAT, WHvX64RegisterMsrMtrrCap); 1029 1036 #endif 1030 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu); 1031 GET_REG64_LOG7(pCtxMsrs->msr.MtrrDefType, WHvX64RegisterMsrMtrrDefType, "MSR MTRR_DEF_TYPE"); 1032 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix64K_00000, WHvX64RegisterMsrMtrrFix64k00000, "MSR MTRR_FIX_64K_00000"); 1033 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_80000, WHvX64RegisterMsrMtrrFix16k80000, "MSR MTRR_FIX_16K_80000"); 1034 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_A0000, WHvX64RegisterMsrMtrrFix16kA0000, "MSR MTRR_FIX_16K_A0000"); 1035 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C0000, WHvX64RegisterMsrMtrrFix4kC0000, "MSR MTRR_FIX_4K_C0000"); 1036 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C8000, WHvX64RegisterMsrMtrrFix4kC8000, "MSR MTRR_FIX_4K_C8000"); 1037 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D0000, WHvX64RegisterMsrMtrrFix4kD0000, "MSR MTRR_FIX_4K_D0000"); 1038 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D8000, WHvX64RegisterMsrMtrrFix4kD8000, "MSR MTRR_FIX_4K_D8000"); 1039 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E0000, WHvX64RegisterMsrMtrrFix4kE0000, "MSR MTRR_FIX_4K_E0000"); 1040 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E8000, WHvX64RegisterMsrMtrrFix4kE8000, "MSR MTRR_FIX_4K_E8000"); 1041 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F0000, WHvX64RegisterMsrMtrrFix4kF0000, "MSR MTRR_FIX_4K_F0000"); 1042 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F8000, WHvX64RegisterMsrMtrrFix4kF8000, "MSR MTRR_FIX_4K_F8000"); 1043 GET_REG64_LOG7(pCtxMsrs->msr.TscAux, WHvX64RegisterTscAux, "MSR TSC_AUX"); 1044 /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */ 1037 GET_REG64_LOG7(pCtxMsrs->msr.MtrrDefType, WHvX64RegisterMsrMtrrDefType, "MSR MTRR_DEF_TYPE"); 1038 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix64K_00000, WHvX64RegisterMsrMtrrFix64k00000, "MSR MTRR_FIX_64K_00000"); 1039 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_80000, WHvX64RegisterMsrMtrrFix16k80000, "MSR MTRR_FIX_16K_80000"); 1040 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_A0000, WHvX64RegisterMsrMtrrFix16kA0000, "MSR MTRR_FIX_16K_A0000"); 1041 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C0000, WHvX64RegisterMsrMtrrFix4kC0000, "MSR MTRR_FIX_4K_C0000"); 1042 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C8000, WHvX64RegisterMsrMtrrFix4kC8000, "MSR MTRR_FIX_4K_C8000"); 1043 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D0000, WHvX64RegisterMsrMtrrFix4kD0000, "MSR MTRR_FIX_4K_D0000"); 1044 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D8000, WHvX64RegisterMsrMtrrFix4kD8000, "MSR MTRR_FIX_4K_D8000"); 1045 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E0000, WHvX64RegisterMsrMtrrFix4kE0000, "MSR MTRR_FIX_4K_E0000"); 1046 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E8000, WHvX64RegisterMsrMtrrFix4kE8000, "MSR MTRR_FIX_4K_E8000"); 1047 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F0000, WHvX64RegisterMsrMtrrFix4kF0000, "MSR MTRR_FIX_4K_F0000"); 1048 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F8000, WHvX64RegisterMsrMtrrFix4kF8000, "MSR MTRR_FIX_4K_F8000"); 1049 /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */ 1050 } 1045 1051 } 1046 1052 -
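The net effect of the template changes is that TSC_AUX is marshalled under its own CPUMCTX_EXTRN_TSC_AUX flag instead of riding along with CPUMCTX_EXTRN_OTHER_MSRS, on both the export (register-add) and import (register-read) paths. A minimal sketch of that flag-gated marshalling pattern; the flag bit values and the toy register list are made up for illustration, the real code appends WHvX64RegisterTscAux to the Windows Hypervisor Platform register arrays:

    #include <stdint.h>
    #include <stdio.h>

    #define EXTRN_TSC_AUX     UINT64_C(0x0001)   /* illustrative bit values */
    #define EXTRN_OTHER_MSRS  UINT64_C(0x0002)

    typedef struct { const char *pszName; uint64_t uValue; } REGENTRY;

    static unsigned exportMsrs(uint64_t fWhat, uint64_t uTscAux, uint64_t uPat,
                               REGENTRY *paRegs, unsigned iReg)
    {
        if (fWhat & EXTRN_TSC_AUX)               /* only when explicitly requested */
        {
            paRegs[iReg].pszName = "TscAux";
            paRegs[iReg].uValue  = uTscAux;
            iReg++;
        }
        if (fWhat & EXTRN_OTHER_MSRS)            /* PAT, MTRRs, ... */
        {
            paRegs[iReg].pszName = "Pat";
            paRegs[iReg].uValue  = uPat;
            iReg++;
        }
        return iReg;
    }

    int main(void)
    {
        REGENTRY aRegs[8];
        unsigned cRegs = exportMsrs(EXTRN_TSC_AUX, 0x1, 0x0007040600070406, aRegs, 0);
        for (unsigned i = 0; i < cRegs; i++)
            printf("%s=%#llx\n", aRegs[i].pszName, (unsigned long long)aRegs[i].uValue);
        return 0;
    }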
trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp
r92194 → r92465 (hunks at lines 1702-1712, 1771-1781)

           iReg++;
       }
 +     if (fWhat & CPUMCTX_EXTRN_TSC_AUX)
 +     {
 +         HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
 +         pInput->Elements[iReg].Name        = HvX64RegisterTscAux;
 +         pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.TscAux;
 +         iReg++;
 +     }
       if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
       {
   …
           pInput->Elements[iReg].Name        = HvX64RegisterMtrrFix4kF8000;
           pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F8000;
 -         iReg++;
 -         HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
 -         pInput->Elements[iReg].Name        = HvX64RegisterTscAux;
 -         pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.TscAux;
           iReg++;
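The ring-0 path builds a hypercall input array of fixed-size register-association elements, and each element's padding and high value half must be zeroed before it is filled in, which is what the HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64 macro in the diff is for. A small sketch of that pattern with a stand-in struct (not the real Hyper-V HV_REGISTER_ASSOC layout):

    #include <stdint.h>
    #include <string.h>

    typedef struct ELEMENT
    {
        uint32_t Name;
        uint32_t au32Padding[3];     /* must be zero when handed to the hypervisor */
        union { uint64_t Reg64; uint64_t au64[2]; } Value;
    } ELEMENT;

    static void addReg64(ELEMENT *pElem, uint32_t uName, uint64_t uValue)
    {
        memset(pElem, 0, sizeof(*pElem));   /* clear padding and the high half */
        pElem->Name        = uName;
        pElem->Value.Reg64 = uValue;
    }

    int main(void)
    {
        ELEMENT  aElems[4];
        unsigned iReg = 0;
        addReg64(&aElems[iReg++], /* hypothetical TscAux register number */ 0x2103, 0x1);
        return (int)(iReg - 1);
    }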
trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin.cpp
r92456 r92465 2920 2920 2921 2921 VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam, 2922 void *pvMmio2, uint8_t *pu2State )2922 void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange) 2923 2923 { 2924 2924 RT_NOREF(pVM); 2925 2925 2926 Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p \n",2927 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State ));2926 Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n", 2927 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange)); 2928 2928 2929 2929 int rc = VINF_SUCCESS; … … 2987 2987 2988 2988 VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags, 2989 uint8_t *pu2State )2990 { 2991 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags );2989 uint8_t *pu2State, uint32_t *puNemRange) 2990 { 2991 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange); 2992 2992 2993 2993 Log5(("nemR3NativeNotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags)); 2994 *pu2State = UINT8_MAX; 2994 *pu2State = UINT8_MAX; 2995 *puNemRange = 0; 2995 2996 return VINF_SUCCESS; 2996 2997 } … … 2998 2999 2999 3000 VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, 3000 uint32_t fFlags, uint8_t *pu2State )3001 { 3002 Log5(("nemR3NativeNotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p \n",3003 GCPhys, cb, pvPages, fFlags, pu2State ));3001 uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange) 3002 { 3003 Log5(("nemR3NativeNotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n", 3004 GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange)); 3004 3005 *pu2State = UINT8_MAX; 3005 3006 … … 3018 3019 return VERR_NEM_MAP_PAGES_FAILED; 3019 3020 } 3020 RT_NOREF(pVM, fFlags );3021 RT_NOREF(pVM, fFlags, puNemRange); 3021 3022 return VINF_SUCCESS; 3022 3023 #else 3023 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags );3024 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange); 3024 3025 return VERR_NEM_MAP_PAGES_FAILED; 3025 3026 #endif -
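The Darwin stub changes are mostly signature plumbing plus richer Log5 statements that print both the puNemRange pointer and its value, while other paths keep treating pu2State as optional. A small hedged sketch, with stand-in types, of logging and updating such optional out-parameters without risking a NULL dereference, mirroring the "puNemRange ? *puNemRange : UINT32_MAX" style used in the early-map notification elsewhere in this changeset:

    #include <stdint.h>
    #include <stdio.h>

    static int notifyUnmap(uint64_t GCPhys, uint8_t *pu2State, uint32_t *puNemRange)
    {
        printf("unmap %#llx u2State=%p (%d) uNemRange=%#x\n",
               (unsigned long long)GCPhys, (void *)pu2State,
               pu2State ? *pu2State : -1,
               puNemRange ? *puNemRange : UINT32_MAX);
        if (pu2State)
            *pu2State = UINT8_MAX;      /* "leave as-is / unknown" sentinel, as in the diff */
        return 0;
    }

    int main(void)
    {
        uint8_t  u2State   = 0;
        uint32_t uNemRange = 7;
        notifyUnmap(0xe0000000, &u2State, &uNemRange);
        notifyUnmap(0xe0000000, NULL, NULL);
        return 0;
    }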
trunk/src/VBox/VMM/VMMR3/NEMR3Native-linux.cpp
r92445 r92465 126 126 CAP_ENTRY__L(KVM_CAP_INTERNAL_ERROR_DATA), /* 40 */ 127 127 #ifdef __KVM_HAVE_VCPU_EVENTS 128 CAP_ENTRY__L(KVM_CAP_VCPU_EVENTS), 128 CAP_ENTRY_ML(KVM_CAP_VCPU_EVENTS), 129 #else 130 CAP_ENTRY_MU(41), 129 131 #endif 130 132 CAP_ENTRY__L(KVM_CAP_S390_PSW), … … 144 146 CAP_ENTRY__L(KVM_CAP_ENABLE_CAP), 145 147 #ifdef __KVM_HAVE_XSAVE 146 CAP_ENTRY__L(KVM_CAP_XSAVE), 148 CAP_ENTRY_ML(KVM_CAP_XSAVE), 149 #else 150 CAP_ENTRY_MU(55), 147 151 #endif 148 152 #ifdef __KVM_HAVE_XCRS 149 CAP_ENTRY__L(KVM_CAP_XCRS), 153 CAP_ENTRY_ML(KVM_CAP_XCRS), 154 #else 155 CAP_ENTRY_MU(56), 150 156 #endif 151 157 CAP_ENTRY__L(KVM_CAP_PPC_GET_PVINFO), … … 280 286 CAP_ENTRY__L(KVM_CAP_S390_DIAG318), 281 287 CAP_ENTRY__L(KVM_CAP_STEAL_TIME), 282 CAP_ENTRY_ _L(KVM_CAP_X86_USER_SPACE_MSR),283 CAP_ENTRY_ _L(KVM_CAP_X86_MSR_FILTER),288 CAP_ENTRY_ML(KVM_CAP_X86_USER_SPACE_MSR), /* (since 5.10) */ 289 CAP_ENTRY_ML(KVM_CAP_X86_MSR_FILTER), 284 290 CAP_ENTRY__L(KVM_CAP_ENFORCE_PV_FEATURE_CPUID), /* 190 */ 285 291 CAP_ENTRY__L(KVM_CAP_SYS_HYPERV_CPUID), … … 378 384 rcRet = RTERRINFO_LOG_REL_ADD_F(pErrInfo, VERR_NEM_INIT_FAILED, "Odd KVM_GET_VCPU_MMAP_SIZE value: %#x (%d)", rc, rc); 379 385 386 /* 387 * Init the slot ID bitmap. 388 */ 389 ASMBitSet(&pVM->nem.s.bmSlotIds[0], 0); /* don't use slot 0 */ 390 if (pVM->nem.s.cMaxMemSlots < _32K) 391 ASMBitSetRange(&pVM->nem.s.bmSlotIds[0], pVM->nem.s.cMaxMemSlots, _32K); 392 ASMBitSet(&pVM->nem.s.bmSlotIds[0], _32K - 1); /* don't use the last slot */ 393 380 394 return rcRet; 381 395 } … … 411 425 if ((void *)pVCpu->nem.s.pRun == MAP_FAILED) 412 426 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "mmap failed for VCpu #%u: %d", idCpu, errno); 427 428 /* We want all x86 registers and events on each exit. */ 429 pVCpu->nem.s.pRun->kvm_valid_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS | KVM_SYNC_X86_EVENTS; 413 430 } 414 431 return VINF_SUCCESS; … … 553 570 /** @todo */ 554 571 555 return V ERR_NOT_IMPLEMENTED;572 return VINF_SUCCESS; 556 573 } 557 574 … … 566 583 int nemR3NativeTerm(PVM pVM) 567 584 { 568 RT_NOREF(pVM); 585 /* 586 * Per-cpu data 587 */ 588 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++) 589 { 590 PVMCPU pVCpu = pVM->apCpusR3[idCpu]; 591 592 if (pVCpu->nem.s.fdVCpu != -1) 593 { 594 close(pVCpu->nem.s.fdVCpu); 595 pVCpu->nem.s.fdVCpu = -1; 596 } 597 if (pVCpu->nem.s.pRun) 598 { 599 munmap(pVCpu->nem.s.pRun, pVM->nem.s.cbVCpuMmap); 600 pVCpu->nem.s.pRun = NULL; 601 } 602 } 603 604 /* 605 * Global data. 606 */ 607 if (pVM->nem.s.fdVm != -1) 608 { 609 close(pVM->nem.s.fdVm); 610 pVM->nem.s.fdVm = -1; 611 } 612 613 if (pVM->nem.s.fdKvm != -1) 614 { 615 close(pVM->nem.s.fdKvm); 616 pVM->nem.s.fdKvm = -1; 617 } 569 618 return VINF_SUCCESS; 570 619 } … … 595 644 596 645 597 VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu) 598 { 599 RT_NOREF(pVM, pVCpu); 646 /********************************************************************************************************************************* 647 * Memory management * 648 *********************************************************************************************************************************/ 649 650 651 /** 652 * Allocates a memory slot ID. 653 * 654 * @returns Slot ID on success, UINT16_MAX on failure. 655 */ 656 static uint16_t nemR3LnxMemSlotIdAlloc(PVM pVM) 657 { 658 /* Use the hint first. 
*/ 659 uint16_t idHint = pVM->nem.s.idPrevSlot; 660 if (idHint < _32K - 1) 661 { 662 int32_t idx = ASMBitNextClear(&pVM->nem.s.bmSlotIds, _32K, idHint); 663 Assert(idx < _32K); 664 if (idx > 0 && !ASMAtomicBitTestAndSet(&pVM->nem.s.bmSlotIds, idx)) 665 return pVM->nem.s.idPrevSlot = (uint16_t)idx; 666 } 667 668 /* 669 * Search the whole map from the start. 670 */ 671 int32_t idx = ASMBitFirstClear(&pVM->nem.s.bmSlotIds, _32K); 672 Assert(idx < _32K); 673 if (idx > 0 && !ASMAtomicBitTestAndSet(&pVM->nem.s.bmSlotIds, idx)) 674 return pVM->nem.s.idPrevSlot = (uint16_t)idx; 675 676 Assert(idx < 0 /*shouldn't trigger unless there is a race */); 677 return UINT16_MAX; /* caller is expected to assert. */ 678 } 679 680 681 /** 682 * Frees a memory slot ID 683 */ 684 static void nemR3LnxMemSlotIdFree(PVM pVM, uint16_t idSlot) 685 { 686 if (RT_LIKELY(idSlot < _32K && ASMAtomicBitTestAndClear(&pVM->nem.s.bmSlotIds, idSlot))) 687 { /*likely*/ } 688 else 689 AssertMsgFailed(("idSlot=%u (%#x)\n", idSlot, idSlot)); 690 } 691 692 693 694 VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3, 695 uint8_t *pu2State, uint32_t *puNemRange) 696 { 697 uint16_t idSlot = nemR3LnxMemSlotIdAlloc(pVM); 698 AssertLogRelReturn(idSlot < _32K, VERR_NEM_MAP_PAGES_FAILED); 699 700 Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p pu2State=%p (%d) puNemRange=%p (%d) - idSlot=%#x\n", 701 GCPhys, cb, pvR3, pu2State, pu2State, puNemRange, *puNemRange, idSlot)); 702 703 struct kvm_userspace_memory_region Region; 704 Region.slot = idSlot; 705 Region.flags = 0; 706 Region.guest_phys_addr = GCPhys; 707 Region.memory_size = cb; 708 Region.userspace_addr = (uintptr_t)pvR3; 709 710 int rc = ioctl(pVM->nem.s.fdVm, KVM_SET_USER_MEMORY_REGION, &Region); 711 if (rc == 0) 712 { 713 *pu2State = 0; 714 *puNemRange = idSlot; 715 return VINF_SUCCESS; 716 } 717 718 LogRel(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p, idSlot=%#x failed: %u/%u\n", GCPhys, cb, pvR3, idSlot, rc, errno)); 719 nemR3LnxMemSlotIdFree(pVM, idSlot); 720 return VERR_NEM_MAP_PAGES_FAILED; 721 } 722 723 724 VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM) 725 { 726 RT_NOREF(pVM); 727 return true; 728 } 729 730 731 VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, 732 void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange) 733 { 734 Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d) puNemRange=%p (%#x)\n", 735 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State, puNemRange, puNemRange ? *puNemRange : UINT32_MAX)); 736 RT_NOREF(pvRam); 737 738 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE) 739 { 740 /** @todo implement splitting and whatnot of ranges if we want to be 100% 741 * conforming (just modify RAM registrations in MM.cpp to test). */ 742 AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p\n", GCPhys, cb, fFlags, pvRam, pvMmio2), 743 VERR_NEM_MAP_PAGES_FAILED); 744 } 745 746 /* 747 * Register MMIO2. 748 */ 749 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2) 750 { 751 AssertReturn(pvMmio2, VERR_NEM_MAP_PAGES_FAILED); 752 AssertReturn(puNemRange, VERR_NEM_MAP_PAGES_FAILED); 753 754 uint16_t idSlot = nemR3LnxMemSlotIdAlloc(pVM); 755 AssertLogRelReturn(idSlot < _32K, VERR_NEM_MAP_PAGES_FAILED); 756 757 struct kvm_userspace_memory_region Region; 758 Region.slot = idSlot; 759 Region.flags = fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES ? 
KVM_MEM_LOG_DIRTY_PAGES : 0; 760 Region.guest_phys_addr = GCPhys; 761 Region.memory_size = cb; 762 Region.userspace_addr = (uintptr_t)pvMmio2; 763 764 int rc = ioctl(pVM->nem.s.fdVm, KVM_SET_USER_MEMORY_REGION, &Region); 765 if (rc == 0) 766 { 767 *pu2State = 0; 768 *puNemRange = idSlot; 769 Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvMmio2=%p - idSlot=%#x\n", 770 GCPhys, cb, fFlags, pvMmio2, idSlot)); 771 return VINF_SUCCESS; 772 } 773 774 nemR3LnxMemSlotIdFree(pVM, idSlot); 775 AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x, pvMmio2=%p, idSlot=%#x failed: %u/%u\n", 776 GCPhys, cb, fFlags, pvMmio2, idSlot, errno, rc), 777 VERR_NEM_MAP_PAGES_FAILED); 778 } 779 780 /* MMIO, don't care. */ 781 *pu2State = 0; 782 *puNemRange = UINT32_MAX; 783 return VINF_SUCCESS; 784 } 785 786 787 VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, 788 void *pvRam, void *pvMmio2, uint32_t *puNemRange) 789 { 790 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange); 791 return VINF_SUCCESS; 792 } 793 794 795 VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam, 796 void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange) 797 { 798 Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p puNemRange=%p (%#x)\n", 799 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange)); 800 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State); 801 802 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE) 803 { 804 /** @todo implement splitting and whatnot of ranges if we want to be 100% 805 * conforming (just modify RAM registrations in MM.cpp to test). */ 806 AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p\n", GCPhys, cb, fFlags, pvRam, pvMmio2), 807 VERR_NEM_UNMAP_PAGES_FAILED); 808 } 809 810 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2) 811 { 812 uint32_t const idSlot = *puNemRange; 813 AssertReturn(idSlot > 0 && idSlot < _32K, VERR_NEM_IPE_4); 814 AssertReturn(ASMBitTest(pVM->nem.s.bmSlotIds, idSlot), VERR_NEM_IPE_4); 815 816 struct kvm_userspace_memory_region Region; 817 Region.slot = idSlot; 818 Region.flags = 0; 819 Region.guest_phys_addr = GCPhys; 820 Region.memory_size = 0; /* this deregisters it. 
*/ 821 Region.userspace_addr = (uintptr_t)pvMmio2; 822 823 int rc = ioctl(pVM->nem.s.fdVm, KVM_SET_USER_MEMORY_REGION, &Region); 824 if (rc == 0) 825 { 826 if (pu2State) 827 *pu2State = 0; 828 *puNemRange = UINT32_MAX; 829 nemR3LnxMemSlotIdFree(pVM, idSlot); 830 return VINF_SUCCESS; 831 } 832 833 AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x, pvMmio2=%p, idSlot=%#x failed: %u/%u\n", 834 GCPhys, cb, fFlags, pvMmio2, idSlot, errno, rc), 835 VERR_NEM_UNMAP_PAGES_FAILED); 836 } 837 838 if (pu2State) 839 *pu2State = UINT8_MAX; 840 return VINF_SUCCESS; 841 } 842 843 844 VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange, 845 void *pvBitmap, size_t cbBitmap) 846 { 847 AssertReturn(uNemRange > 0 && uNemRange < _32K, VERR_NEM_IPE_4); 848 AssertReturn(ASMBitTest(pVM->nem.s.bmSlotIds, uNemRange), VERR_NEM_IPE_4); 849 850 RT_NOREF(GCPhys, cbBitmap); 851 852 struct kvm_dirty_log DirtyLog; 853 DirtyLog.slot = uNemRange; 854 DirtyLog.padding1 = 0; 855 DirtyLog.dirty_bitmap = pvBitmap; 856 857 int rc = ioctl(pVM->nem.s.fdVm, KVM_GET_DIRTY_LOG, &DirtyLog); 858 AssertLogRelMsgReturn(rc == 0, ("%RGp LB %RGp idSlot=%#x failed: %u/%u\n", GCPhys, cb, uNemRange, errno, rc), 859 VERR_NEM_QUERY_DIRTY_BITMAP_FAILED); 860 861 return VINF_SUCCESS; 862 } 863 864 865 VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags, 866 uint8_t *pu2State, uint32_t *puNemRange) 867 { 868 Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags)); 869 *pu2State = UINT8_MAX; 870 871 /* Don't support puttint ROM where there is already RAM. For 872 now just shuffle the registrations till it works... */ 873 AssertLogRelMsgReturn(!(fFlags & NEM_NOTIFY_PHYS_ROM_F_REPLACE), ("%RGp LB %RGp fFlags=%#x\n", GCPhys, cb, fFlags), 874 VERR_NEM_MAP_PAGES_FAILED); 875 876 /** @todo figure out how to do shadow ROMs. */ 877 878 /* 879 * We only allocate a slot number here in case we need to use it to 880 * fend of physical handler fun. 881 */ 882 uint16_t idSlot = nemR3LnxMemSlotIdAlloc(pVM); 883 AssertLogRelReturn(idSlot < _32K, VERR_NEM_MAP_PAGES_FAILED); 884 885 *pu2State = 0; 886 *puNemRange = idSlot; 887 Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp fFlags=%#x pvPages=%p - idSlot=%#x\n", 888 GCPhys, cb, fFlags, pvPages, idSlot)); 889 RT_NOREF(GCPhys, cb, fFlags, pvPages); 890 return VINF_SUCCESS; 891 } 892 893 894 VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, 895 uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange) 896 { 897 Log5(("NEMR3NotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n", 898 GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange)); 899 900 AssertPtrReturn(pvPages, VERR_NEM_IPE_5); 901 902 uint32_t const idSlot = *puNemRange; 903 AssertReturn(idSlot > 0 && idSlot < _32K, VERR_NEM_IPE_4); 904 AssertReturn(ASMBitTest(pVM->nem.s.bmSlotIds, idSlot), VERR_NEM_IPE_4); 905 906 *pu2State = UINT8_MAX; 907 908 /* 909 * Do the actual setting of the user pages here now that we've 910 * got a valid pvPages (typically isn't available during the early 911 * notification, unless we're replacing RAM). 
912 */ 913 struct kvm_userspace_memory_region Region; 914 Region.slot = idSlot; 915 Region.flags = 0; 916 Region.guest_phys_addr = GCPhys; 917 Region.memory_size = cb; 918 Region.userspace_addr = (uintptr_t)pvPages; 919 920 int rc = ioctl(pVM->nem.s.fdVm, KVM_SET_USER_MEMORY_REGION, &Region); 921 if (rc == 0) 922 { 923 *pu2State = 0; 924 Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp fFlags=%#x pvPages=%p - idSlot=%#x\n", 925 GCPhys, cb, fFlags, pvPages, idSlot)); 926 return VINF_SUCCESS; 927 } 928 AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x, pvPages=%p, idSlot=%#x failed: %u/%u\n", 929 GCPhys, cb, fFlags, pvPages, idSlot, errno, rc), 930 VERR_NEM_MAP_PAGES_FAILED); 931 } 932 933 934 /** 935 * Called when the A20 state changes. 936 * 937 * @param pVCpu The CPU the A20 state changed on. 938 * @param fEnabled Whether it was enabled (true) or disabled. 939 */ 940 VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled) 941 { 942 Log(("nemR3NativeNotifySetA20: fEnabled=%RTbool\n", fEnabled)); 943 Assert(VM_IS_NEM_ENABLED(pVCpu->CTX_SUFF(pVM))); 944 RT_NOREF(pVCpu, fEnabled); 945 } 946 947 948 VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb, 949 RTR3PTR pvMemR3, uint8_t *pu2State) 950 { 951 Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n", 952 GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State)); 953 954 *pu2State = UINT8_MAX; 955 RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3); 956 } 957 958 959 void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb) 960 { 961 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind)); 962 RT_NOREF(pVM, enmKind, GCPhys, cb); 963 } 964 965 966 void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld, 967 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM) 968 { 969 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n", 970 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM)); 971 RT_NOREF(pVM, enmKind, GCPhysOld, GCPhysNew, cb, fRestoreAsRAM); 972 } 973 974 975 int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt, 976 PGMPAGETYPE enmType, uint8_t *pu2State) 977 { 978 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n", 979 GCPhys, HCPhys, fPageProt, enmType, *pu2State)); 980 RT_NOREF(pVM, GCPhys, HCPhys, fPageProt, enmType, pu2State); 981 return VINF_SUCCESS; 982 } 983 984 985 VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt, 986 PGMPAGETYPE enmType, uint8_t *pu2State) 987 { 988 Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n", 989 GCPhys, HCPhys, fPageProt, enmType, *pu2State)); 990 Assert(VM_IS_NEM_ENABLED(pVM)); 991 RT_NOREF(pVM, GCPhys, HCPhys, pvR3, fPageProt, enmType, pu2State); 992 993 } 994 995 996 VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew, 997 RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State) 998 { 999 Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp pvNewR3=%p fPageProt=%#x enmType=%d *pu2State=%d\n", 1000 GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, *pu2State)); 1001 
Assert(VM_IS_NEM_ENABLED(pVM)); 1002 RT_NOREF(pVM, GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, pu2State); 1003 } 1004 1005 1006 /********************************************************************************************************************************* 1007 * CPU State * 1008 *********************************************************************************************************************************/ 1009 1010 /** 1011 * Worker that imports selected state from KVM. 1012 */ 1013 static int nemHCLnxImportState(PVMCPUCC pVCpu, uint64_t fWhat, struct kvm_run *pRun) 1014 { 1015 RT_NOREF(pVCpu, fWhat, pRun); 600 1016 return VERR_NOT_IMPLEMENTED; 601 1017 } 1018 1019 1020 /** 1021 * Interface for importing state on demand (used by IEM). 1022 * 1023 * @returns VBox status code. 1024 * @param pVCpu The cross context CPU structure. 1025 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. 1026 */ 1027 VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat) 1028 { 1029 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand); 1030 1031 RT_NOREF(pVCpu, fWhat); 1032 return nemHCLnxImportState(pVCpu, fWhat, pVCpu->nem.s.pRun); 1033 } 1034 1035 1036 /** 1037 * Exports state to KVM. 1038 */ 1039 static int nemHCLnxExportState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, struct kvm_run *pRun) 1040 { 1041 uint64_t const fExtrn = pCtx->fExtrn; 1042 Assert((fExtrn & CPUMCTX_EXTRN_ALL) != CPUMCTX_EXTRN_ALL); 1043 1044 /* 1045 * Stuff that goes into kvm_run::s.regs.regs: 1046 */ 1047 if ( (fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_GPRS_MASK)) 1048 != (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_GPRS_MASK)) 1049 { 1050 if (!(fExtrn & CPUMCTX_EXTRN_RIP)) 1051 pRun->s.regs.regs.rip = pCtx->rip; 1052 if (!(fExtrn & CPUMCTX_EXTRN_RFLAGS)) 1053 pRun->s.regs.regs.rflags = pCtx->rflags.u; 1054 1055 if (!(fExtrn & CPUMCTX_EXTRN_RAX)) 1056 pRun->s.regs.regs.rax = pCtx->rax; 1057 if (!(fExtrn & CPUMCTX_EXTRN_RCX)) 1058 pRun->s.regs.regs.rcx = pCtx->rcx; 1059 if (!(fExtrn & CPUMCTX_EXTRN_RDX)) 1060 pRun->s.regs.regs.rdx = pCtx->rdx; 1061 if (!(fExtrn & CPUMCTX_EXTRN_RBX)) 1062 pRun->s.regs.regs.rbx = pCtx->rbx; 1063 if (!(fExtrn & CPUMCTX_EXTRN_RSP)) 1064 pRun->s.regs.regs.rsp = pCtx->rsp; 1065 if (!(fExtrn & CPUMCTX_EXTRN_RBP)) 1066 pRun->s.regs.regs.rbp = pCtx->rbp; 1067 if (!(fExtrn & CPUMCTX_EXTRN_RSI)) 1068 pRun->s.regs.regs.rsi = pCtx->rsi; 1069 if (!(fExtrn & CPUMCTX_EXTRN_RDI)) 1070 pRun->s.regs.regs.rdi = pCtx->rdi; 1071 if (!(fExtrn & CPUMCTX_EXTRN_R8_R15)) 1072 { 1073 pRun->s.regs.regs.r8 = pCtx->r8; 1074 pRun->s.regs.regs.r9 = pCtx->r9; 1075 pRun->s.regs.regs.r10 = pCtx->r10; 1076 pRun->s.regs.regs.r11 = pCtx->r11; 1077 pRun->s.regs.regs.r12 = pCtx->r12; 1078 pRun->s.regs.regs.r13 = pCtx->r13; 1079 pRun->s.regs.regs.r14 = pCtx->r14; 1080 pRun->s.regs.regs.r15 = pCtx->r15; 1081 } 1082 pRun->kvm_dirty_regs |= KVM_SYNC_X86_REGS; 1083 } 1084 1085 /* 1086 * Stuff that goes into kvm_run::s.regs.sregs: 1087 */ 1088 /** @todo apic_base */ 1089 if ( (fExtrn & (CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK | CPUMCTX_EXTRN_CR_MASK | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_APIC_TPR)) 1090 != (CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK | CPUMCTX_EXTRN_CR_MASK | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_APIC_TPR)) 1091 { 1092 #define NEM_LNX_EXPORT_SEG(a_KvmSeg, a_CtxSeg) do { \ 1093 (a_KvmSeg).base = (a_CtxSeg).u64Base; \ 1094 (a_KvmSeg).limit = (a_CtxSeg).u32Limit; \ 1095 (a_KvmSeg).selector = (a_CtxSeg).Sel; \ 1096 (a_KvmSeg).type = 
(a_CtxSeg).Attr.n.u4Type; \ 1097 (a_KvmSeg).s = (a_CtxSeg).Attr.n.u1DescType; \ 1098 (a_KvmSeg).dpl = (a_CtxSeg).Attr.n.u2Dpl; \ 1099 (a_KvmSeg).present = (a_CtxSeg).Attr.n.u1Present; \ 1100 (a_KvmSeg).avl = (a_CtxSeg).Attr.n.u1Available; \ 1101 (a_KvmSeg).l = (a_CtxSeg).Attr.n.u1Long; \ 1102 (a_KvmSeg).db = (a_CtxSeg).Attr.n.u1DefBig; \ 1103 (a_KvmSeg).g = (a_CtxSeg).Attr.n.u1Granularity; \ 1104 (a_KvmSeg).unusable = (a_CtxSeg).Attr.n.u1Unusable; \ 1105 (a_KvmSeg).padding = 0; \ 1106 } while (0) 1107 1108 if ((fExtrn & CPUMCTX_EXTRN_SREG_MASK) != CPUMCTX_EXTRN_SREG_MASK) 1109 { 1110 if (!(fExtrn & CPUMCTX_EXTRN_ES)) 1111 NEM_LNX_EXPORT_SEG(pRun->s.regs.sregs.es, pCtx->es); 1112 if (!(fExtrn & CPUMCTX_EXTRN_CS)) 1113 NEM_LNX_EXPORT_SEG(pRun->s.regs.sregs.cs, pCtx->cs); 1114 if (!(fExtrn & CPUMCTX_EXTRN_SS)) 1115 NEM_LNX_EXPORT_SEG(pRun->s.regs.sregs.ss, pCtx->ss); 1116 if (!(fExtrn & CPUMCTX_EXTRN_DS)) 1117 NEM_LNX_EXPORT_SEG(pRun->s.regs.sregs.ds, pCtx->ds); 1118 if (!(fExtrn & CPUMCTX_EXTRN_FS)) 1119 NEM_LNX_EXPORT_SEG(pRun->s.regs.sregs.fs, pCtx->fs); 1120 if (!(fExtrn & CPUMCTX_EXTRN_GS)) 1121 NEM_LNX_EXPORT_SEG(pRun->s.regs.sregs.gs, pCtx->gs); 1122 } 1123 if ((fExtrn & CPUMCTX_EXTRN_TABLE_MASK) != CPUMCTX_EXTRN_TABLE_MASK) 1124 { 1125 if (!(fExtrn & CPUMCTX_EXTRN_GDTR)) 1126 { 1127 pRun->s.regs.sregs.gdt.base = pCtx->gdtr.pGdt; 1128 pRun->s.regs.sregs.gdt.limit = pCtx->gdtr.cbGdt; 1129 pRun->s.regs.sregs.gdt.padding[0] = 0; 1130 pRun->s.regs.sregs.gdt.padding[1] = 0; 1131 pRun->s.regs.sregs.gdt.padding[2] = 0; 1132 } 1133 if (!(fExtrn & CPUMCTX_EXTRN_IDTR)) 1134 { 1135 pRun->s.regs.sregs.idt.base = pCtx->idtr.pIdt; 1136 pRun->s.regs.sregs.idt.limit = pCtx->idtr.cbIdt; 1137 pRun->s.regs.sregs.idt.padding[0] = 0; 1138 pRun->s.regs.sregs.idt.padding[1] = 0; 1139 pRun->s.regs.sregs.idt.padding[2] = 0; 1140 } 1141 if (!(fExtrn & CPUMCTX_EXTRN_LDTR)) 1142 NEM_LNX_EXPORT_SEG(pRun->s.regs.sregs.ldt, pCtx->ldtr); 1143 if (!(fExtrn & CPUMCTX_EXTRN_TR)) 1144 NEM_LNX_EXPORT_SEG(pRun->s.regs.sregs.tr, pCtx->tr); 1145 } 1146 if ((fExtrn & CPUMCTX_EXTRN_CR_MASK) != CPUMCTX_EXTRN_CR_MASK) 1147 { 1148 if (!(fExtrn & CPUMCTX_EXTRN_CR0)) 1149 pRun->s.regs.sregs.cr0 = pCtx->cr0; 1150 if (!(fExtrn & CPUMCTX_EXTRN_CR2)) 1151 pRun->s.regs.sregs.cr2 = pCtx->cr2; 1152 if (!(fExtrn & CPUMCTX_EXTRN_CR3)) 1153 pRun->s.regs.sregs.cr3 = pCtx->cr3; 1154 if (!(fExtrn & CPUMCTX_EXTRN_CR4)) 1155 pRun->s.regs.sregs.cr4 = pCtx->cr4; 1156 } 1157 if (!(fExtrn & CPUMCTX_EXTRN_APIC_TPR)) 1158 pRun->s.regs.sregs.cr8 = CPUMGetGuestCR8(pVCpu); 1159 if (!(fExtrn & CPUMCTX_EXTRN_EFER)) 1160 pRun->s.regs.sregs.efer = pCtx->msrEFER; 1161 1162 /** @todo apic_base */ 1163 /** @todo interrupt_bitmap - IRQ injection? */ 1164 pRun->kvm_dirty_regs |= KVM_SYNC_X86_SREGS; 1165 } 1166 1167 /* 1168 * Debug registers. 
1169 */ 1170 if ((fExtrn & CPUMCTX_EXTRN_DR_MASK) != CPUMCTX_EXTRN_DR_MASK) 1171 { 1172 struct kvm_debugregs DbgRegs = {{0}}; 1173 1174 if (fExtrn & CPUMCTX_EXTRN_DR_MASK) 1175 { 1176 /* Partial debug state, we must get DbgRegs first so we can merge: */ 1177 int rc = ioctl(pVCpu->nem.s.fdVCpu, KVM_GET_DEBUGREGS, &DbgRegs); 1178 AssertMsgReturn(rc == 0, ("rc=%d errno=%d\n", rc, errno), VERR_NEM_IPE_3); 1179 } 1180 1181 if (!(fExtrn & CPUMCTX_EXTRN_DR0_DR3)) 1182 { 1183 DbgRegs.db[0] = pCtx->dr[0]; 1184 DbgRegs.db[1] = pCtx->dr[1]; 1185 DbgRegs.db[2] = pCtx->dr[2]; 1186 DbgRegs.db[3] = pCtx->dr[3]; 1187 } 1188 if (!(fExtrn & CPUMCTX_EXTRN_DR6)) 1189 DbgRegs.dr6 = pCtx->dr[6]; 1190 if (!(fExtrn & CPUMCTX_EXTRN_DR7)) 1191 DbgRegs.dr7 = pCtx->dr[7]; 1192 1193 int rc = ioctl(pVCpu->nem.s.fdVCpu, KVM_SET_DEBUGREGS, &DbgRegs); 1194 AssertMsgReturn(rc == 0, ("rc=%d errno=%d\n", rc, errno), VERR_NEM_IPE_3); 1195 } 1196 1197 /* 1198 * FPU, SSE, AVX, ++. 1199 */ 1200 if ( (fExtrn & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx)) 1201 != (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx)) 1202 { 1203 if ( (fExtrn & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE)) 1204 != (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE)) 1205 { 1206 if (fExtrn & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE)) 1207 { 1208 /* Partial state is annoying as we have to do merging - is this possible at all? */ 1209 struct kvm_xsave XSave; 1210 int rc = ioctl(pVCpu->nem.s.fdVCpu, KVM_GET_XSAVE, &XSave); 1211 AssertMsgReturn(rc == 0, ("rc=%d errno=%d\n", rc, errno), VERR_NEM_IPE_3); 1212 1213 if (!(fExtrn & CPUMCTX_EXTRN_X87)) 1214 memcpy(&pCtx->XState.x87, &XSave, sizeof(pCtx->XState.x87)); 1215 if (!(fExtrn & CPUMCTX_EXTRN_SSE_AVX)) 1216 { 1217 /** @todo */ 1218 } 1219 if (!(fExtrn & CPUMCTX_EXTRN_OTHER_XSAVE)) 1220 { 1221 /** @todo */ 1222 } 1223 } 1224 1225 int rc = ioctl(pVCpu->nem.s.fdVCpu, KVM_SET_XSAVE, &pCtx->XState); 1226 AssertMsgReturn(rc == 0, ("rc=%d errno=%d\n", rc, errno), VERR_NEM_IPE_3); 1227 } 1228 1229 if (!(fExtrn & CPUMCTX_EXTRN_XCRx)) 1230 { 1231 struct kvm_xcrs Xcrs = 1232 { /*.nr_xcrs = */ 2, 1233 /*.flags = */ 0, 1234 /*.xcrs= */ { 1235 { /*.xcr =*/ 0, /*.reserved=*/ 0, /*.value=*/ pCtx->aXcr[0] }, 1236 { /*.xcr =*/ 1, /*.reserved=*/ 0, /*.value=*/ pCtx->aXcr[1] }, 1237 } 1238 }; 1239 1240 int rc = ioctl(pVCpu->nem.s.fdVCpu, KVM_SET_XCRS, &Xcrs); 1241 AssertMsgReturn(rc == 0, ("rc=%d errno=%d\n", rc, errno), VERR_NEM_IPE_3); 1242 } 1243 } 1244 1245 /* 1246 * MSRs. 
1247 */ 1248 if ( (fExtrn & (CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_SYSENTER_MSRS | CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS)) 1249 != (CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_SYSENTER_MSRS | CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS)) 1250 { 1251 union 1252 { 1253 struct kvm_msrs Core; 1254 uint64_t padding[2 + sizeof(struct kvm_msr_entry) * 32]; 1255 } uBuf; 1256 uint32_t iMsr = 0; 1257 PCPUMCTXMSRS const pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu); 1258 1259 #define ADD_MSR(a_Msr, a_uValue) do { \ 1260 Assert(iMsr < 32); \ 1261 uBuf.Core.entries[iMsr].index = (a_Msr); \ 1262 uBuf.Core.entries[iMsr].reserved = 0; \ 1263 uBuf.Core.entries[iMsr].data = (a_uValue); \ 1264 iMsr += 1; \ 1265 } while (0) 1266 1267 if (!(fExtrn & CPUMCTX_EXTRN_KERNEL_GS_BASE)) 1268 ADD_MSR(MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE); 1269 if (!(fExtrn & CPUMCTX_EXTRN_SYSCALL_MSRS)) 1270 { 1271 ADD_MSR(MSR_K6_STAR, pCtx->msrSTAR); 1272 ADD_MSR(MSR_K8_LSTAR, pCtx->msrLSTAR); 1273 ADD_MSR(MSR_K8_CSTAR, pCtx->msrCSTAR); 1274 ADD_MSR(MSR_K8_SF_MASK, pCtx->msrSFMASK); 1275 } 1276 if (!(fExtrn & CPUMCTX_EXTRN_SYSENTER_MSRS)) 1277 { 1278 ADD_MSR(MSR_IA32_SYSENTER_CS, pCtx->SysEnter.cs); 1279 ADD_MSR(MSR_IA32_SYSENTER_EIP, pCtx->SysEnter.eip); 1280 ADD_MSR(MSR_IA32_SYSENTER_ESP, pCtx->SysEnter.esp); 1281 } 1282 if (!(fExtrn & CPUMCTX_EXTRN_TSC_AUX)) 1283 ADD_MSR(MSR_K8_TSC_AUX, pCtxMsrs->msr.TscAux); 1284 if (!(fExtrn & CPUMCTX_EXTRN_OTHER_MSRS)) 1285 { 1286 ADD_MSR(MSR_IA32_CR_PAT, pCtx->msrPAT); 1287 /** @todo What do we _have_ to add here? 1288 * We also have: Mttr*, MiscEnable, FeatureControl. */ 1289 } 1290 1291 uBuf.Core.pad = 0; 1292 uBuf.Core.nmsrs = iMsr; 1293 int rc = ioctl(pVCpu->nem.s.fdVCpu, KVM_SET_MSRS, &uBuf); 1294 AssertMsgReturn(rc == (int)iMsr, 1295 ("rc=%d iMsr=%d (->%#x) errno=%d\n", 1296 rc, iMsr, (uint32_t)rc < iMsr ? uBuf.Core.entries[rc].index : 0, errno), 1297 VERR_NEM_IPE_3); 1298 } 1299 1300 /* 1301 * KVM now owns all the state. 1302 */ 1303 pCtx->fExtrn = (fExtrn & ~CPUMCTX_EXTRN_KEEPER_MASK) | CPUMCTX_EXTRN_KEEPER_NEM | CPUMCTX_EXTRN_ALL; 1304 1305 RT_NOREF(pVM); 1306 return VINF_SUCCESS; 1307 } 1308 1309 1310 /** 1311 * Query the CPU tick counter and optionally the TSC_AUX MSR value. 1312 * 1313 * @returns VBox status code. 1314 * @param pVCpu The cross context CPU structure. 1315 * @param pcTicks Where to return the CPU tick count. 1316 * @param puAux Where to return the TSC_AUX register value. 1317 */ 1318 VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux) 1319 { 1320 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick); 1321 // KVM_GET_CLOCK? 1322 RT_NOREF(pVCpu, pcTicks, puAux); 1323 return VINF_SUCCESS; 1324 } 1325 1326 1327 /** 1328 * Resumes CPU clock (TSC) on all virtual CPUs. 1329 * 1330 * This is called by TM when the VM is started, restored, resumed or similar. 1331 * 1332 * @returns VBox status code. 1333 * @param pVM The cross context VM structure. 1334 * @param pVCpu The cross context CPU structure of the calling EMT. 1335 * @param uPausedTscValue The TSC value at the time of pausing. 1336 */ 1337 VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue) 1338 { 1339 // KVM_SET_CLOCK? 
1340 RT_NOREF(pVM, pVCpu, uPausedTscValue); 1341 return VINF_SUCCESS; 1342 } 1343 1344 1345 VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM) 1346 { 1347 RT_NOREF(pVM); 1348 return NEM_FEAT_F_NESTED_PAGING 1349 | NEM_FEAT_F_FULL_GST_EXEC 1350 | NEM_FEAT_F_XSAVE_XRSTOR; 1351 } 1352 1353 1354 1355 /********************************************************************************************************************************* 1356 * Execution * 1357 *********************************************************************************************************************************/ 602 1358 603 1359 … … 637 1393 638 1394 639 VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3, 640 uint8_t *pu2State, uint32_t *puNemRange) 641 { 642 Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p pu2State=%p (%d) puNemRange=%p (%d)\n", 643 GCPhys, cb, pvR3, pu2State, pu2State, puNemRange, *puNemRange)); 644 *pu2State = UINT8_MAX; 645 RT_NOREF(pVM, GCPhys, cb, pvR3, puNemRange); 1395 static VBOXSTRICTRC nemHCLnxHandleInterruptFF(PVM pVM, PVMCPU pVCpu) 1396 { 1397 RT_NOREF(pVM, pVCpu); 646 1398 return VINF_SUCCESS; 647 1399 } 648 1400 649 1401 650 VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM) 651 { 652 RT_NOREF(pVM); 653 return false; 654 } 655 656 657 VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, 658 void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange) 659 { 660 Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d) puNemRange=%p (%#x)\n", 661 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State, puNemRange, puNemRange ? *puNemRange : UINT32_MAX)); 662 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange); 663 *pu2State = UINT8_MAX; 664 return VINF_SUCCESS; 665 } 666 667 668 VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, 669 void *pvRam, void *pvMmio2, uint32_t *puNemRange) 670 { 671 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange); 672 return VINF_SUCCESS; 673 } 674 675 676 VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam, 677 void *pvMmio2, uint8_t *pu2State) 678 { 679 Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p\n", 680 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State)); 681 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State); 682 if (pu2State) 683 *pu2State = UINT8_MAX; 684 return VINF_SUCCESS; 685 } 686 687 688 VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange, 689 void *pvBitmap, size_t cbBitmap) 690 { 691 RT_NOREF(pVM, GCPhys, cb, uNemRange, pvBitmap, cbBitmap); 692 AssertFailed(); 1402 static VBOXSTRICTRC nemHCLnxHandleExitIo(PVMCC pVM, PVMCPUCC pVCpu, struct kvm_run *pRun) 1403 { 1404 /* 1405 * Input validation. 1406 */ 1407 Assert(pRun->io.count > 0); 1408 Assert(pRun->io.size == 1 || pRun->io.size == 2 || pRun->io.size == 4); 1409 Assert(pRun->io.direction == KVM_EXIT_IO_IN || pRun->io.direction == KVM_EXIT_IO_OUT); 1410 Assert(pRun->io.data_offset < pVM->nem.s.cbVCpuMmap); 1411 Assert(pRun->io.data_offset + pRun->io.size * pRun->io.count <= pVM->nem.s.cbVCpuMmap); 1412 1413 /* 1414 * Do the requested job. 
1415 */ 1416 VBOXSTRICTRC rcStrict; 1417 RTPTRUNION uPtrData; 1418 uPtrData.pu8 = (uint8_t *)pRun + pRun->io.data_offset; 1419 if (pRun->io.count == 1) 1420 { 1421 if (pRun->io.direction == KVM_EXIT_IO_IN) 1422 { 1423 uint32_t uValue = 0; 1424 rcStrict = IOMIOPortRead(pVM, pVCpu, pRun->io.port, &uValue, pRun->io.size); 1425 Log4(("IOExit/%u: %04x:%08RX64: IN %#x LB %u -> %#x, rcStrict=%Rrc\n", 1426 pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip, 1427 pRun->io.port, pRun->io.size, uValue, VBOXSTRICTRC_VAL(rcStrict) )); 1428 if (IOM_SUCCESS(rcStrict)) 1429 { 1430 if (pRun->io.size == 4) 1431 *uPtrData.pu32 = uValue; 1432 else if (pRun->io.size == 2) 1433 *uPtrData.pu16 = (uint16_t)uValue; 1434 else 1435 *uPtrData.pu8 = (uint8_t)uValue; 1436 } 1437 } 1438 else 1439 { 1440 uint32_t const uValue = pRun->io.size == 4 ? *uPtrData.pu32 1441 : pRun->io.size == 2 ? *uPtrData.pu16 1442 : *uPtrData.pu8; 1443 rcStrict = IOMIOPortWrite(pVM, pVCpu, pRun->io.port, uValue, pRun->io.size); 1444 Log4(("IOExit/%u: %04x:%08RX64: OUT %#x, %#x LB %u rcStrict=%Rrc\n", 1445 pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip, 1446 pRun->io.port, uValue, pRun->io.size, VBOXSTRICTRC_VAL(rcStrict) )); 1447 } 1448 } 1449 else 1450 { 1451 uint32_t cTransfers = pRun->io.count; 1452 if (pRun->io.direction == KVM_EXIT_IO_IN) 1453 { 1454 rcStrict = IOMIOPortReadString(pVM, pVCpu, pRun->io.port, uPtrData.pv, &cTransfers, pRun->io.size); 1455 Log4(("IOExit/%u: %04x:%08RX64: REP INS %#x LB %u * %#x times -> rcStrict=%Rrc cTransfers=%d\n", 1456 pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip, 1457 pRun->io.port, pRun->io.size, pRun->io.count, VBOXSTRICTRC_VAL(rcStrict), cTransfers )); 1458 } 1459 else 1460 { 1461 rcStrict = IOMIOPortWriteString(pVM, pVCpu, pRun->io.port, uPtrData.pv, &cTransfers, pRun->io.size); 1462 Log4(("IOExit/%u: %04x:%08RX64: REP OUTS %#x LB %u * %#x times -> rcStrict=%Rrc cTransfers=%d\n", 1463 pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip, 1464 pRun->io.port, pRun->io.size, pRun->io.count, VBOXSTRICTRC_VAL(rcStrict), cTransfers )); 1465 } 1466 Assert(cTransfers == 0); 1467 } 1468 return rcStrict; 1469 } 1470 1471 1472 static VBOXSTRICTRC nemHCLnxHandleExit(PVMCC pVM, PVMCPUCC pVCpu, struct kvm_run *pRun) 1473 { 1474 switch (pRun->exit_reason) 1475 { 1476 case KVM_EXIT_EXCEPTION: 1477 AssertFailed(); 1478 break; 1479 1480 case KVM_EXIT_IO: 1481 return nemHCLnxHandleExitIo(pVM, pVCpu, pRun); 1482 1483 case KVM_EXIT_HYPERCALL: 1484 AssertFailed(); 1485 break; 1486 1487 case KVM_EXIT_DEBUG: 1488 AssertFailed(); 1489 break; 1490 1491 case KVM_EXIT_HLT: 1492 AssertFailed(); 1493 break; 1494 1495 case KVM_EXIT_MMIO: 1496 AssertFailed(); 1497 break; 1498 1499 case KVM_EXIT_IRQ_WINDOW_OPEN: 1500 AssertFailed(); 1501 break; 1502 1503 case KVM_EXIT_X86_RDMSR: 1504 AssertFailed(); 1505 break; 1506 1507 case KVM_EXIT_X86_WRMSR: 1508 AssertFailed(); 1509 break; 1510 1511 case KVM_EXIT_INTR: /* EINTR */ 1512 return VINF_SUCCESS; 1513 1514 case KVM_EXIT_SET_TPR: 1515 AssertFailed(); 1516 break; 1517 case KVM_EXIT_TPR_ACCESS: 1518 AssertFailed(); 1519 break; 1520 case KVM_EXIT_NMI: 1521 AssertFailed(); 1522 break; 1523 1524 case KVM_EXIT_SYSTEM_EVENT: 1525 AssertFailed(); 1526 break; 1527 case KVM_EXIT_IOAPIC_EOI: 1528 AssertFailed(); 1529 break; 1530 case KVM_EXIT_HYPERV: 1531 AssertFailed(); 1532 break; 1533 1534 case KVM_EXIT_DIRTY_RING_FULL: 1535 AssertFailed(); 1536 break; 1537 case KVM_EXIT_AP_RESET_HOLD: 1538 AssertFailed(); 1539 
break; 1540 case KVM_EXIT_X86_BUS_LOCK: 1541 AssertFailed(); 1542 break; 1543 1544 1545 case KVM_EXIT_SHUTDOWN: 1546 AssertFailed(); 1547 break; 1548 1549 case KVM_EXIT_FAIL_ENTRY: 1550 AssertFailed(); 1551 break; 1552 case KVM_EXIT_INTERNAL_ERROR: 1553 AssertFailed(); 1554 break; 1555 1556 /* 1557 * Foreign and unknowns. 1558 */ 1559 case KVM_EXIT_EPR: 1560 AssertLogRelMsgFailedReturn(("KVM_EXIT_EPR on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1); 1561 case KVM_EXIT_WATCHDOG: 1562 AssertLogRelMsgFailedReturn(("KVM_EXIT_WATCHDOG on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1); 1563 case KVM_EXIT_ARM_NISV: 1564 AssertLogRelMsgFailedReturn(("KVM_EXIT_ARM_NISV on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1); 1565 case KVM_EXIT_S390_STSI: 1566 AssertLogRelMsgFailedReturn(("KVM_EXIT_S390_STSI on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1); 1567 case KVM_EXIT_S390_TSCH: 1568 AssertLogRelMsgFailedReturn(("KVM_EXIT_S390_TSCH on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1); 1569 case KVM_EXIT_OSI: 1570 AssertLogRelMsgFailedReturn(("KVM_EXIT_OSI on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1); 1571 case KVM_EXIT_PAPR_HCALL: 1572 AssertLogRelMsgFailedReturn(("KVM_EXIT_PAPR_HCALL on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1); 1573 case KVM_EXIT_S390_UCONTROL: 1574 AssertLogRelMsgFailedReturn(("KVM_EXIT_S390_UCONTROL on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1); 1575 case KVM_EXIT_DCR: 1576 AssertLogRelMsgFailedReturn(("KVM_EXIT_DCR on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1); 1577 case KVM_EXIT_S390_SIEIC: 1578 AssertLogRelMsgFailedReturn(("KVM_EXIT_S390_SIEIC on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1); 1579 case KVM_EXIT_S390_RESET: 1580 AssertLogRelMsgFailedReturn(("KVM_EXIT_S390_RESET on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1); 1581 case KVM_EXIT_UNKNOWN: 1582 AssertLogRelMsgFailedReturn(("KVM_EXIT_UNKNOWN on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1); 1583 case KVM_EXIT_XEN: 1584 AssertLogRelMsgFailedReturn(("KVM_EXIT_XEN on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1); 1585 default: 1586 AssertLogRelMsgFailedReturn(("Unknown exit reason %u on VCpu #%u at %04x:%RX64!\n", pRun->exit_reason, pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1); 1587 } 1588 1589 RT_NOREF(pVM, pVCpu, pRun); 693 1590 return VERR_NOT_IMPLEMENTED; 694 1591 } 695 1592 696 1593 697 VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags, 698 uint8_t *pu2State) 699 { 700 Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags)); 701 *pu2State = UINT8_MAX; 702 RT_NOREF(pVM, 
GCPhys, cb, pvPages, fFlags); 703 return VINF_SUCCESS; 704 } 705 706 707 VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, 708 uint32_t fFlags, uint8_t *pu2State) 709 { 710 Log5(("NEMR3NotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p\n", 711 GCPhys, cb, pvPages, fFlags, pu2State)); 712 *pu2State = UINT8_MAX; 713 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags); 714 return VINF_SUCCESS; 715 } 716 717 718 /** 719 * Called when the A20 state changes. 720 * 721 * @param pVCpu The CPU the A20 state changed on. 722 * @param fEnabled Whether it was enabled (true) or disabled. 723 */ 724 VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled) 725 { 726 Log(("nemR3NativeNotifySetA20: fEnabled=%RTbool\n", fEnabled)); 727 Assert(VM_IS_NEM_ENABLED(pVCpu->CTX_SUFF(pVM))); 728 RT_NOREF(pVCpu, fEnabled); 729 } 730 731 732 /** 733 * Interface for importing state on demand (used by IEM). 734 * 735 * @returns VBox status code. 736 * @param pVCpu The cross context CPU structure. 737 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. 738 */ 739 VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat) 740 { 741 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand); 742 743 RT_NOREF(pVCpu, fWhat); 744 return VERR_NOT_IMPLEMENTED; 745 } 746 747 748 /** 749 * Query the CPU tick counter and optionally the TSC_AUX MSR value. 750 * 751 * @returns VBox status code. 752 * @param pVCpu The cross context CPU structure. 753 * @param pcTicks Where to return the CPU tick count. 754 * @param puAux Where to return the TSC_AUX register value. 755 */ 756 VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux) 757 { 758 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick); 759 // KVM_GET_CLOCK? 760 RT_NOREF(pVCpu, pcTicks, puAux); 761 return VERR_NOT_IMPLEMENTED; 762 } 763 764 765 /** 766 * Resumes CPU clock (TSC) on all virtual CPUs. 767 * 768 * This is called by TM when the VM is started, restored, resumed or similar. 769 * 770 * @returns VBox status code. 771 * @param pVM The cross context VM structure. 772 * @param pVCpu The cross context CPU structure of the calling EMT. 773 * @param uPausedTscValue The TSC value at the time of pausing. 774 */ 775 VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue) 776 { 777 // KVM_SET_CLOCK? 
778 RT_NOREF(pVM, pVCpu, uPausedTscValue);
779 return VERR_NOT_IMPLEMENTED;
780 }
781
782
783 VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
784 RTR3PTR pvMemR3, uint8_t *pu2State)
785 {
786 Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
787 GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
788
789 *pu2State = UINT8_MAX;
790 RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
791 }
792
793
794 void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
795 {
796 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
797 RT_NOREF(pVM, enmKind, GCPhys, cb);
798 }
799
800
801 void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
802 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
803 {
804 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
805 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
806 RT_NOREF(pVM, enmKind, GCPhysOld, GCPhysNew, cb, fRestoreAsRAM);
807 }
808
809
810 int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
811 PGMPAGETYPE enmType, uint8_t *pu2State)
812 {
813 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
814 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
815 RT_NOREF(pVM, GCPhys, HCPhys, fPageProt, enmType, pu2State);
816 return VINF_SUCCESS;
817 }
818
819
820 VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
821 PGMPAGETYPE enmType, uint8_t *pu2State)
822 {
823 Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
824 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
825 Assert(VM_IS_NEM_ENABLED(pVM));
826 RT_NOREF(pVM, GCPhys, HCPhys, pvR3, fPageProt, enmType, pu2State);
827
828 }
829
830
831 VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
832 RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
833 {
834 Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp pvNewR3=%p fPageProt=%#x enmType=%d *pu2State=%d\n",
835 GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, *pu2State));
836 Assert(VM_IS_NEM_ENABLED(pVM));
837 RT_NOREF(pVM, GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, pu2State);
1594 VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
1595 {
1596 /*
1597 * Try switch to NEM runloop state.
1598 */
1599 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
1600 { /* likely */ }
1601 else
1602 {
1603 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
1604 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
1605 return VINF_SUCCESS;
1606 }
1607
1608 /*
1609 * The run loop.
1610 */
1611 struct kvm_run * const pRun = pVCpu->nem.s.pRun;
1612 const bool fSingleStepping = DBGFIsStepping(pVCpu);
1613 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1614 for (unsigned iLoop = 0;; iLoop++)
1615 {
1616 /*
1617 * Pending interrupts or such? Need to check and deal with this prior
1618 * to the state syncing.
1619 */
1620 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC
1621 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
1622 {
1623 /* Try inject interrupt. */
1624 rcStrict = nemHCLnxHandleInterruptFF(pVM, pVCpu);
1625 if (rcStrict == VINF_SUCCESS)
1626 { /* likely */ }
1627 else
1628 {
1629 LogFlow(("NEM/%u: breaking: nemHCLnxHandleInterruptFF -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
1630 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
1631 break;
1632 }
1633 }
1634
1635 /*
1636 * Do not execute in KVM if the A20 isn't enabled.
1637 */
1638 if (PGMPhysIsA20Enabled(pVCpu))
1639 { /* likely */ }
1640 else
1641 {
1642 rcStrict = VINF_EM_RESCHEDULE_REM;
1643 LogFlow(("NEM/%u: breaking: A20 disabled\n", pVCpu->idCpu));
1644 break;
1645 }
1646
1647 /*
1648 * Ensure KVM has the whole state.
1649 */
1650 if ( (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL)
1651 != CPUMCTX_EXTRN_ALL)
1652 {
1653 int rc2 = nemHCLnxExportState(pVM, pVCpu, &pVCpu->cpum.GstCtx, pRun);
1654 AssertRCReturn(rc2, rc2);
1655 }
1656
1657 /*
1658 * Poll timers and run for a bit.
1659 *
1660 * With the VID approach (ring-0 or ring-3) we can specify a timeout here,
1661 * so we take the time of the next timer event and uses that as a deadline.
1662 * The rounding heuristics are "tuned" so that rhel5 (1K timer) will boot fine.
1663 */
1664 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
1665 * the whole polling job when timers have changed... */
1666 uint64_t offDeltaIgnored;
1667 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
1668 if ( !VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
1669 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
1670 {
1671 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
1672 {
1673 LogFlow(("NEM/%u: Entry @ %04x:%08RX64 IF=%d EFL=%#RX64 SS:RSP=%04x:%08RX64 cr0=%RX64\n",
1674 pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip,
1675 !!(pRun->s.regs.regs.rflags & X86_EFL_IF), pRun->s.regs.regs.rflags,
1676 pRun->s.regs.sregs.ss.selector, pRun->s.regs.regs.rsp, pRun->s.regs.sregs.cr0));
1677 TMNotifyStartOfExecution(pVM, pVCpu);
1678
1679 int rcLnx = ioctl(pVCpu->nem.s.fdVCpu, KVM_RUN, 0UL);
1680
1681 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
1682 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
1683
1684 LogFlow(("NEM/%u: Exit @ %04x:%08RX64 IF=%d EFL=%#RX64 CR8=%#x Reason=%#x IrqReady=%d Flags=%#x\n", pVCpu->idCpu,
1685 pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip, pRun->if_flag,
1686 pRun->s.regs.regs.rflags, pRun->s.regs.sregs.cr8, pRun->exit_reason,
1687 pRun->ready_for_interrupt_injection, pRun->flags));
1688 if (RT_LIKELY(rcLnx == 0 || errno == EINTR))
1689 {
1690 /*
1691 * Deal with the message.
1692 */
1693 rcStrict = nemHCLnxHandleExit(pVM, pVCpu, pRun);
1694 if (rcStrict == VINF_SUCCESS)
1695 { /* hopefully likely */ }
1696 else
1697 {
1698 LogFlow(("NEM/%u: breaking: nemHCLnxHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
1699 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
1700 break;
1701 }
1702 }
1703 else
1704 {
1705 int rc2 = RTErrConvertFromErrno(errno);
1706 AssertLogRelMsgFailedReturn(("KVM_RUN failed: rcLnx=%d errno=%u rc=%Rrc\n", rcLnx, errno, rc2), rc2);
1707 }
1708
1709 /*
1710 * If no relevant FFs are pending, loop.
1711 */
1712 if ( !VM_FF_IS_ANY_SET( pVM, !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
1713 && !VMCPU_FF_IS_ANY_SET(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
1714 continue;
1715
1716 /** @todo Try handle pending flags, not just return to EM loops. Take care
1717 * not to set important RCs here unless we've handled an exit. */
1718 LogFlow(("NEM/%u: breaking: pending FF (%#x / %#RX64)\n",
1719 pVCpu->idCpu, pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions));
1720 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
1721 }
1722 else
1723 {
1724 LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
1725 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
1726 }
1727 }
1728 else
1729 {
1730 LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
1731 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
1732 }
1733 break;
1734 } /* the run loop */
1735
1736
1737 /*
1738 * If the CPU is running, make sure to stop it before we try sync back the
1739 * state and return to EM. We don't sync back the whole state if we can help it.
1740 */
1741 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
1742 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
1743
1744 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL)
1745 {
1746 /* Try anticipate what we might need. */
1747 uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK;
1748 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
1749 || RT_FAILURE(rcStrict))
1750 fImport = CPUMCTX_EXTRN_ALL;
1751 # ifdef IN_RING0 /* Ring-3 I/O port access optimizations: */
1752 else if ( rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
1753 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
1754 fImport = CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS;
1755 else if (rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
1756 fImport = CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS;
1757 # endif
1758 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_APIC
1759 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
1760 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
1761
1762 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
1763 {
1764 int rc2 = nemHCLnxImportState(pVCpu, fImport, pRun);
1765 if (RT_SUCCESS(rc2))
1766 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
1767 else if (RT_SUCCESS(rcStrict))
1768 rcStrict = rc2;
1769 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
1770 pVCpu->cpum.GstCtx.fExtrn = 0;
1771 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
1772 }
1773 else
1774 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
1775 }
1776 else
1777 {
1778 pVCpu->cpum.GstCtx.fExtrn = 0;
1779 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
1780 }
1781
1782 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 => %Rrc\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
1783 pVCpu->cpum.GstCtx.rflags, VBOXSTRICTRC_VAL(rcStrict) ));
1784 return rcStrict;
838 1785 }
839 1786
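Aside (illustration only, not part of this changeset): the nemR3NativeRunGC code above follows the usual KVM userspace pattern of mmap'ing the vcpu's kvm_run area (its size is what KVM_GET_VCPU_MMAP_SIZE reports, cf. the cbVCpuMmap field added further down) and looping on the KVM_RUN ioctl, treating EINTR as an ordinary kick out of guest mode. A minimal stand-alone sketch of that pattern, with invented names (runLoopExample, fdKvm, fdVCpu) and most error handling omitted:

/* Sketch of the generic KVM_RUN pattern; not VirtualBox code. */
#include <errno.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static int runLoopExample(int fdKvm, int fdVCpu)
{
    /* The shared run structure; its size comes from the /dev/kvm system fd. */
    int cbRun = ioctl(fdKvm, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (cbRun < 0)
        return -1;
    struct kvm_run *pRun = mmap(NULL, (size_t)cbRun, PROT_READ | PROT_WRITE, MAP_SHARED, fdVCpu, 0);
    if (pRun == MAP_FAILED)
        return -1;

    for (;;)
    {
        int rc = ioctl(fdVCpu, KVM_RUN, 0);
        if (rc < 0 && errno != EINTR)   /* EINTR just means a signal interrupted guest execution. */
            return -1;

        switch (pRun->exit_reason)      /* Dispatch on why the guest stopped. */
        {
            case KVM_EXIT_IO:           /* Port I/O would have to be emulated here. */
            case KVM_EXIT_MMIO:         /* Likewise for MMIO accesses. */
                break;
            case KVM_EXIT_HLT:
                return 0;               /* Guest halted; leave the loop. */
            default:
                printf("unhandled exit_reason %u\n", pRun->exit_reason);
                return -1;
        }
    }
}

The VirtualBox loop differs mainly in what it wraps around that ioctl: the force-flag checks, TM timer polling, the CPUMCTX_EXTRN export/import bookkeeping and the VMCPUSTATE compare-exchange handling visible in the hunk above.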
-
trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp
r92443 r92465
2114 2114
2115 2115 VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
2116 void *pvMmio2, uint8_t *pu2State )
2117 {
2118 Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p \n",
2119 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State ));
2116 void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
2117 {
2118 Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
2119 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));
2120 2120
2121 2121 int rc = VINF_SUCCESS;
… …
2203 2203
2204 2204 VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
2205 uint8_t *pu2State )
2205 uint8_t *pu2State, uint32_t *puNemRange)
2206 2206 {
2207 2207 Log5(("nemR3NativeNotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
2208 *pu2State = UINT8_MAX;
2208 *pu2State = UINT8_MAX;
2209 *puNemRange = 0;
2209 2210
2210 2211 #if 0 /* Let's not do this after all. We'll protection change notifications for each page and if not we'll map them lazily. */
… …
2242 2243
2243 2244 VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
2244 uint32_t fFlags, uint8_t *pu2State )
2245 {
2246 Log5(("nemR3NativeNotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p \n",
2247 GCPhys, cb, pvPages, fFlags, pu2State ));
2245 uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
2246 {
2247 Log5(("nemR3NativeNotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
2248 GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
2248 2249 *pu2State = UINT8_MAX;
2249 2250
… …
2265 2266 return VERR_NEM_MAP_PAGES_FAILED;
2266 2267 }
2267 RT_NOREF(fFlags );
2268 RT_NOREF(fFlags, puNemRange);
2268 2269 #else
2269 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags );
2270 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
2270 2271 #endif
2271 2272 return VINF_SUCCESS;
-
trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp
r92441 r92465
2498 2498 if (VM_IS_NEM_ENABLED(pVM)) /* Notify REM before we unlink the range. */
2499 2499 {
2500 rc = NEMR3NotifyPhysMmioExUnmap(pVM, GCPhys, GCPhysLast - GCPhys + 1, 0 /*fFlags*/, NULL, NULL, NULL);
2500 rc = NEMR3NotifyPhysMmioExUnmap(pVM, GCPhys, GCPhysLast - GCPhys + 1, 0 /*fFlags*/,
2501 NULL, NULL, NULL, &pRam->uNemRange);
2501 2502 AssertLogRelRCReturn(rc, rc);
2502 2503 }
… …
2546 2547 rc = NEMR3NotifyPhysMmioExUnmap(pVM, GCPhys, GCPhysLast - GCPhys + 1, NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE,
2547 2548 pRam->pvR3 ? (uint8_t *)pRam->pvR3 + GCPhys - pRam->GCPhys : NULL,
2548 NULL, &u2State );
2549 NULL, &u2State, &pRam->uNemRange);
2549 2550 AssertLogRelRCReturn(rc, rc);
2550 2551 if (u2State != UINT8_MAX)
… …
3688 3689 pRam->pvR3
3689 3690 ? (uint8_t *)pRam->pvR3 + pFirstMmio->RamRange.GCPhys - pRam->GCPhys : NULL,
3690 pFirstMmio->pvR3, &u2State );
3691 pFirstMmio->pvR3, &u2State, &pRam->uNemRange);
3691 3692 AssertRCStmt(rc, rcRet = rc);
3692 3693 if (u2State != UINT8_MAX)
… …
3721 3722 uint8_t u2State = UINT8_MAX;
3722 3723 rc = NEMR3NotifyPhysMmioExUnmap(pVM, pCurMmio->RamRange.GCPhys, pCurMmio->RamRange.cb, fNemFlags,
3723 NULL, pCurMmio->pvR3, &u2State );
3724 NULL, pCurMmio->pvR3, &u2State, &pCurMmio->RamRange.uNemRange);
3724 3725 AssertRCStmt(rc, rcRet = rc);
3725 3726 if (u2State != UINT8_MAX)
… …
4349 4350 | (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED ? NEM_NOTIFY_PHYS_ROM_F_SHADOW : 0);
4350 4351 uint8_t u2NemState = UINT8_MAX;
4352 uint32_t uNemRange = 0;
4351 4353 if (VM_IS_NEM_ENABLED(pVM))
4352 4354 {
4353 4355 int rc = NEMR3NotifyPhysRomRegisterEarly(pVM, GCPhys, cPages << PAGE_SHIFT,
4354 4356 fRamExists ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhys) : NULL,
4355 fNemNotify, &u2NemState );
4357 fNemNotify, &u2NemState, fRamExists ? &pRam->uNemRange : &uNemRange);
4356 4358 AssertLogRelRCReturn(rc, rc);
4357 4359 }
… …
4435 4437 pRamNew->pvR3 = NULL;
4436 4438 pRamNew->paLSPages = NULL;
4439 #ifdef VBOX_WITH_NATIVE_NEM
4440 pRamNew->uNemRange = uNemRange;
4441 #endif
4437 4442
4438 4443 PPGMPAGE pRamPage = &pRamNew->aPages[idxFirstRamPage];
… …
4631 4636 u2NemState = UINT8_MAX;
4632 4637 rc = NEMR3NotifyPhysRomRegisterLate(pVM, GCPhys, cb, PGM_RAMRANGE_CALC_PAGE_R3PTR(pRamNew, GCPhys),
4633 fNemNotify, &u2NemState);
4638 fNemNotify, &u2NemState,
4639 fRamExists ? &pRam->uNemRange : &pRamNew->uNemRange);
4634 4640 if (u2NemState != UINT8_MAX)
4635 4641 pgmPhysSetNemStateForPages(&pRamNew->aPages[idxFirstRamPage], cPages, u2NemState);
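Aside (illustration only, not part of this changeset): the extra puNemRange argument threaded through these PGMPhys call sites points at per-RAM-range storage (pRam->uNemRange, or a local uNemRange that is copied into pRamNew->uNemRange once the new range exists; the fRamExists selection above presumably covers exactly that case). PGM only provides the storage; what gets stored in it is up to the NEM backend, for example a backend range index or slot handle. A small sketch of that contract, with invented names:

/* Illustration of the opaque per-range cookie pattern; not VirtualBox code. */
#include <stdint.h>

typedef struct EXAMPLERANGE
{
    uint64_t GCPhys;        /* guest-physical start of the range */
    uint64_t cb;            /* size in bytes */
    uint32_t uNemRange;     /* opaque to the owner, meaningful only to the backend */
} EXAMPLERANGE;

/* The backend records its own handle (e.g. a memory slot ID) when the range is mapped... */
static int exampleBackendMapRange(EXAMPLERANGE *pRange, uint32_t *puNemRange)
{
    *puNemRange = 7;        /* whatever handle the backend allocated */
    (void)pRange;
    return 0;
}

/* ...and receives the very same storage back on unmap so it can release that handle. */
static int exampleBackendUnmapRange(EXAMPLERANGE *pRange, uint32_t *puNemRange)
{
    /* release the handle stored in *puNemRange here */
    *puNemRange = 0;
    (void)pRange;
    return 0;
}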
-
trunk/src/VBox/VMM/include/NEMInternal.h
r92444 r92465
189 189 int32_t fdVm;
190 190
191 /** KVM_GET_VCPU_MMAP_SIZE. */
192 uint32_t cbVCpuMmap;
191 193 /** KVM_CAP_NR_MEMSLOTS. */
192 194 uint32_t cMaxMemSlots;
193 195 /** KVM_CAP_X86_ROBUST_SINGLESTEP. */
194 196 bool fRobustSingleStep;
195 /** KVM_GET_VCPU_MMAP_SIZE. */
196 uint32_t cbVCpuMmap;
197
198 /** Hint where there might be a free slot. */
199 uint16_t idPrevSlot;
200 /** Memory slot ID allocation bitmap. */
201 uint64_t bmSlotIds[_32K / 8 / sizeof(uint64_t)];
197 202
198 203 #elif defined(RT_OS_WINDOWS)
… …
356 361 STAMCOUNTER StatCancelChangedState;
357 362 STAMCOUNTER StatCancelAlertedThread;
363 # endif
358 364 STAMCOUNTER StatBreakOnCancel;
359 365 STAMCOUNTER StatBreakOnFFPre;
360 366 STAMCOUNTER StatBreakOnFFPost;
361 367 STAMCOUNTER StatBreakOnStatus;
362 # endif
363 368 STAMCOUNTER StatImportOnDemand;
364 369 STAMCOUNTER StatImportOnReturn;
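Aside (illustration only, not part of this changeset): the new fields give the Linux backend one bit per possible KVM memory slot (bmSlotIds holds _32K / 8 / sizeof(uint64_t) = 512 qwords, i.e. 32768 bits) plus a search hint (idPrevSlot). The allocator that uses them presumably lives in NEMR3Native-linux.cpp and is not shown in this hunk; the following is only a sketch of how such a bitmap allocator typically works, with invented names:

/* Hedged sketch of a bitmap slot-ID allocator; not the actual VirtualBox code. */
#include <stdint.h>

#define MAX_SLOTS   32768u  /* _32K */

typedef struct EXAMPLESLOTMAP
{
    uint16_t idPrevSlot;                    /* hint: where the last allocation happened */
    uint64_t bmSlotIds[MAX_SLOTS / 64];     /* one bit per slot ID */
} EXAMPLESLOTMAP;

/* Returns an unused slot ID and marks it allocated, or UINT16_MAX if all are taken. */
static uint16_t exampleAllocSlotId(EXAMPLESLOTMAP *pMap)
{
    for (uint32_t i = 0; i < MAX_SLOTS; i++)
    {
        uint32_t idSlot = (pMap->idPrevSlot + 1u + i) % MAX_SLOTS; /* start searching after the hint */
        uint64_t fMask  = UINT64_C(1) << (idSlot & 63);
        if (!(pMap->bmSlotIds[idSlot / 64] & fMask))
        {
            pMap->bmSlotIds[idSlot / 64] |= fMask;
            pMap->idPrevSlot = (uint16_t)idSlot;
            return (uint16_t)idSlot;
        }
    }
    return UINT16_MAX;
}

/* Releases a previously allocated slot ID. */
static void exampleFreeSlotId(EXAMPLESLOTMAP *pMap, uint16_t idSlot)
{
    pMap->bmSlotIds[idSlot / 64] &= ~(UINT64_C(1) << (idSlot & 63));
}

In this sketch the scan starts just past the hint, so recently freed IDs are not immediately reused and a mostly full bitmap is not rescanned from slot 0 on every allocation.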