Changeset 51643 in vbox

Timestamp: Jun 18, 2014, 11:06:06 AM (10 years ago)
Location:  trunk
Files:     26 edited
  - include/VBox/err.h (modified) (1 diff)
  - include/VBox/vmm/gim.h (modified) (5 diffs)
  - include/VBox/vmm/hm_vmx.h (modified) (7 diffs)
  - include/VBox/vmm/tm.h (modified) (1 diff)
  - include/VBox/vmm/vm.h (modified) (2 diffs)
  - include/VBox/vmm/vm.mac (modified) (1 diff)
  - src/VBox/Devices/GIMDev/GIMDev.cpp (modified) (1 diff)
  - src/VBox/Main/src-server/MachineImpl.cpp (modified) (1 diff)
  - src/VBox/VMM/Makefile.kmk (modified) (1 diff)
  - src/VBox/VMM/VMMAll/EMAll.cpp (modified) (1 diff)
  - src/VBox/VMM/VMMAll/GIMAll.cpp (modified) (2 diffs)
  - src/VBox/VMM/VMMAll/GIMAllHv.cpp (modified) (7 diffs)
  - src/VBox/VMM/VMMAll/PGMAllPhys.cpp (modified) (1 diff)
  - src/VBox/VMM/VMMAll/TMAllCpu.cpp (modified) (8 diffs)
  - src/VBox/VMM/VMMR0/GIMR0.cpp (modified) (2 diffs)
  - src/VBox/VMM/VMMR0/GIMR0Hv.cpp (modified) (4 diffs)
  - src/VBox/VMM/VMMR0/HMSVMR0.cpp (modified) (3 diffs)
  - src/VBox/VMM/VMMR0/HMVMXR0.cpp (modified) (20 diffs)
  - src/VBox/VMM/VMMR0/VMMR0.cpp (modified) (3 diffs)
  - src/VBox/VMM/VMMR3/GIM.cpp (modified) (14 diffs)
  - src/VBox/VMM/VMMR3/GIMHv.cpp (modified) (5 diffs)
  - src/VBox/VMM/VMMR3/HM.cpp (modified) (9 diffs)
  - src/VBox/VMM/include/GIMHvInternal.h (modified) (5 diffs)
  - src/VBox/VMM/include/GIMInternal.h (modified) (2 diffs)
  - src/VBox/VMM/include/HMInternal.h (modified) (27 diffs)
  - src/VBox/VMM/testcase/tstVMStructSize.cpp (modified) (2 diffs)
trunk/include/VBox/err.h (r51560 → r51643)

    /** Unknown or invalid GIM provider. */
    #define VERR_GIM_INVALID_PROVIDER               (-6306)
  + /** GIM generic operation failed. */
  + #define VERR_GIM_OPERATION_FAILED               (-6307)
  + /** The GIM provider does not support any hypercalls. */
  + #define VERR_GIM_HYPERCALLS_NOT_AVAILABLE       (-6308)
  + /** The guest has not setup use of the hypercalls. */
  + #define VERR_GIM_HYPERCALLS_NOT_ENABLED         (-6309)
  + /** The GIM device is not registered with GIM when it ought to be. */
  + #define VERR_GIM_DEVICE_NOT_REGISTERED          (-6310)
    /** @} */
trunk/include/VBox/vmm/gim.h (r51563 → r51643)

        GIMPROVIDERID_KVM
    } GIMPROVIDERID;
  - AssertCompileSize(GIMPROVIDERID, 4);
  + AssertCompileSize(GIMPROVIDERID, sizeof(uint32_t));
  ...
        bool        fMapped;
        /** Alignment padding. */
  -     uint8_t     au8Alignment0[4];
  +     uint8_t     au8Alignment0[3];
        /** Size of the region (must be page aligned). */
        uint32_t    cbRegion;
  ...
    /** Pointer to a const GIM MMIO2 region. */
    typedef GIMMMIO2REGION const *PCGIMMMIO2REGION;
  - AssertCompileMemberAlignment(GIMMMIO2REGION, cbRegion,  8);
  - AssertCompileMemberAlignment(GIMMMIO2REGION, pvPageR0,  8);
  + AssertCompileMemberAlignment(GIMMMIO2REGION, cbRegion, 8);
  + AssertCompileMemberAlignment(GIMMMIO2REGION, pvPageR0, 8);
  ...
    VMMR0_INT_DECL(int)     GIMR0InitVM(PVM pVM);
    VMMR0_INT_DECL(int)     GIMR0TermVM(PVM pVM);
  + VMMR0_INT_DECL(int)     GIMR0UpdateParavirtTsc(PVM pVM, uint64_t u64Offset);
    /** @} */
    #endif /* IN_RING0 */
  ...
    VMMDECL(bool)           GIMIsEnabled(PVM pVM);
  - VMMDECL(int)            GIMUpdateParavirtTsc(PVM pVM, uint64_t u64Offset);
  + VMMDECL(GIMPROVIDERID)  GIMGetProvider(PVM pVM);
    VMMDECL(bool)           GIMIsParavirtTscEnabled(PVM pVM);
    VMM_INT_DECL(int)       GIMHypercall(PVMCPU pVCpu, PCPUMCTX pCtx);
trunk/include/VBox/vmm/hm_vmx.h (r50905 → r51643)

  - * Copyright (C) 2006-2013 Oracle Corporation
  + * Copyright (C) 2006-2014 Oracle Corporation
  ...
    AssertCompileMemberOffset(VMXRESTOREHOST, uHostFSBase, 40);
    AssertCompileSize(VMXRESTOREHOST, 56);
  + AssertCompileSizeAlignment(VMXRESTOREHOST, 8);

    /** @name Host-state MSR lazy-restoration flags.
  ...
    typedef const EPTPT *PCEPTPT;

  - /**
  -  * VPID flush types.
  + /** @name VMX VPID flush types.
  +  * Warning!! Valid enum members are in accordance to the VT-x spec.
  +  * @{
     */
    typedef enum
    {
        /** Invalidate a specific page. */
  -     VMX_FLUSH_VPID_INDIV_ADDR                    = 0,
  +     VMXFLUSHVPID_INDIV_ADDR                      = 0,
        /** Invalidate one context (specific VPID). */
  -     VMX_FLUSH_VPID_SINGLE_CONTEXT                = 1,
  +     VMXFLUSHVPID_SINGLE_CONTEXT                  = 1,
        /** Invalidate all contexts (all VPIDs). */
  -     VMX_FLUSH_VPID_ALL_CONTEXTS                  = 2,
  +     VMXFLUSHVPID_ALL_CONTEXTS                    = 2,
        /** Invalidate a single VPID context retaining global mappings. */
  -     VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS = 3,
  +     VMXFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS   = 3,
        /** Unsupported by VirtualBox. */
  -     VMX_FLUSH_VPID_NOT_SUPPORTED                 = 0xbad,
  +     VMXFLUSHVPID_NOT_SUPPORTED                   = 0xbad0,
        /** Unsupported by CPU. */
  -     VMX_FLUSH_VPID_NONE                          = 0xb00,
  -     /** 32bit hackishness. */
  -     VMX_FLUSH_VPID_32BIT_HACK                    = 0x7fffffff
  - } VMX_FLUSH_VPID;
  +     VMXFLUSHVPID_NONE                            = 0xbad1
  + } VMXFLUSHVPID;
  + AssertCompileSize(VMXFLUSHVPID, 4);
  + /** @} */

  - /**
  -  * EPT flush types.
  + /** @name VMX EPT flush types.
  +  * Warning!! Valid enums values below are in accordance to the VT-x spec.
  +  * @{
     */
    typedef enum
    {
        /** Invalidate one context (specific EPT). */
  -     VMX_FLUSH_EPT_SINGLE_CONTEXT = 1,
  +     VMXFLUSHEPT_SINGLE_CONTEXT   = 1,
        /* Invalidate all contexts (all EPTs) */
  -     VMX_FLUSH_EPT_ALL_CONTEXTS   = 2,
  +     VMXFLUSHEPT_ALL_CONTEXTS     = 2,
        /** Unsupported by VirtualBox. */
  -     VMX_FLUSH_EPT_NOT_SUPPORTED  = 0xbad,
  +     VMXFLUSHEPT_NOT_SUPPORTED    = 0xbad0,
        /** Unsupported by CPU. */
  -     VMX_FLUSH_EPT_NONE           = 0xb00,
  -     /** 32bit hackishness. */
  -     VMX_FLUSH_EPT_32BIT_HACK     = 0x7fffffff
  - } VMX_FLUSH_EPT;
  +     VMXFLUSHEPT_NONE             = 0xbad1
  + } VMXFLUSHEPT;
  + AssertCompileSize(VMXFLUSHEPT, 4);
    /** @} */

  - /** @name MSR autoload/store elements
  + /** @name VMX MSR autoload/store element.
  +  * In accordance to VT-x spec.
     * @{
     */
    typedef struct
    {
  +     /** The MSR Id. */
        uint32_t    u32Msr;
  +     /** Reserved (MBZ). */
        uint32_t    u32Reserved;
  +     /** The MSR value. */
        uint64_t    u64Value;
    } VMXAUTOMSR;
  ...
    /** Pointer to a VMXMSRS struct. */
    typedef VMXMSRS *PVMXMSRS;
  + AssertCompileSizeAlignment(VMXMSRS, 8);
    /** @} */
  ...
     * @param   pDescriptor     Descriptor
     */
  - DECLASM(int) VMXR0InvEPT(VMX_FLUSH_EPT enmFlush, uint64_t *pDescriptor);
  + DECLASM(int) VMXR0InvEPT(VMXFLUSHEPT enmFlush, uint64_t *pDescriptor);
  ...
     * @param   pDescriptor     Descriptor
     */
  - DECLASM(int) VMXR0InvVPID(VMX_FLUSH_VPID enmFlush, uint64_t *pDescriptor);
  + DECLASM(int) VMXR0InvVPID(VMXFLUSHVPID enmFlush, uint64_t *pDescriptor);
trunk/include/VBox/vmm/tm.h (r44528 → r51643)

    VMMDECL(uint64_t)       TMCpuTickGet(PVMCPU pVCpu);
    VMM_INT_DECL(uint64_t)  TMCpuTickGetNoCheck(PVMCPU pVCpu);
  - VMM_INT_DECL(bool)      TMCpuTickCanUseRealTSC(PVMCPU pVCpu, uint64_t *poffRealTSC);
  - VMM_INT_DECL(uint64_t)  TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, bool *pfOffsettedTsc, uint64_t *poffRealTSC);
  + VMM_INT_DECL(bool)      TMCpuTickCanUseRealTSC(PVMCPU pVCpu, uint64_t *poffRealTSC, bool *pfParavirtTsc);
  + VMM_INT_DECL(uint64_t)  TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, bool *pfOffsettedTsc, bool *pfParavirtTsc, uint64_t *poffRealTSC);
    VMM_INT_DECL(int)       TMCpuTickSet(PVM pVM, PVMCPU pVCpu, uint64_t u64Tick);
    VMM_INT_DECL(int)       TMCpuTickSetLastSeen(PVMCPU pVCpu, uint64_t u64LastSeenTick);
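As a reading aid (not part of the changeset): a minimal sketch of how a caller consumes the new pfParavirtTsc out-parameter. The stand-in VMCPU type and the helper name are illustrative assumptions; only the parameter flow mirrors the new tm.h signature above.

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct VMCPU VMCPU;  /* stand-in for the real VBox type */
    extern bool TMCpuTickCanUseRealTSC(VMCPU *pVCpu, uint64_t *poffRealTSC, bool *pfParavirtTsc);

    static void updateTscHandling(VMCPU *pVCpu)
    {
        uint64_t offTsc     = 0;
        bool     fParavirt  = false;
        bool     fOffsetted = TMCpuTickCanUseRealTSC(pVCpu, &offTsc, &fParavirt);

        if (fParavirt)
        {
            /* Publish offTsc through the provider's TSC page; run the guest
               with RDTSC unintercepted and a zero hardware TSC offset. */
        }
        else if (fOffsetted)
        {
            /* Program offTsc into the VMCS/VMCB TSC-offset field. */
        }
        else
        {
            /* Intercept RDTSC/RDTSCP and emulate the reads. */
        }
    }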
trunk/include/VBox/vmm/vm.h (r51560 → r51643)

        struct HMCPU s;
    #endif
  -     uint8_t padding[5632]; /* multiple of 64 */
  +     uint8_t padding[5696]; /* multiple of 64 */
    } hm;
  ...
    /** Align the following members on page boundary. */
  - uint8_t abAlignment2[64];
  + //uint8_t abAlignment2[64];

    /** PGM part. */
trunk/include/VBox/vmm/vm.mac (r51560 → r51643)

    .cpum resb 3584
  - .hm   resb 5632
  + .hm   resb 5696
    .em   resb 1472
    .iem  resb 3072
trunk/src/VBox/Devices/GIMDev/GIMDev.cpp (r51561 → r51643)

            else
                pCur->pvPageRC = NIL_RTRCPTR;
  +
  +         LogRel(("GIMDev: Registered %s\n", pCur->szDescription));
        }
    }
trunk/src/VBox/Main/src-server/MachineImpl.cpp (r51567 → r51643)

    mHWData->mKeyboardHIDType = data.keyboardHIDType;
    mHWData->mChipsetType = data.chipsetType;
  + mHWData->mParavirtProvider = data.paravirtProvider;
    mHWData->mEmulatedUSBCardReaderEnabled = data.fEmulatedUSBCardReader;
    mHWData->mHPETEnabled = data.fHPETEnabled;
trunk/src/VBox/VMM/Makefile.kmk (r51165 → r51643)

    VMMR0/CPUMR0.cpp \
    VMMR0/CPUMR0A.asm \
  + VMMR0/GIMR0.cpp \
  + VMMR0/GIMR0Hv.cpp \
    VMMR0/GMMR0.cpp \
    VMMR0/GVMMR0.cpp \
trunk/src/VBox/VMM/VMMAll/EMAll.cpp (r51422 → r51643)

    if (RT_UNLIKELY(rc != VINF_SUCCESS))
    {
  -     Assert(rc == VERR_CPUM_RAISE_GP_0);
  +     Assert(rc == VERR_CPUM_RAISE_GP_0 || rc == VERR_EM_INTERPRETER);
        Log4(("EM: Refuse RDMSR: rc=%Rrc\n", rc));
        return VERR_EM_INTERPRETER;
trunk/src/VBox/VMM/VMMAll/GIMAll.cpp (r51563 → r51643)

  + /**
  +  * Gets the GIM provider configured for this VM.
  +  *
  +  * @returns The GIM provider Id.
  +  * @param   pVM     Pointer to the VM.
  +  */
  + VMMDECL(GIMPROVIDERID) GIMGetProvider(PVM pVM)
  + {
  +     return pVM->gim.s.enmProviderId;
  + }
  +
  +
    /**
     * Implements a GIM hypercall with the provider configured for the VM.
     *
  ...
        }
    }
  -
  -
  - /**
  -  * Updates the paravirtualized TSC supported by the GIM provider.
  -  *
  -  * @returns VBox status code.
  -  * @retval  VINF_SUCCESS if the paravirt. TSC is setup and in use.
  -  * @retval  VERR_GIM_NOT_ENABLED if no GIM provider is configured for this VM.
  -  * @retval  VERR_GIM_PVTSC_NOT_AVAILABLE if the GIM provider does not support any
  -  *          paravirt. TSC.
  -  * @retval  VERR_GIM_PVTSC_NOT_IN_USE if the GIM provider supports paravirt. TSC
  -  *          but the guest isn't currently using it.
  -  *
  -  * @param   pVM         Pointer to the VM.
  -  * @param   u64Offset   The computed TSC offset.
  -  *
  -  * @thread EMT(pVCpu)
  -  */
  - VMMDECL(int) GIMUpdateParavirtTsc(PVM pVM, uint64_t u64Offset)
  - {
  -     if (!pVM->gim.s.fEnabled)
  -         return VERR_GIM_NOT_ENABLED;
  -
  -     switch (pVM->gim.s.enmProviderId)
  -     {
  -         case GIMPROVIDERID_HYPERV:
  -             return GIMHvUpdateParavirtTsc(pVM, u64Offset);
  -
  -         default:
  -             break;
  -     }
  -     return VERR_GIM_PVTSC_NOT_AVAILABLE;
  - }
  -
    VMMDECL(bool) GIMIsParavirtTscEnabled(PVM pVM)
trunk/src/VBox/VMM/VMMAll/GIMAllHv.cpp (r51563 → r51643)

    #include <VBox/vmm/vm.h>
    #include <VBox/vmm/pgm.h>
  + #include <VBox/vmm/pdmdev.h>
  +
  + #include <iprt/asm-amd64-x86.h>
  + #include <iprt/spinlock.h>
  ...
    {
        return MSR_GIM_HV_REF_TSC_IS_ENABLED(pVM->gim.s.u.Hv.u64TscPageMsr);
    }
  -
  -
  - /**
  -  * Updates Hyper-V's reference TSC page.
  -  *
  -  * @returns VBox status code.
  -  * @param   pVM         Pointer to the VM.
  -  * @param   u64Offset   The computed TSC offset.
  -  * @thread  EMT(pVCpu)
  -  */
  - VMM_INT_DECL(int) GIMHvUpdateParavirtTsc(PVM pVM, uint64_t u64Offset)
  - {
  -     Assert(GIMIsEnabled(pVM));
  -     bool fHvTscEnabled = MSR_GIM_HV_REF_TSC_IS_ENABLED(pVM->gim.s.u.Hv.u64TscPageMsr);
  -     if (!fHvTscEnabled)
  -         return VERR_GIM_PVTSC_NOT_ENABLED;
  -
  -     PGIMHV          pHv     = &pVM->gim.s.u.Hv;
  -     PGIMMMIO2REGION pRegion = &pHv->aMmio2Regions[GIM_HV_HYPERCALL_PAGE_REGION_IDX];
  -     PGIMHVREFTSC    pRefTsc = (PGIMHVREFTSC)pRegion->CTX_SUFF(pvPage);
  -     Assert(pRefTsc);
  -
  -     /** @todo Protect this with a spinlock! */
  -     pRefTsc->u64TscScale  = UINT64_C(0x1000000000000000);
  -     pRefTsc->u64TscOffset = u64Offset;
  -     ASMAtomicIncU32(&pRefTsc->u32TscSequence);
  -
  -     return VINF_SUCCESS;
  - }
  ...
    case MSR_GIM_HV_TIME_REF_COUNT:
    {
  -     /* Hyper-V reports the time in 100 ns units. */
  +     /* Hyper-V reports the time in 100 ns units (10 MHz). */
        uint64_t u64Tsc   = TMCpuTickGet(pVCpu);
        uint64_t u64TscHz = TMCpuTicksPerSecond(pVM);
  ...
    case MSR_GIM_HV_TSC_FREQ:
  -     *puValue = TMCpuTicksPerSecond(pVM);
  -     return VINF_SUCCESS;
  + #ifndef IN_RING3
  +     return VERR_EM_INTERPRETER;
  + #else
  +     LogRel(("GIM: MSR_GIM_HV_TSC_FREQ %u\n", TMCpuTicksPerSecond(pVM)));
  +     //*puValue = TMCpuTicksPerSecond(pVM);
  +     *puValue = 2690000000;
  +     return VINF_SUCCESS;
  + #endif

    case MSR_GIM_HV_APIC_FREQ:
        /** @todo Fix this later! Get the information from DevApic. */
        *puValue = UINT32_C(1000000000); /* TMCLOCK_FREQ_VIRTUAL */
  +     return VINF_SUCCESS;
  +
  + case MSR_GIM_HV_RESET:
  +     *puValue = 0;
        return VINF_SUCCESS;
  ...
    if (!uRawValue)
    {
  -     GIMR3Mmio2Unmap(pVM, &pHv->aMmio2Regions[GIM_HV_HYPERCALL_PAGE_REGION_IDX]);
  +     GIMR3HvDisableHypercallPage(pVM);
        pHv->u64HypercallMsr &= ~MSR_GIM_HV_HYPERCALL_ENABLE_BIT;
  -     Log4Func(("Disabled hypercalls\n"));
    }
    pHv->u64GuestOsIdMsr = uRawValue;
  ...
    }

  - PPDMDEVINSR3    pDevIns = pVM->gim.s.pDevInsR3;
  - PGIMMMIO2REGION pRegion = &pHv->aMmio2Regions[GIM_HV_HYPERCALL_PAGE_REGION_IDX];
  - AssertPtr(pDevIns);
  - AssertPtr(pRegion);
  -
  - /*
  -  * Is the guest disabling the hypercall-page? Allow it regardless of the Guest-OS Id Msr.
  -  */
  + /* Is the guest disabling the hypercall-page? Allow it regardless of the Guest-OS Id Msr. */
    if (!fEnable)
    {
  -     GIMR3Mmio2Unmap(pVM, pRegion);
  +     GIMR3HvDisableHypercallPage(pVM);
        pHv->u64HypercallMsr = uRawValue;
  -     Log4Func(("Disabled hypercalls\n"));
        return VINF_SUCCESS;
    }

  - /*
  -  * Map the hypercall-page.
  -  */
  + /* Enable the hypercall-page. */
    RTGCPHYS GCPhysHypercallPage = MSR_GIM_HV_HYPERCALL_GUEST_PFN(uRawValue) << PAGE_SHIFT;
  - int rc = GIMR3Mmio2Map(pVM, pRegion, GCPhysHypercallPage, "Hyper-V Hypercall-page");
  + int rc = GIMR3HvEnableHypercallPage(pVM, GCPhysHypercallPage);
    if (RT_SUCCESS(rc))
    {
  -     /*
  -      * Patch the hypercall-page.
  -      */
  -     if (HMIsEnabled(pVM))
  -     {
  -         size_t cbWritten = 0;
  -         rc = HMPatchHypercall(pVM, pRegion->pvPageR3, PAGE_SIZE, &cbWritten);
  -         if (   RT_SUCCESS(rc)
  -             && cbWritten < PAGE_SIZE - 1)
  -         {
  -             uint8_t *pbLast = (uint8_t *)pRegion->pvPageR3 + cbWritten;
  -             *pbLast = 0xc3;   /* RET */
  -
  -             pHv->u64HypercallMsr = uRawValue;
  -             LogRelFunc(("Enabled hypercalls at %#RGp\n", GCPhysHypercallPage));
  -             LogRelFunc(("%.*Rhxd\n", cbWritten + 1, (uint8_t *)pRegion->pvPageR3));
  -             return VINF_SUCCESS;
  -         }
  -
  -         LogFunc(("MSR_GIM_HV_HYPERCALL: HMPatchHypercall failed. rc=%Rrc cbWritten=%u\n", rc, cbWritten));
  -     }
  -     else
  -     {
  -         /** @todo Handle raw-mode hypercall page patching. */
  -         LogRelFunc(("MSR_GIM_HV_HYPERCALL: raw-mode not yet implemented!\n"));
  -     }
  -
  -     GIMR3Mmio2Unmap(pVM, pRegion);
  - }
  - else
  -     LogFunc(("MSR_GIM_HV_HYPERCALL: GIMR3Mmio2Map failed. rc=%Rrc -> #GP(0)\n", rc));
  +     pHv->u64HypercallMsr = uRawValue;
  +     return VINF_SUCCESS;
  + }

    return VERR_CPUM_RAISE_GP_0;
  ...
    pHv->u64TscPageMsr = (uRawValue & ~MSR_GIM_HV_REF_TSC_ENABLE_BIT);

  - PPDMDEVINSR3    pDevIns = pVM->gim.s.pDevInsR3;
  - PGIMMMIO2REGION pRegion = &pHv->aMmio2Regions[GIM_HV_REF_TSC_PAGE_REGION_IDX];
  - AssertPtr(pDevIns);
  - AssertPtr(pRegion);
  -
  - /*
  -  * Is the guest disabling the TSC-page?
  -  */
  + /* Is the guest disabling the TSC-page? */
    bool fEnable = RT_BOOL(uRawValue & MSR_GIM_HV_REF_TSC_ENABLE_BIT);
    if (!fEnable)
    {
  -     GIMR3Mmio2Unmap(pVM, pRegion);
  -     Log4Func(("Disabled TSC-page\n"));
  -     return VINF_SUCCESS;
  - }
  -
  - /*
  -  * Map the TSC-page.
  -  */
  +     GIMR3HvDisableTscPage(pVM);
  +     pHv->u64TscPageMsr = uRawValue;
  +     return VINF_SUCCESS;
  + }
  +
  + /* Enable the TSC-page. */
    RTGCPHYS GCPhysTscPage = MSR_GIM_HV_REF_TSC_GUEST_PFN(uRawValue) << PAGE_SHIFT;
  - int rc = GIMR3Mmio2Map(pVM, pRegion, GCPhysTscPage, "Hyper-V TSC-page");
  + int rc = GIMR3HvEnableTscPage(pVM, GCPhysTscPage);
    if (RT_SUCCESS(rc))
    {
        pHv->u64TscPageMsr = uRawValue;
  -     Log4Func(("MSR_GIM_HV_REF_TSC: Enabled Hyper-V TSC page at %#RGp\n", GCPhysTscPage));
        return VINF_SUCCESS;
    }
  - else
  -     LogFunc(("MSR_GIM_HV_REF_TSC: GIMR3Mmio2Map failed. rc=%Rrc -> #GP(0)\n", rc));

    return VERR_CPUM_RAISE_GP_0;
    #endif /* !IN_RING3 */
    }

  + case MSR_GIM_HV_RESET:
  + {
  + #ifndef IN_RING3
  +     return VERR_EM_INTERPRETER;
  + #else
  +     if (MSR_GIM_HV_RESET_IS_SET(uRawValue))
  +     {
  +         LogRel(("GIM: HyperV: Reset initiated by MSR.\n"));
  +         int rc = PDMDevHlpVMReset(pVM->gim.s.pDevInsR3);
  +         AssertRC(rc);
  +     }
  +     /* else: Ignore writes to other bits. */
  +     return VINF_SUCCESS;
  + #endif /* !IN_RING3 */
  + }
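Aside (not part of the changeset): the "100 ns units (10 MHz)" comment above determines how MSR_GIM_HV_TIME_REF_COUNT scales the virtual TSC. A hedged sketch of the conversion, assuming integer division and a guest TSC frequency of at least 10 MHz (the helper name is illustrative):

    #include <stdint.h>

    /* E.g. with a 2.5 GHz TSC, 2'500'000'000 ticks (one second) map to
       10'000'000 reference units of 100 ns each. */
    static uint64_t hvTimeRefCount(uint64_t u64Tsc, uint64_t u64TscHz)
    {
        return u64Tsc / (u64TscHz / UINT64_C(10000000));
    }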
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp (r49640 → r51643)

    uint8_t  idMmio2 = PGM_MMIO2_PAGEID_GET_MMIO2_ID(PGM_PAGE_GET_PAGEID(pPage));
    uint32_t iPage   = PGM_MMIO2_PAGEID_GET_IDX(PGM_PAGE_GET_PAGEID(pPage));
  - AssertLogRelReturn((uint8_t)(idMmio2 - 1U) < RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)),
  -                    VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
  + AssertLogRelMsgReturn((uint8_t)(idMmio2 - 1U) < RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)),
  +                       ("idMmio2=%u size=%u type=%u GCPHys=%#RGp Id=%u State=%u", idMmio2,
  +                        RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)), PGM_PAGE_GET_TYPE(pPage), GCPhys,
  +                        pPage->s.idPage, pPage->s.uStateY),
  +                       VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
    PPGMMMIO2RANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1];
    AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp (r44933 → r51643)

    #include "TMInternal.h"
    #include <VBox/vmm/vm.h>
  + #include <VBox/vmm/gim.h>
    #include <VBox/sup.h>
  ...
     *
     * @returns true/false accordingly.
  -  * @param   pVCpu       Pointer to the VMCPU.
  +  * @param   pVCpu           Pointer to the VMCPU.
     * @param   poffRealTSC     The offset against the TSC of the current CPU.
     *                          Can be NULL.
  -  * @thread EMT.
  -  */
  - VMM_INT_DECL(bool) TMCpuTickCanUseRealTSC(PVMCPU pVCpu, uint64_t *poffRealTSC)
  +  * @param   pfParavirtTsc   Where to store whether paravirt. TSC can be used or
  +  *                          not.
  +  * @thread  EMT(pVCpu).
  +  */
  + VMM_INT_DECL(bool) TMCpuTickCanUseRealTSC(PVMCPU pVCpu, uint64_t *poffRealTSC, bool *pfParavirtTsc)
    {
        PVM pVM = pVCpu->CTX_SUFF(pVM);
  +     bool fParavirtTsc = false;

        /*
         * We require:
  +      * 1. Use of a paravirtualized TSC is enabled by the guest.
  +      *    (OR)
         * 1. A fixed TSC, this is checked at init time.
         * 2. That the TSC is ticking (we shouldn't be here if it isn't)
  ...
         *    c) we're not using warp drive (accelerated virtual guest time).
         */
  -     if (    pVM->tm.s.fMaybeUseOffsettedHostTSC
  -         &&  RT_LIKELY(pVCpu->tm.s.fTSCTicking)
  -         &&  (   pVM->tm.s.fTSCUseRealTSC
  -              || (   !pVM->tm.s.fVirtualSyncCatchUp
  -                  && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
  -                  && !pVM->tm.s.fVirtualWarpDrive))
  -        )
  +     if (   (*pfParavirtTsc = GIMIsParavirtTscEnabled(pVM)) == true
  +         || (   pVM->tm.s.fMaybeUseOffsettedHostTSC
  +             && RT_LIKELY(pVCpu->tm.s.fTSCTicking)
  +             && (   pVM->tm.s.fTSCUseRealTSC
  +                 || (   !pVM->tm.s.fVirtualSyncCatchUp
  +                     && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
  +                     && !pVM->tm.s.fVirtualWarpDrive))))
        {
            if (!pVM->tm.s.fTSCUseRealTSC)
  ...
     * @returns The number of host CPU clock ticks to the next timer deadline.
     * @param   pVCpu           The current CPU.
  +  * @param   pfParavirtTsc   Where to store whether paravirt. TSC can be used or
  +  *                          not.
     * @param   poffRealTSC     The offset against the TSC of the current CPU.
  +  *
     * @thread  EMT(pVCpu).
  -  * @remarks Superset of TMCpuTickCanUseRealTSC.
  -  */
  - VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, bool *pfOffsettedTsc, uint64_t *poffRealTSC)
  +  * @remarks Superset of TMCpuTickCanUseRealTSC().
  +  */
  + VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, bool *pfOffsettedTsc, bool *pfParavirtTsc,
  +                                                         uint64_t *poffRealTSC)
    {
        PVM pVM = pVCpu->CTX_SUFF(pVM);
  ...
        /*
         * We require:
  +      * 1. Use of a paravirtualized TSC is enabled by the guest.
  +      *    (OR)
         * 1. A fixed TSC, this is checked at init time.
         * 2. That the TSC is ticking (we shouldn't be here if it isn't)
  ...
         *    c) we're not using warp drive (accelerated virtual guest time).
         */
  -     if (    pVM->tm.s.fMaybeUseOffsettedHostTSC
  -         &&  RT_LIKELY(pVCpu->tm.s.fTSCTicking)
  -         &&  (   pVM->tm.s.fTSCUseRealTSC
  -              || (   !pVM->tm.s.fVirtualSyncCatchUp
  -                  && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
  -                  && !pVM->tm.s.fVirtualWarpDrive))
  -        )
  +     if (   (*pfParavirtTsc = GIMIsParavirtTscEnabled(pVM)) == true
  +         || (   pVM->tm.s.fMaybeUseOffsettedHostTSC
  +             && RT_LIKELY(pVCpu->tm.s.fTSCTicking)
  +             && (   pVM->tm.s.fTSCUseRealTSC
  +                 || (   !pVM->tm.s.fVirtualSyncCatchUp
  +                     && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
  +                     && !pVM->tm.s.fVirtualWarpDrive))))
        {
            *pfOffsettedTsc = true;
  ...
            cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
        }
  +
        return cTicksToDeadline;
    }
  ...
    /**
  -  * Gets the last seen CPU timestamp counter.
  -  *
  -  * @returns last seen TSC
  +  * Gets the last seen CPU timestamp counter of the guest.
  +  *
  +  * @returns the last seen TSC.
     * @param   pVCpu       Pointer to the VMCPU.
     *
  -  * @thread  EMT which TSC is to be set.
  +  * @thread  EMT(pVCpu).
     */
    VMM_INT_DECL(uint64_t) TMCpuTickGetLastSeen(PVMCPU pVCpu)
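Aside (not part of the changeset): the rewritten condition above is easier to see as a predicate where the guest-enabled paravirt TSC short-circuits every offsetted-TSC precondition. A condensed sketch with illustrative flag names:

    #include <stdbool.h>

    static bool canUseRealTsc(bool fParavirt, bool fMaybeOffsetted, bool fTicking,
                              bool fUseRealTsc, bool fCatchUp, bool fSyncTicking, bool fWarp)
    {
        return fParavirt
            || (   fMaybeOffsetted && fTicking
                && (fUseRealTsc || (!fCatchUp && fSyncTicking && !fWarp)));
    }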
trunk/src/VBox/VMM/VMMR0/GIMR0.cpp (r51560 → r51643)

    #include "GIMHvInternal.h"

  - #include <iprt/err.h>
  + #include <VBox/err.h>
    #include <VBox/vmm/vm.h>
  ...
    }

  + /**
  +  * Updates the paravirtualized TSC supported by the GIM provider.
  +  *
  +  * @returns VBox status code.
  +  * @retval  VINF_SUCCESS if the paravirt. TSC is setup and in use.
  +  * @retval  VERR_GIM_NOT_ENABLED if no GIM provider is configured for this VM.
  +  * @retval  VERR_GIM_PVTSC_NOT_AVAILABLE if the GIM provider does not support any
  +  *          paravirt. TSC.
  +  * @retval  VERR_GIM_PVTSC_NOT_IN_USE if the GIM provider supports paravirt. TSC
  +  *          but the guest isn't currently using it.
  +  *
  +  * @param   pVM         Pointer to the VM.
  +  * @param   u64Offset   The computed TSC offset.
  +  *
  +  * @thread EMT(pVCpu)
  +  */
  + VMMR0_INT_DECL(int) GIMR0UpdateParavirtTsc(PVM pVM, uint64_t u64Offset)
  + {
  +     if (!pVM->gim.s.fEnabled)
  +         return VERR_GIM_NOT_ENABLED;
  +
  +     switch (pVM->gim.s.enmProviderId)
  +     {
  +         case GIMPROVIDERID_HYPERV:
  +             return GIMR0HvUpdateParavirtTsc(pVM, u64Offset);
  +
  +         default:
  +             break;
  +     }
  +     return VERR_GIM_PVTSC_NOT_AVAILABLE;
  + }
trunk/src/VBox/VMM/VMMR0/GIMR0Hv.cpp (r51560 → r51643)

    #include "GIMHvInternal.h"

  - #include <iprt/err.h>
  - #include <iprt/asm.h>
  - #include <iprt/memobj.h>
  + #include <VBox/err.h>
    #include <VBox/vmm/gim.h>
    #include <VBox/vmm/vm.h>

  + #include <iprt/spinlock.h>
  +
  + #if 0
    /**
     * Allocates and maps one physically contiguous page. The allocated page is
  ...
    }
  + #endif

  + /**
  +  * Updates Hyper-V's reference TSC page.
  +  *
  +  * @returns VBox status code.
  +  * @param   pVM         Pointer to the VM.
  +  * @param   u64Offset   The computed TSC offset.
  +  * @thread  EMT.
  +  */
  + VMM_INT_DECL(int) GIMR0HvUpdateParavirtTsc(PVM pVM, uint64_t u64Offset)
  + {
  +     Assert(GIMIsEnabled(pVM));
  +     bool fHvTscEnabled = MSR_GIM_HV_REF_TSC_IS_ENABLED(pVM->gim.s.u.Hv.u64TscPageMsr);
  +     if (RT_UNLIKELY(!fHvTscEnabled))
  +         return VERR_GIM_PVTSC_NOT_ENABLED;
  +
  +     PCGIMHV          pcHv     = &pVM->gim.s.u.Hv;
  +     PCGIMMMIO2REGION pcRegion = &pcHv->aMmio2Regions[GIM_HV_HYPERCALL_PAGE_REGION_IDX];
  +     PGIMHVREFTSC     pRefTsc  = (PGIMHVREFTSC)pcRegion->CTX_SUFF(pvPage);
  +     Assert(pRefTsc);
  +
  +     RTSpinlockAcquire(pcHv->hSpinlockR0);
  +     pRefTsc->i64TscOffset = u64Offset;
  +     if (pRefTsc->u32TscSequence < UINT32_C(0xfffffffe))
  +         ASMAtomicIncU32(&pRefTsc->u32TscSequence);
  +     else
  +         ASMAtomicWriteU32(&pRefTsc->u32TscSequence, 1);
  +     RTSpinlockRelease(pcHv->hSpinlockR0);
  +
  +     Assert(pRefTsc->u32TscSequence != 0);
  +     Assert(pRefTsc->u32TscSequence != UINT32_C(0xffffffff));
  +     return VINF_SUCCESS;
  + }
  ...
    VMMR0_INT_DECL(int) GIMR0HvInitVM(PVM pVM)
    {
  - #if 0
        AssertPtr(pVM);
        Assert(GIMIsEnabled(pVM));

        PGIMHV pHv = &pVM->gim.s.u.Hv;
  -     Assert(pHv->hMemObjTscPage == NIL_RTR0MEMOBJ);
  +     Assert(pHv->hSpinlockR0 == NIL_RTSPINLOCK);

  -     /*
  -      * Allocate the TSC page.
  -      */
  -     int rc = gimR0HvPageAllocZ(&pHv->hMemObjTscPage, &pHv->pvTscPageR0, &pHv->HCPhysTscPage);
  -     if (RT_FAILURE(rc))
  -         goto cleanup;
  - #endif
  -
  -     return VINF_SUCCESS;
  -
  - #if 0
  - cleanup:
  -     gimR0HvPageFree(&pHv->hMemObjTscPage, &pHv->pvTscPageR0, &pHv->HCPhysTscPage);
  +     int rc = RTSpinlockCreate(&pHv->hSpinlockR0, RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, "Hyper-V");
        return rc;
  - #endif
    }
  ...
        AssertPtr(pVM);
        Assert(GIMIsEnabled(pVM));
  - #if 0
  +
        PGIMHV pHv = &pVM->gim.s.u.Hv;
  -     gimR0HvPageFree(&pHv->hMemObjTscPage, &pHv->pvTscPageR0, &pHv->HCPhysTscPage);
  - #endif
  +     RTSpinlockDestroy(pHv->hSpinlockR0);
  +     pHv->hSpinlockR0 = NIL_RTSPINLOCK;
  +
        return VINF_SUCCESS;
    }
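Aside (not part of the changeset): the writer above bumps u32TscSequence around every offset update, skipping 0 and 0xffffffff. This pairs with a sequence-counter read loop on the guest side. A hedged sketch of that reader, assuming standard Hyper-V reference-TSC-page semantics; mulhi64 and the struct layout here are stand-ins mirroring GIMHVREFTSC from the header diff further down:

    #include <stdint.h>

    typedef struct HVREFTSC
    {
        volatile uint32_t u32TscSequence;
        uint32_t          uReserved0;
        volatile uint64_t u64TscScale;   /* 64.64 fixed-point multiplier */
        volatile int64_t  i64TscOffset;
    } HVREFTSC;

    /* High 64 bits of a 64x64->128 multiply (GCC/Clang extension). */
    static uint64_t mulhi64(uint64_t a, uint64_t b)
    {
        return (uint64_t)(((unsigned __int128)a * b) >> 64);
    }

    /* Reference time in 100 ns units; uTsc is the guest-read TSC value. */
    static uint64_t hvReadRefTime(const HVREFTSC *p, uint64_t uTsc)
    {
        uint32_t uSeq;
        uint64_t uTime;
        do
        {
            uSeq  = p->u32TscSequence;            /* snapshot the sequence */
            uTime = mulhi64(uTsc, p->u64TscScale) + (uint64_t)p->i64TscOffset;
        } while (p->u32TscSequence != uSeq);      /* host updated mid-read: retry */
        return uTime;
    }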
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r51421 → r51643)

    #include <VBox/vmm/iom.h>
    #include <VBox/vmm/tm.h>
  + #include <VBox/vmm/gim.h>

    #ifdef DEBUG_ramshankar
  ...
    static void hmR0SvmUpdateTscOffsetting(PVMCPU pVCpu)
    {
  +     bool fParavirtTsc = false;
        PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
  -     if (TMCpuTickCanUseRealTSC(pVCpu, &pVmcb->ctrl.u64TSCOffset))
  -     {
  -         uint64_t u64CurTSC = ASMReadTSC();
  -         if (u64CurTSC + pVmcb->ctrl.u64TSCOffset > TMCpuTickGetLastSeen(pVCpu))
  +     if (TMCpuTickCanUseRealTSC(pVCpu, &pVmcb->ctrl.u64TSCOffset, &fParavirtTsc))
  +     {
  +         uint64_t u64CurTSC   = ASMReadTSC();
  +         uint64_t u64LastTick = TMCpuTickGetLastSeen(pVCpu);
  +         if (fParavirtTsc)
  +         {
  +             if (u64CurTSC + pVmcb->ctrl.u64TSCOffset > u64LastTick)
  +             {
  +                 pVmcb->ctrl.u64TSCOffset = u64LastTick - u64CurTSC;
  +                 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffsetAdjusted);
  +             }
  +             int rc = GIMR0UpdateParavirtTsc(pVCpu->CTX_SUFF(pVM), pVmcb->ctrl.u64TSCOffset);
  +             AssertRC(rc);
  +             STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
  +         }
  +
  +         if (u64CurTSC + pVmcb->ctrl.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
            {
                pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC;
  ...
        else
        {
  +         Assert(!fParavirtTsc);
            pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
            pVmcb->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;
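Aside (not part of the changeset): the "fudged" offset counted by StatTscOffsetAdjusted exists to keep the guest-visible TSC from appearing to run backwards across world switches. A tiny numeric sketch of the adjustment, using the comparison direction from the VMX variant below (names are illustrative):

    #include <stdint.h>

    /* If curTsc + offset would land behind the last TSC the guest already saw,
       pull the offset up so the guest TSC stays monotonic.
       E.g. curTsc=1000, offset=0, lastSeen=1500  ->  offset becomes 500. */
    static uint64_t adjustTscOffset(uint64_t curTsc, uint64_t lastSeen, uint64_t offset)
    {
        if (curTsc + offset < lastSeen)
            offset = lastSeen - curTsc;
        return offset;
    }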
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r51244 → r51643)

    #include <VBox/vmm/selm.h>
    #include <VBox/vmm/tm.h>
  + #include <VBox/vmm/gim.h>
    #ifdef VBOX_WITH_REM
    # include <VBox/vmm/rem.h>
  ...
    /*******************************************************************************
    *   Internal Functions                                                         *
    *******************************************************************************/
  - static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush);
  - static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr);
  + static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXFLUSHEPT enmFlush);
  + static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr);
    static int  hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
                                       uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntState);
  ...
    if (pMsrs->u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
    {
  -     hmR0VmxFlushEpt(NULL /* pVCpu */, VMX_FLUSH_EPT_ALL_CONTEXTS);
  +     hmR0VmxFlushEpt(NULL /* pVCpu */, VMXFLUSHEPT_ALL_CONTEXTS);
        pCpu->fFlushAsidBeforeUse = false;
    }
  ...
     * @remarks Can be called with interrupts disabled.
     */
  - static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush)
  + static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXFLUSHEPT enmFlush)
    {
        uint64_t au64Descriptor[2];
  -     if (enmFlush == VMX_FLUSH_EPT_ALL_CONTEXTS)
  +     if (enmFlush == VMXFLUSHEPT_ALL_CONTEXTS)
            au64Descriptor[0] = 0;
        else
  ...
     * @remarks Can be called with interrupts disabled.
     */
  - static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr)
  + static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr)
    {
        NOREF(pVM);
  ...
        uint64_t au64Descriptor[2];
  -     if (enmFlush == VMX_FLUSH_VPID_ALL_CONTEXTS)
  +     if (enmFlush == VMXFLUSHVPID_ALL_CONTEXTS)
        {
            au64Descriptor[0] = 0;
  ...
    if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
    {
  -     hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, GCVirt);
  +     hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, GCVirt);
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
    }
  ...
    {
        for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
  -         hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
  +         hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
    }
    else
  ...
    if (pCpu->fFlushAsidBeforeUse)
    {
  -     if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_SINGLE_CONTEXT)
  -         hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_SINGLE_CONTEXT, 0 /* GCPtr */);
  -     else if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_ALL_CONTEXTS)
  +     if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_SINGLE_CONTEXT)
  +         hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_SINGLE_CONTEXT, 0 /* GCPtr */);
  +     else if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_ALL_CONTEXTS)
        {
  -         hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_ALL_CONTEXTS, 0 /* GCPtr */);
  +         hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */);
            pCpu->fFlushAsidBeforeUse = false;
        }
  ...
    {
        for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
  -         hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR,
  -                          pVCpu->hm.s.TlbShootdown.aPages[i]);
  +         hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
    }
    else
  ...
    {
        if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
  -         pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_SINGLE_CONTEXT;
  +         pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_SINGLE_CONTEXT;
        else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
  -         pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_ALL_CONTEXTS;
  +         pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_ALL_CONTEXTS;
        else
        {
            /* Shouldn't happen. EPT is supported but no suitable flush-types supported. */
  -         pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
  +         pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
            return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
        }
  ...
    {
        LogRel(("hmR0VmxSetupTaggedTlb: Unsupported EPTP memory type %#x.\n", pVM->hm.s.vmx.Msrs.u64EptVpidCaps));
  -     pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
  +     pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    }
  ...
    {
        /* Shouldn't happen. EPT is supported but INVEPT instruction is not supported. */
  -     pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
  +     pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    }
  ...
    {
        if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
  -         pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_SINGLE_CONTEXT;
  +         pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_SINGLE_CONTEXT;
        else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
  -         pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_ALL_CONTEXTS;
  +         pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_ALL_CONTEXTS;
        else
        {
  ...
            if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
                LogRel(("hmR0VmxSetupTaggedTlb: Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
  -         pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
  +         pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NOT_SUPPORTED;
            pVM->hm.s.vmx.fVpid = false;
        }
  ...
        /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
        Log4(("hmR0VmxSetupTaggedTlb: VPID supported without INVEPT support. Ignoring VPID.\n"));
  -     pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
  +     pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NOT_SUPPORTED;
        pVM->hm.s.vmx.fVpid = false;
    }
  ...
    /* Initialize these always, see hmR3InitFinalizeR0(). */
  - pVM->hm.s.vmx.enmFlushEpt  = VMX_FLUSH_EPT_NONE;
  - pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NONE;
  + pVM->hm.s.vmx.enmFlushEpt  = VMXFLUSHEPT_NONE;
  + pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NONE;

    /* Setup the tagged-TLB flush handlers. */
  ...
    int rc = VERR_INTERNAL_ERROR_5;
    bool fOffsettedTsc = false;
  + bool fParavirtTsc  = false;
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (pVM->hm.s.vmx.fUsePreemptTimer)
    {
  -     uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &pVCpu->hm.s.vmx.u64TSCOffset);
  +     uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &fParavirtTsc,
  +                                                                  &pVCpu->hm.s.vmx.u64TSCOffset);

        /* Make sure the returned values have sane upper and lower boundaries. */
  ...
    }
    else
  -     fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset);
  +     fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset, &fParavirtTsc);
  +
  + if (fParavirtTsc)
  + {
  +     uint64_t const u64CurTsc   = ASMReadTSC();
  +     uint64_t const u64LastTick = TMCpuTickGetLastSeen(pVCpu);
  +     if (u64CurTsc + pVCpu->hm.s.vmx.u64TSCOffset < u64LastTick)
  +     {
  +         pVCpu->hm.s.vmx.u64TSCOffset = (u64LastTick - u64CurTsc);
  +         STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffsetAdjusted);
  +     }
  +
  +     Assert(u64CurTsc + pVCpu->hm.s.vmx.u64TSCOffset >= u64LastTick);
  +     rc = GIMR0UpdateParavirtTsc(pVM, pVCpu->hm.s.vmx.u64TSCOffset);
  +     if (RT_SUCCESS(rc))
  +     {
  +         /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
  +         rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0);                     AssertRC(rc);
  +
  +         pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
  +         rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
  +         STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
  +         return;
  +     }
  +     /* else: Shouldn't really fail. If it does, fallback to offsetted TSC mode. */
  + }

    if (fOffsettedTsc)
  ...
                  ("hmR0VmxExitRdmsr: failed, invalid error code %Rrc\n", rc));
    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
  -
  -
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
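Aside (not part of the changeset): the preemption-timer path above converts a TSC-unit deadline into VMX preemption-timer units. Per the VT-x spec the timer counts down at the TSC rate divided by 2^shift, where the shift is reported in MSR_IA32_VMX_MISC[4:0] (cPreemptTimerShift here). A hedged sketch of the conversion; the 32-bit clamp is an assumption about the elided boundary check:

    #include <stdint.h>

    static uint32_t ticksToPreemptTimer(uint64_t cTicksToDeadline, uint8_t cPreemptTimerShift)
    {
        uint64_t cTimerTicks = cTicksToDeadline >> cPreemptTimerShift;
        return cTimerTicks > UINT32_MAX ? UINT32_MAX : (uint32_t)cTimerTicks;
    }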
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp (r49481 → r51643)

    #include <VBox/vmm/gvmm.h>
    #include <VBox/vmm/gmm.h>
  + #include <VBox/vmm/gim.h>
    #include <VBox/intnet.h>
    #include <VBox/vmm/hm.h>
  ...
            if (RT_SUCCESS(rc))
            {
  -             GVMMR0DoneInitVM(pVM);
  -             return rc;
  +             rc = GIMR0InitVM(pVM);
  +             if (RT_SUCCESS(rc))
  +             {
  +                 GVMMR0DoneInitVM(pVM);
  +                 return rc;
  +             }
  +
  +             /* bail out*/
  + #ifdef VBOX_WITH_PCI_PASSTHROUGH
  +             PciRawR0TermVM(pVM);
  + #endif
            }
        }
  -
  -     /* bail out */
    }
  - #ifdef VBOX_WITH_PCI_PASSTHROUGH
  - PciRawR0TermVM(pVM);
  - #endif
    HMR0TermVM(pVM);
  ...
    if (GVMMR0DoingTermVM(pVM, pGVM))
    {
  +     GIMR0TermVM(pVM);
  +
        /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
         *        here to make sure we don't leak any shared pages if we crash... */
trunk/src/VBox/VMM/VMMR3/GIM.cpp (r51560 → r51643)

     */
    int rc;
  - #if 0
  - rc = SSMR3RegisterInternal(pVM, "GIM", 0, GIM_SSM_VERSION, sizeof(GIM),
  -                            NULL, NULL, NULL,
  -                            NULL, gimR3Save, NULL,
  -                            NULL, gimR3Load, NULL);
  + rc = SSMR3RegisterInternal(pVM, "GIM", 0 /* uInstance */, GIM_SSM_VERSION, sizeof(GIM),
  +                            NULL /* pfnLivePrep */, NULL /* pfnLiveExec */, NULL /* pfnLiveVote*/,
  +                            NULL /* pfnSavePrep */, gimR3Save, NULL /* pfnSaveDone */,
  +                            NULL /* pfnLoadPrep */, gimR3Load, NULL /* pfnLoadDone */);
    if (RT_FAILURE(rc))
        return rc;
  - #endif
  ...
    /**
  -  * Execute state save operation.
  +  * Executes state-save operation.
     *
     * @returns VBox status code.
  ...
    DECLCALLBACK(int) gimR3Save(PVM pVM, PSSMHANDLE pSSM)
    {
  -     /** @todo save state. */
  -     return VINF_SUCCESS;
  +     AssertReturn(pVM,  VERR_INVALID_PARAMETER);
  +     AssertReturn(pSSM, VERR_SSM_INVALID_STATE);
  +
  +     /** @todo Save per-CPU data. */
  +     int rc;
  + #if 0
  +     for (VMCPUID i = 0; i < pVM->cCpus; i++)
  +     {
  +         rc = SSMR3PutXYZ(pSSM, pVM->aCpus[i].gim.s.XYZ);
  +     }
  + #endif
  +
  +     /*
  +      * Save per-VM data.
  +      */
  +     rc = SSMR3PutBool(pSSM, pVM->gim.s.fEnabled);
  +     AssertRCReturn(rc, rc);
  +     rc = SSMR3PutU32(pSSM, pVM->gim.s.enmProviderId);
  +     AssertRCReturn(rc, rc);
  +     rc = SSMR3PutU32(pSSM, pVM->gim.s.u32Version);
  +     AssertRCReturn(rc, rc);
  +
  +     /*
  +      * Save provider-specific data.
  +      */
  +     if (pVM->gim.s.fEnabled)
  +     {
  +         switch (pVM->gim.s.enmProviderId)
  +         {
  +             case GIMPROVIDERID_HYPERV:
  +                 rc = GIMR3HvSave(pVM, pSSM);
  +                 AssertRCReturn(rc, rc);
  +                 break;
  +
  +             default:
  +                 break;
  +         }
  +     }
  +
  +     return rc;
    }
  ...
    DECLCALLBACK(int) gimR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
    {
  -     /** @todo load state. */
  -     return VINF_SUCCESS;
  +     if (uPass != SSM_PASS_FINAL)
  +         return VINF_SUCCESS;
  +
  +     /** @todo Load per-CPU data. */
  +     int rc;
  + #if 0
  +     for (VMCPUID i = 0; i < pVM->cCpus; i++)
  +     {
  +         rc = SSMR3PutXYZ(pSSM, pVM->aCpus[i].gim.s.XYZ);
  +     }
  + #endif
  +
  +     /*
  +      * Load per-VM data.
  +      */
  +     rc = SSMR3GetBool(pSSM, &pVM->gim.s.fEnabled);
  +     AssertRCReturn(rc, rc);
  +     rc = SSMR3GetU32(pSSM, (uint32_t *)&pVM->gim.s.enmProviderId);
  +     AssertRCReturn(rc, rc);
  +     rc = SSMR3GetU32(pSSM, &pVM->gim.s.u32Version);
  +     AssertRCReturn(rc, rc);
  +
  +     /*
  +      * Load provider-specific data.
  +      */
  +     if (pVM->gim.s.fEnabled)
  +     {
  +         switch (pVM->gim.s.enmProviderId)
  +         {
  +             case GIMPROVIDERID_HYPERV:
  +                 rc = GIMR3HvLoad(pVM, pSSM, uVersion);
  +                 AssertRCReturn(rc, rc);
  +                 break;
  +
  +             default:
  +                 break;
  +         }
  +     }
  +
  +     return rc;
    }
  ...
    VMMR3_INT_DECL(int) GIMR3Term(PVM pVM)
    {
  +     if (!pVM->gim.s.fEnabled)
  +         return VINF_SUCCESS;
  +
  +     switch (pVM->gim.s.enmProviderId)
  +     {
  +         case GIMPROVIDERID_HYPERV:
  +             return GIMR3HvTerm(pVM);
  +
  +         default:
  +             break;
  +     }
        return VINF_SUCCESS;
    }
  ...
     * @param   pRegion     Pointer to the GIM MMIO2 region.
     */
  - VMM_INT_DECL(int) GIMR3Mmio2Unmap(PVM pVM, PGIMMMIO2REGION pRegion)
  + VMMR3_INT_DECL(int) GIMR3Mmio2Unmap(PVM pVM, PGIMMMIO2REGION pRegion)
    {
        AssertPtr(pVM);
  ...
        if (pRegion->fMapped)
        {
  -         PGMHandlerPhysicalDeregister(pVM, pRegion->GCPhysPage);
  -         int rc = PDMDevHlpMMIO2Unmap(pDevIns, pRegion->iRegion, pRegion->GCPhysPage);
  +         int rc = PGMHandlerPhysicalDeregister(pVM, pRegion->GCPhysPage);
  +         AssertRC(rc);
  +
  +         rc = PDMDevHlpMMIO2Unmap(pDevIns, pRegion->iRegion, pRegion->GCPhysPage);
            if (RT_SUCCESS(rc))
            {
  ...
    /**
  -  * Write access handler for a mapped MMIO2 region that presently ignores writes.
  +  * Write access handler for a mapped MMIO2 region. At present, this handler
  +  * simply ignores writes.
  +  *
  +  * In the future we might want to let the GIM provider decide what the handler
  +  * should do (like throwing #GP faults).
     *
     * @returns VBox status code.
  ...
     * @param   pvUser          User argument (NULL, not used).
     */
  - static DECLCALLBACK(int) gimR3Mmio2PageWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
  -                                                     PGMACCESSTYPE enmAccessType, void *pvUser)
  + static DECLCALLBACK(int) gimR3Mmio2WriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
  +                                                 PGMACCESSTYPE enmAccessType, void *pvUser)
    {
        /*
  ...
     * @param   pRegion         Pointer to the GIM MMIO2 region.
     * @param   GCPhysRegion    Where in the guest address space to map the region.
  -  * @param   pszDesc         Description of the region being mapped.
  -  */
  - VMM_INT_DECL(int) GIMR3Mmio2Map(PVM pVM, PGIMMMIO2REGION pRegion, RTGCPHYS GCPhysRegion, const char *pszDesc)
  +  */
  + VMMR3_INT_DECL(int) GIMR3Mmio2Map(PVM pVM, PGIMMMIO2REGION pRegion, RTGCPHYS GCPhysRegion)
    {
        PPDMDEVINS pDevIns = pVM->gim.s.pDevInsR3;
  ...
        if (GCPhysRegion & PAGE_OFFSET_MASK)
        {
  -         LogFunc(("%s: %#RGp not paging aligned\n", pszDesc, GCPhysRegion));
  +         LogFunc(("%s: %#RGp not paging aligned\n", pRegion->szDescription, GCPhysRegion));
            return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
        }
  ...
        if (!PGMPhysIsGCPhysNormal(pVM, GCPhysRegion))
        {
  -         LogFunc(("%s: %#RGp is not normal memory\n", pszDesc, GCPhysRegion));
  +         LogFunc(("%s: %#RGp is not normal memory\n", pRegion->szDescription, GCPhysRegion));
            return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
        }

  -     if (pRegion->fMapped)
  -     {
  -         LogFunc(("%s: A mapping for %#RGp already exists.\n", pszDesc, GCPhysRegion));
  -         return VERR_PGM_MAPPING_CONFLICT;
  -     }
  -
  -     /*
  -      * Map the MMIO2 region over the guest-physical address.
  +     if (!pRegion->fRegistered)
  +     {
  +         LogFunc(("%s: Region has not been registered.\n"));
  +         return VERR_GIM_IPE_1;
  +     }
  +
  +     /*
  +      * Map the MMIO2 region over the specified guest-physical address.
         */
        int rc = PDMDevHlpMMIO2Map(pDevIns, pRegion->iRegion, GCPhysRegion);
  ...
                                        PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
                                        GCPhysRegion, GCPhysRegion + (pRegion->cbRegion - 1),
  -                                     gimR3Mmio2PageWriteHandler, NULL /* pvUserR3 */,
  +                                     gimR3Mmio2WriteHandler, NULL /* pvUserR3 */,
                                        NULL /* pszModR0 */, NULL /* pszHandlerR0 */, NIL_RTR0PTR /* pvUserR0 */,
                                        NULL /* pszModRC */, NULL /* pszHandlerRC */, NIL_RTRCPTR /* pvUserRC */,
  -                                     pszDesc);
  +                                     pRegion->szDescription);
        if (RT_SUCCESS(rc))
        {
            pRegion->fMapped    = true;
            pRegion->GCPhysPage = GCPhysRegion;
  -         return VINF_SUCCESS;
  +         return rc;
        }
  ...
    }

  + #if 0
  + /**
  +  * Registers the physical handler for the registered and mapped MMIO2 region.
  +  *
  +  * @returns VBox status code.
  +  * @param   pVM         Pointer to the VM.
  +  * @param   pRegion     Pointer to the GIM MMIO2 region.
  +  */
  + VMMR3_INT_DECL(int) GIMR3Mmio2HandlerPhysicalRegister(PVM pVM, PGIMMMIO2REGION pRegion)
  + {
  +     AssertPtr(pRegion);
  +     AssertReturn(pRegion->fRegistered, VERR_GIM_IPE_2);
  +     AssertReturn(pRegion->fMapped, VERR_GIM_IPE_3);
  +
  +     return PGMR3HandlerPhysicalRegister(pVM,
  +                                         PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
  +                                         pRegion->GCPhysPage, pRegion->GCPhysPage + (pRegion->cbRegion - 1),
  +                                         gimR3Mmio2WriteHandler, NULL /* pvUserR3 */,
  +                                         NULL /* pszModR0 */, NULL /* pszHandlerR0 */, NIL_RTR0PTR /* pvUserR0 */,
  +                                         NULL /* pszModRC */, NULL /* pszHandlerRC */, NIL_RTRCPTR /* pvUserRC */,
  +                                         pRegion->szDescription);
  + }
  +
  +
  + /**
  +  * Deregisters the physical handler for the MMIO2 region.
  +  *
  +  * @returns VBox status code.
  +  * @param   pVM         Pointer to the VM.
  +  * @param   pRegion     Pointer to the GIM MMIO2 region.
  +  */
  + VMMR3_INT_DECL(int) GIMR3Mmio2HandlerPhysicalDeregister(PVM pVM, PGIMMMIO2REGION pRegion)
  + {
  +     return PGMHandlerPhysicalDeregister(pVM, pRegion->GCPhysPage);
  + }
  + #endif
trunk/src/VBox/VMM/VMMR3/GIMHv.cpp (r51560 → r51643)

    #include <iprt/string.h>
    #include <iprt/mem.h>
  + #include <iprt/spinlock.h>

    #include <VBox/vmm/cpum.h>
  + #include <VBox/vmm/ssm.h>
    #include <VBox/vmm/vm.h>
    #include <VBox/vmm/hm.h>
  ...
    PGIMMMIO2REGION pRegion = &pHv->aMmio2Regions[GIM_HV_HYPERCALL_PAGE_REGION_IDX];
    pRegion->iRegion    = GIM_HV_HYPERCALL_PAGE_REGION_IDX;
  + pRegion->fRCMapping = false;
    pRegion->cbRegion   = PAGE_SIZE;
    pRegion->GCPhysPage = NIL_RTGCPHYS;
  - RTStrCopy(pRegion->szDescription, sizeof(pRegion->szDescription), "Hypercall Page");
  - Assert(!pRegion->fRCMapping);
  - Assert(!pRegion->fMapped);
  + RTStrCopy(pRegion->szDescription, sizeof(pRegion->szDescription), "Hyper-V hypercall page");

    pRegion = &pHv->aMmio2Regions[GIM_HV_REF_TSC_PAGE_REGION_IDX];
    pRegion->iRegion    = GIM_HV_REF_TSC_PAGE_REGION_IDX;
  + pRegion->fRCMapping = false;
    pRegion->cbRegion   = PAGE_SIZE;
    pRegion->GCPhysPage = NIL_RTGCPHYS;
  - RTStrCopy(pRegion->szDescription, sizeof(pRegion->szDescription), "TSC Page");
  + RTStrCopy(pRegion->szDescription, sizeof(pRegion->szDescription), "Hyper-V TSC page");

    /*
  ...
  + VMMR3_INT_DECL(int) GIMR3HvTerm(PVM pVM)
  + {
  +     GIMR3HvReset(pVM);
  +     return VINF_SUCCESS;
  + }
  +
  +
    VMMR3_INT_DECL(void) GIMR3HvRelocate(PVM pVM, RTGCINTPTR offDelta)
  ...
    /*
     * Unmap MMIO2 pages that the guest may have setup.
     */
  + LogRelFunc(("Resetting Hyper-V MMIO2 regions and MSRs...\n"));
    PGIMHV pHv = &pVM->gim.s.u.Hv;
    for (unsigned i = 0; i < RT_ELEMENTS(pHv->aMmio2Regions); i++)
  ...
    }

  + /**
  +  * Hyper-V state-save operation.
  +  *
  +  * @returns VBox status code.
  +  * @param   pVM     Pointer to the VM.
  +  * @param   pSSM    Pointer to the SSM handle.
  +  */
  + VMMR3_INT_DECL(int) GIMR3HvSave(PVM pVM, PSSMHANDLE pSSM)
  + {
  +     PCGIMHV pcHv = &pVM->gim.s.u.Hv;
  +
  +     /** @todo Save per-VCPU data. */
  +
  +     /*
  +      * Save per-VM MSRs.
  +      */
  +     int rc = SSMR3PutU64(pSSM, pcHv->u64GuestOsIdMsr);    AssertRCReturn(rc, rc);
  +     rc = SSMR3PutU64(pSSM, pcHv->u64HypercallMsr);        AssertRCReturn(rc, rc);
  +     rc = SSMR3PutU64(pSSM, pcHv->u64TscPageMsr);          AssertRCReturn(rc, rc);
  +
  +     /*
  +      * Save Hyper-V features / capabilities.
  +      */
  +     rc = SSMR3PutU32(pSSM, pcHv->uBaseFeat);              AssertRCReturn(rc, rc);
  +     rc = SSMR3PutU32(pSSM, pcHv->uPartFlags);             AssertRCReturn(rc, rc);
  +     rc = SSMR3PutU32(pSSM, pcHv->uPowMgmtFeat);           AssertRCReturn(rc, rc);
  +     rc = SSMR3PutU32(pSSM, pcHv->uMiscFeat);              AssertRCReturn(rc, rc);
  +     rc = SSMR3PutU32(pSSM, pcHv->uHyperHints);            AssertRCReturn(rc, rc);
  +
  +     /*
  +      * Save per-VM MMIO2 regions.
  +      */
  +     rc = SSMR3PutU32(pSSM, RT_ELEMENTS(pcHv->aMmio2Regions));
  +     for (unsigned i = 0; i < RT_ELEMENTS(pcHv->aMmio2Regions); i++)
  +     {
  +         /* Save the fields necessary to remap the regions upon load. */
  +         PCGIMMMIO2REGION pcRegion = &pcHv->aMmio2Regions[i];
  +         rc = SSMR3PutU8(pSSM, pcRegion->iRegion);         AssertRCReturn(rc, rc);
  +         rc = SSMR3PutBool(pSSM, pcRegion->fRCMapping);    AssertRCReturn(rc, rc);
  +         rc = SSMR3PutU32(pSSM, pcRegion->cbRegion);       AssertRCReturn(rc, rc);
  +         rc = SSMR3PutGCPhys(pSSM, pcRegion->GCPhysPage);  AssertRCReturn(rc, rc);
  +         rc = SSMR3PutStrZ(pSSM, pcRegion->szDescription); AssertRCReturn(rc, rc);
  +     }
  +
  +     return VINF_SUCCESS;
  + }
  +
  +
  + /**
  +  * Hyper-V state-load operation, final pass.
  +  *
  +  * @returns VBox status code.
  +  * @param   pVM             Pointer to the VM.
  +  * @param   pSSM            Pointer to the SSM handle.
  +  * @param   uSSMVersion     The saved-state version.
  +  */
  + VMMR3_INT_DECL(int) GIMR3HvLoad(PVM pVM, PSSMHANDLE pSSM, uint32_t uSSMVersion)
  + {
  +     PGIMHV pHv = &pVM->gim.s.u.Hv;
  +
  +     /** @todo Load per-VCPU data. */
  +
  +     /*
  +      * Load per-VM MSRs.
  +      */
  +     int rc = SSMR3GetU64(pSSM, &pHv->u64GuestOsIdMsr);    AssertRCReturn(rc, rc);
  +     rc = SSMR3GetU64(pSSM, &pHv->u64HypercallMsr);        AssertRCReturn(rc, rc);
  +     rc = SSMR3GetU64(pSSM, &pHv->u64TscPageMsr);          AssertRCReturn(rc, rc);
  +
  +     /*
  +      * Save Hyper-V features / capabilities.
  +      */
  +     rc = SSMR3GetU32(pSSM, &pHv->uBaseFeat);              AssertRCReturn(rc, rc);
  +     rc = SSMR3GetU32(pSSM, &pHv->uPartFlags);             AssertRCReturn(rc, rc);
  +     rc = SSMR3GetU32(pSSM, &pHv->uPowMgmtFeat);           AssertRCReturn(rc, rc);
  +     rc = SSMR3GetU32(pSSM, &pHv->uMiscFeat);              AssertRCReturn(rc, rc);
  +     rc = SSMR3GetU32(pSSM, &pHv->uHyperHints);            AssertRCReturn(rc, rc);
  +
  +     /*
  +      * Load per-VM MMIO2 regions.
  +      */
  +     uint32_t cRegions;
  +     rc = SSMR3GetU32(pSSM, &cRegions);
  +     if (cRegions != RT_ELEMENTS(pHv->aMmio2Regions))
  +     {
  +         LogRelFunc(("MMIO2 region array size mismatch. size=%u expected=%u\n", cRegions, RT_ELEMENTS(pHv->aMmio2Regions)));
  +         return VERR_SSM_FIELD_INVALID_VALUE;
  +     }
  +
  +     for (unsigned i = 0; i < RT_ELEMENTS(pHv->aMmio2Regions); i++)
  +     {
  +         /* The regions would have been registered while constructing the GIM device. */
  +         PGIMMMIO2REGION pRegion = &pHv->aMmio2Regions[i];
  +         rc = SSMR3GetU8(pSSM, &pRegion->iRegion);         AssertRCReturn(rc, rc);
  +         rc = SSMR3GetBool(pSSM, &pRegion->fRCMapping);    AssertRCReturn(rc, rc);
  +         rc = SSMR3GetU32(pSSM, &pRegion->cbRegion);       AssertRCReturn(rc, rc);
  +         rc = SSMR3GetGCPhys(pSSM, &pRegion->GCPhysPage);  AssertRCReturn(rc, rc);
  +         rc = SSMR3GetStrZ(pSSM, pRegion->szDescription, sizeof(pRegion->szDescription));
  +         AssertRCReturn(rc, rc);
  +     }
  +
  +     /*
  +      * Enable the Hypercall-page.
  +      */
  +     PGIMMMIO2REGION pRegion = &pHv->aMmio2Regions[GIM_HV_HYPERCALL_PAGE_REGION_IDX];
  +     if (MSR_GIM_HV_HYPERCALL_IS_ENABLED(pHv->u64HypercallMsr))
  +     {
  +         Assert(pRegion->GCPhysPage != NIL_RTGCPHYS);
  +         if (pRegion->fRegistered)
  +         {
  +             rc = GIMR3HvEnableHypercallPage(pVM, pRegion->GCPhysPage);
  +             if (RT_FAILURE(rc))
  +                 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Failed to enable the hypercall page. GCPhys=%#RGp rc=%Rrc"),
  +                                         pRegion->GCPhysPage, rc);
  +         }
  +         else
  +             return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Hypercall MMIO2 region not registered. Missing GIM device?!"));
  +     }
  +
  +     /*
  +      * Enable the TSC-page.
  +      */
  +     pRegion = &pHv->aMmio2Regions[GIM_HV_REF_TSC_PAGE_REGION_IDX];
  +     if (MSR_GIM_HV_REF_TSC_IS_ENABLED(pHv->u64TscPageMsr))
  +     {
  +         Assert(pRegion->GCPhysPage != NIL_RTGCPHYS);
  +         if (pRegion->fRegistered)
  +         {
  +             rc = GIMR3HvEnableTscPage(pVM, pRegion->GCPhysPage);
  +             if (RT_FAILURE(rc))
  +                 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Failed to enable the TSC page. GCPhys=%#RGp rc=%Rrc"),
  +                                         pRegion->GCPhysPage, rc);
  +         }
  +         else
  +             return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("TSC-page MMIO2 region not registered. Missing GIM device?!"));
  +     }
  +
  +     return rc;
  + }
  +
  +
  + /**
  +  * Enables the Hyper-V TSC page.
  +  *
  +  * @returns VBox status code.
  +  * @param   pVM             Pointer to the VM.
  +  * @param   GCPhysTscPage   Where to map the TSC page.
  +  */
  + VMMR3_INT_DECL(int) GIMR3HvEnableTscPage(PVM pVM, RTGCPHYS GCPhysTscPage)
  + {
  +     PPDMDEVINSR3    pDevIns = pVM->gim.s.pDevInsR3;
  +     PGIMMMIO2REGION pRegion = &pVM->gim.s.u.Hv.aMmio2Regions[GIM_HV_REF_TSC_PAGE_REGION_IDX];
  +     AssertPtrReturn(pDevIns, VERR_GIM_DEVICE_NOT_REGISTERED);
  +
  +     int rc;
  +     if (pRegion->fMapped)
  +     {
  +         /*
  +          * Is it already enabled at the given guest-address?
  +          */
  +         if (pRegion->GCPhysPage == GCPhysTscPage)
  +             return VINF_SUCCESS;
  +
  +         /*
  +          * If it's mapped at a different address, unmap the previous address.
  +          */
  +         rc = GIMR3HvDisableTscPage(pVM);
  +         AssertRC(rc);
  +     }
  +
  +     /*
  +      * Map the TSC-page at the specified address.
  +      */
  +     Assert(!pRegion->fMapped);
  +     rc = GIMR3Mmio2Map(pVM, pRegion, GCPhysTscPage);
  +     if (RT_SUCCESS(rc))
  +     {
  +         Assert(pRegion->GCPhysPage == GCPhysTscPage);
  +
  +         /*
  +          * Update the TSC scale. Windows guests expect a non-zero TSC sequence, otherwise
  +          * they fallback to using the reference count MSR which is not ideal in terms of VM-exits.
  +          *
  +          * Also, Hyper-V normalizes the time in 10 MHz, see:
  +          * http://technet.microsoft.com/it-it/sysinternals/dn553408%28v=vs.110%29
  +          */
  +         PGIMHVREFTSC pRefTsc = (PGIMHVREFTSC)pRegion->pvPageR3;
  +         Assert(pRefTsc);
  +
  +         uint64_t const u64TscKHz = TMCpuTicksPerSecond(pVM) / UINT64_C(1000);
  +         pRefTsc->u32TscSequence  = 1;
  +         //pRefTsc->u64TscScale = ((UINT64_C(10000) << 32) / u64TscKHz) << 32;
  +         pRefTsc->u64TscScale     = 0xf4000000000000;
  +
  +         LogRel(("GIM: HyperV: Enabled TSC page at %#RGp (u64TscScale=%#RX64 u64TscKHz=%#RX64)\n", GCPhysTscPage,
  +                 pRefTsc->u64TscScale, u64TscKHz));
  +         return VINF_SUCCESS;
  +     }
  +     else
  +         LogRelFunc(("GIMR3Mmio2Map failed. rc=%Rrc\n", rc));
  +
  +     return VERR_GIM_OPERATION_FAILED;
  + }
  +
  +
  + /**
  +  * Disables the Hyper-V TSC page.
  +  *
  +  * @returns VBox status code.
  +  * @param   pVM     Pointer to the VM.
  +  */
  + VMMR3_INT_DECL(int) GIMR3HvDisableTscPage(PVM pVM)
  + {
  +     PGIMHV          pHv     = &pVM->gim.s.u.Hv;
  +     PGIMMMIO2REGION pRegion = &pHv->aMmio2Regions[GIM_HV_REF_TSC_PAGE_REGION_IDX];
  +     if (pRegion->fMapped)
  +     {
  +         GIMR3Mmio2Unmap(pVM, pRegion);
  +         Assert(!pRegion->fMapped);
  +         LogRel(("GIM: HyperV: Disabled TSC-page\n"));
  +         return VINF_SUCCESS;
  +     }
  +     return VERR_GIM_PVTSC_NOT_ENABLED;
  + }
  +
  +
  + /**
  +  * Disables the Hyper-V Hypercall page.
  +  *
  +  * @returns VBox status code.
  +  */
  + VMMR3_INT_DECL(int) GIMR3HvDisableHypercallPage(PVM pVM)
  + {
  +     PGIMHV          pHv     = &pVM->gim.s.u.Hv;
  +     PGIMMMIO2REGION pRegion = &pHv->aMmio2Regions[GIM_HV_HYPERCALL_PAGE_REGION_IDX];
  +     if (pRegion->fMapped)
  +     {
  +         GIMR3Mmio2Unmap(pVM, pRegion);
  +         Assert(!pRegion->fMapped);
  +         LogRel(("GIM: HyperV: Disabled Hypercall-page\n"));
  +         return VINF_SUCCESS;
  +     }
  +     return VERR_GIM_HYPERCALLS_NOT_ENABLED;
  + }
  +
  +
  + /**
  +  * Enables the Hyper-V Hypercall page.
  +  *
  +  * @returns VBox status code.
  +  * @param   pVM                     Pointer to the VM.
  +  * @param   GCPhysHypercallPage     Where to map the hypercall page.
  +  */
  + VMMR3_INT_DECL(int) GIMR3HvEnableHypercallPage(PVM pVM, RTGCPHYS GCPhysHypercallPage)
  + {
  +     PPDMDEVINSR3    pDevIns = pVM->gim.s.pDevInsR3;
  +     PGIMMMIO2REGION pRegion = &pVM->gim.s.u.Hv.aMmio2Regions[GIM_HV_HYPERCALL_PAGE_REGION_IDX];
  +     AssertPtrReturn(pDevIns, VERR_GIM_DEVICE_NOT_REGISTERED);
  +
  +     if (pRegion->fMapped)
  +     {
  +         /*
  +          * Is it already enabled at the given guest-address?
  +          */
  +         if (pRegion->GCPhysPage == GCPhysHypercallPage)
  +             return VINF_SUCCESS;
  +
  +         /*
  +          * If it's mapped at a different address, unmap the previous address.
  +          */
  +         int rc2 = GIMR3HvDisableHypercallPage(pVM);
  +         AssertRC(rc2);
  +     }
  +
  +     /*
  +      * Map the hypercall-page at the specified address.
  +      */
  +     Assert(!pRegion->fMapped);
  +     int rc = GIMR3Mmio2Map(pVM, pRegion, GCPhysHypercallPage);
  +     if (RT_SUCCESS(rc))
  +     {
  +         Assert(pRegion->GCPhysPage == GCPhysHypercallPage);
  +
  +         /*
  +          * Patch the hypercall-page.
  +          */
  +         if (HMIsEnabled(pVM))
  +         {
  +             size_t cbWritten = 0;
  +             rc = HMPatchHypercall(pVM, pRegion->pvPageR3, PAGE_SIZE, &cbWritten);
  +             if (   RT_SUCCESS(rc)
  +                 && cbWritten < PAGE_SIZE - 1)
  +             {
  +                 uint8_t *pbLast = (uint8_t *)pRegion->pvPageR3 + cbWritten;
  +                 *pbLast = 0xc3;   /* RET */
  +
  +                 LogRel(("GIM: HyperV: Enabled hypercalls at %#RGp\n", GCPhysHypercallPage));
  +                 return VINF_SUCCESS;
  +             }
  +             else
  +                 LogRelFunc(("HMPatchHypercall failed. rc=%Rrc cbWritten=%u\n", rc, cbWritten));
  +         }
  +         else
  +         {
  +             /** @todo Handle raw-mode hypercall page patching. */
  +             LogRelFunc(("Raw-mode not yet implemented!\n"));
  +         }
  +         GIMR3Mmio2Unmap(pVM, pRegion);
  +     }
  +     else
  +         LogRelFunc(("GIMR3Mmio2Map failed. rc=%Rrc\n", rc));
  +
  +     return rc;
  + }
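Aside (not part of the changeset): the commented-out scale expression and the hardcoded 0xf4000000000000 above both target the same 64.64 fixed-point contract as the reference-TSC page: time in 100 ns units = (TSC * u64TscScale) >> 64, i.e. scale = (10'000'000 << 64) / tscHz. A hedged sketch of the derivation (the helper name is illustrative; the two 32-bit shifts keep the intermediate within 64 bits, giving 10'000 units per millisecond):

    #include <stdint.h>

    static uint64_t hvComputeTscScale(uint64_t u64TscKHz)
    {
        return ((UINT64_C(10000) << 32) / u64TscKHz) << 32;
    }

Working the constant backwards, 0xf4000000000000 / 2^64 ≈ 0.003723, which corresponds to a TSC of roughly 2.69 GHz — this appears to match the hardcoded 2690000000 returned for MSR_GIM_HV_TSC_FREQ in GIMAllHv.cpp above.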
trunk/src/VBox/VMM/VMMR3/HM.cpp
r51220 r51643 733 733 HM_REG_COUNTER(&pVCpu->hm.s.StatTlbShootdownFlush, "/HM/CPU%d/Flush/Shootdown/TLB", "Inter-VCPU request to flush entire guest-TLB."); 734 734 735 HM_REG_COUNTER(&pVCpu->hm.s.StatTscOffsetAdjusted, "/HM/CPU%d/TSC/OffsetAdjusted", "TSC offset overflowed for paravirt. TSC. Fudged."); 736 HM_REG_COUNTER(&pVCpu->hm.s.StatTscParavirt, "/HM/CPU%d/TSC/Paravirt", "Paravirtualized TSC in effect."); 735 737 HM_REG_COUNTER(&pVCpu->hm.s.StatTscOffset, "/HM/CPU%d/TSC/Offset", "TSC offsetting is in effect."); 736 738 HM_REG_COUNTER(&pVCpu->hm.s.StatTscIntercept, "/HM/CPU%d/TSC/Intercept", "Guest is in catchup mode, intercept TSC accesses."); … … 1172 1174 { 1173 1175 CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP); 1174 LogRel(("HM: RDTSCP disabled .\n"));1176 LogRel(("HM: RDTSCP disabled\n")); 1175 1177 } 1176 1178 … … 1241 1243 1242 1244 LogRel((pVM->hm.s.fAllow64BitGuests 1243 ? "HM: Guest support: 32-bit and 64-bit .\n"1244 : "HM: Guest support: 32-bit only .\n"));1245 ? "HM: Guest support: 32-bit and 64-bit\n" 1246 : "HM: Guest support: 32-bit only\n")); 1245 1247 1246 1248 /* … … 1287 1289 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX); 1288 1290 else 1289 LogRel(("HM: NX not enabled on the host, unavailable to PAE guest .\n"));1291 LogRel(("HM: NX not enabled on the host, unavailable to PAE guest\n")); 1290 1292 } 1291 1293 … … 1296 1298 { 1297 1299 LogRel(("HM: Nested paging enabled!\n")); 1298 if (pVM->hm.s.vmx.enmFlushEpt == VMX _FLUSH_EPT_SINGLE_CONTEXT)1299 LogRel(("HM: EPT flush type = VMX _FLUSH_EPT_SINGLE_CONTEXT\n"));1300 else if (pVM->hm.s.vmx.enmFlushEpt == VMX _FLUSH_EPT_ALL_CONTEXTS)1301 LogRel(("HM: EPT flush type = VMX _FLUSH_EPT_ALL_CONTEXTS\n"));1302 else if (pVM->hm.s.vmx.enmFlushEpt == VMX _FLUSH_EPT_NOT_SUPPORTED)1303 LogRel(("HM: EPT flush type = VMX _FLUSH_EPT_NOT_SUPPORTED\n"));1300 if (pVM->hm.s.vmx.enmFlushEpt == VMXFLUSHEPT_SINGLE_CONTEXT) 1301 LogRel(("HM: EPT flush type = VMXFLUSHEPT_SINGLE_CONTEXT\n")); 1302 else if (pVM->hm.s.vmx.enmFlushEpt == VMXFLUSHEPT_ALL_CONTEXTS) 1303 LogRel(("HM: EPT flush type = VMXFLUSHEPT_ALL_CONTEXTS\n")); 1304 else if (pVM->hm.s.vmx.enmFlushEpt == VMXFLUSHEPT_NOT_SUPPORTED) 1305 LogRel(("HM: EPT flush type = VMXFLUSHEPT_NOT_SUPPORTED\n")); 1304 1306 else 1305 1307 LogRel(("HM: EPT flush type = %d\n", pVM->hm.s.vmx.enmFlushEpt)); … … 1313 1315 /* Use large (2 MB) pages for our EPT PDEs where possible. 
*/ 1314 1316 PGMSetLargePageUsage(pVM, true); 1315 LogRel(("HM: Large page support enabled !\n"));1317 LogRel(("HM: Large page support enabled\n")); 1316 1318 } 1317 1319 #endif … … 1323 1325 { 1324 1326 LogRel(("HM: VPID enabled!\n")); 1325 if (pVM->hm.s.vmx.enmFlushVpid == VMX _FLUSH_VPID_INDIV_ADDR)1326 LogRel(("HM: VPID flush type = VMX _FLUSH_VPID_INDIV_ADDR\n"));1327 else if (pVM->hm.s.vmx.enmFlushVpid == VMX _FLUSH_VPID_SINGLE_CONTEXT)1328 LogRel(("HM: VPID flush type = VMX _FLUSH_VPID_SINGLE_CONTEXT\n"));1329 else if (pVM->hm.s.vmx.enmFlushVpid == VMX _FLUSH_VPID_ALL_CONTEXTS)1330 LogRel(("HM: VPID flush type = VMX _FLUSH_VPID_ALL_CONTEXTS\n"));1331 else if (pVM->hm.s.vmx.enmFlushVpid == VMX _FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS)1332 LogRel(("HM: VPID flush type = VMX _FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS\n"));1327 if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_INDIV_ADDR) 1328 LogRel(("HM: VPID flush type = VMXFLUSHVPID_INDIV_ADDR\n")); 1329 else if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_SINGLE_CONTEXT) 1330 LogRel(("HM: VPID flush type = VMXFLUSHVPID_SINGLE_CONTEXT\n")); 1331 else if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_ALL_CONTEXTS) 1332 LogRel(("HM: VPID flush type = VMXFLUSHVPID_ALL_CONTEXTS\n")); 1333 else if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS) 1334 LogRel(("HM: VPID flush type = VMXFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS\n")); 1333 1335 else 1334 1336 LogRel(("HM: VPID flush type = %d\n", pVM->hm.s.vmx.enmFlushVpid)); 1335 1337 } 1336 else if (pVM->hm.s.vmx.enmFlushVpid == VMX _FLUSH_VPID_NOT_SUPPORTED)1337 LogRel(("HM: Ignoring VPID capabilities of CPU .\n"));1338 else if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_NOT_SUPPORTED) 1339 LogRel(("HM: Ignoring VPID capabilities of CPU\n")); 1338 1340 1339 1341 /* … … 1347 1349 } 1348 1350 if (pVM->hm.s.vmx.fUsePreemptTimer) 1349 LogRel(("HM: VMX-preemption timer enabled (cPreemptTimerShift=%u) .\n", pVM->hm.s.vmx.cPreemptTimerShift));1351 LogRel(("HM: VMX-preemption timer enabled (cPreemptTimerShift=%u)\n", pVM->hm.s.vmx.cPreemptTimerShift)); 1350 1352 else 1351 LogRel(("HM: VMX-preemption timer disabled .\n"));1353 LogRel(("HM: VMX-preemption timer disabled\n")); 1352 1354 1353 1355 return VINF_SUCCESS; … … 1465 1467 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX); 1466 1468 1467 LogRel(("HM: TPR patching %s .\n", (pVM->hm.s.fTprPatchingAllowed) ? "enabled" : "disabled"));1469 LogRel(("HM: TPR patching %s\n", (pVM->hm.s.fTprPatchingAllowed) ? "enabled" : "disabled")); 1468 1470 1469 1471 LogRel((pVM->hm.s.fAllow64BitGuests 1470 ? "HM: Guest support: 32-bit and 64-bit .\n"1471 : "HM: Guest support: 32-bit only .\n"));1472 ? "HM: Guest support: 32-bit and 64-bit\n" 1473 : "HM: Guest support: 32-bit only\n")); 1472 1474 1473 1475 return VINF_SUCCESS; -
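Most of the HM.cpp churn above is the mechanical VMX_FLUSH_EPT_*/VMX_FLUSH_VPID_* to VMXFLUSHEPT_*/VMXFLUSHVPID_* rename plus dropping trailing periods from LogRel strings. Where such if/else logging ladders keep growing, a name-lookup helper keeps the call site to a single LogRel; a hedged sketch (the helper is ours and not part of this changeset, only the enum values mirror the renamed type used above):

    #include <stddef.h>

    /* Values mirror the renamed VMXFLUSHEPT type used in the hunk above. */
    typedef enum
    {
        VMXFLUSHEPT_SINGLE_CONTEXT = 1,
        VMXFLUSHEPT_ALL_CONTEXTS   = 2,
        VMXFLUSHEPT_NOT_SUPPORTED  = 0xbad0,
        VMXFLUSHEPT_NONE           = 0xbad1
    } VMXFLUSHEPT;

    /* Illustrative helper; returns NULL for values it does not know. */
    static const char *hmVmxFlushEptName(VMXFLUSHEPT enmFlush)
    {
        switch (enmFlush)
        {
            case VMXFLUSHEPT_SINGLE_CONTEXT: return "VMXFLUSHEPT_SINGLE_CONTEXT";
            case VMXFLUSHEPT_ALL_CONTEXTS:   return "VMXFLUSHEPT_ALL_CONTEXTS";
            case VMXFLUSHEPT_NOT_SUPPORTED:  return "VMXFLUSHEPT_NOT_SUPPORTED";
            case VMXFLUSHEPT_NONE:           return "VMXFLUSHEPT_NONE";
            default:                         return NULL;
        }
    }

A VPID twin would look identical; the NULL fallback corresponds to the final else branch in the hunk, which logs the raw numeric value.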
trunk/src/VBox/VMM/include/GIMHvInternal.h
r51563 r51643 389 389 AssertCompile(MSR_GIM_HV_RANGE11_START <= MSR_GIM_HV_RANGE11_END); 390 390 391 /** @name Hyper-V MSR - Reset (MSR_GIM_HV_RESET). 392 * @{ 393 */ 394 /** The hypercall enable bit. */ 395 #define MSR_GIM_HV_RESET_BIT RT_BIT_64(0) 396 /** Whether the hypercall-page is enabled or not. */ 397 #define MSR_GIM_HV_RESET_IS_SET(a) RT_BOOL((a) & MSR_GIM_HV_RESET_BIT) 398 /** @} */ 391 399 392 400 /** @name Hyper-V MSR - Hypercall (MSR_GIM_HV_HYPERCALL). … … 434 442 uint32_t uReserved0; 435 443 uint64_t volatile u64TscScale; 436 uint64_t volatile u64TscOffset;444 int64_t volatile i64TscOffset; 437 445 } GIMHVTSCPAGE; 438 /** Pointer to GIM VMCPU instance data. */446 /** Pointer to Hyper-V reference TSC. */ 439 447 typedef GIMHVREFTSC *PGIMHVREFTSC; 448 /** Pointer to a const Hyper-V reference TSC. */ 449 typedef GIMHVREFTSC const *PCGIMHVREFTSC; 450 440 451 441 452 /** … … 465 476 uint32_t u32Alignment0; 466 477 478 /** Per-VM R0 Spinlock for protecting EMT writes to the TSC page. */ 479 RTSPINLOCK hSpinlockR0; 480 #if HC_ARCH_BITS == 32 481 uint32_t u32Alignment1; 482 #endif 483 467 484 /** Array of MMIO2 regions. */ 468 485 GIMMMIO2REGION aMmio2Regions[GIM_HV_REGION_IDX_MAX + 1]; … … 473 490 typedef GIMHV const *PCGIMHV; 474 491 AssertCompileMemberAlignment(GIMHV, aMmio2Regions, 8); 492 AssertCompileMemberAlignment(GIMHV, hSpinlockR0, sizeof(uintptr_t)); 475 493 476 494 RT_C_DECLS_BEGIN … … 479 497 VMMR0_INT_DECL(int) GIMR0HvInitVM(PVM pVM); 480 498 VMMR0_INT_DECL(int) GIMR0HvTermVM(PVM pVM); 499 VMMR0_INT_DECL(int) GIMR0HvUpdateParavirtTsc(PVM pVM, uint64_t u64Offset); 481 500 #endif /* IN_RING0 */ 482 501 483 502 #ifdef IN_RING3 484 503 VMMR3_INT_DECL(int) GIMR3HvInit(PVM pVM); 504 VMMR3_INT_DECL(int) GIMR3HvTerm(PVM pVM); 485 505 VMMR3_INT_DECL(void) GIMR3HvRelocate(PVM pVM, RTGCINTPTR offDelta); 486 506 VMMR3_INT_DECL(void) GIMR3HvReset(PVM pVM); 487 507 VMMR3_INT_DECL(PGIMMMIO2REGION) GIMR3HvGetMmio2Regions(PVM pVM, uint32_t *pcRegions); 508 VMMR3_INT_DECL(int) GIMR3HvSave(PVM pVM, PSSMHANDLE pSSM); 509 VMMR3_INT_DECL(int) GIMR3HvLoad(PVM pVM, PSSMHANDLE pSSM, uint32_t uSSMVersion); 510 511 VMMR3_INT_DECL(int) GIMR3HvDisableTscPage(PVM pVM); 512 VMMR3_INT_DECL(int) GIMR3HvEnableTscPage(PVM pVM, RTGCPHYS GCPhysTscPage); 513 VMMR3_INT_DECL(int) GIMR3HvDisableHypercallPage(PVM pVM); 514 VMMR3_INT_DECL(int) GIMR3HvEnableHypercallPage(PVM pVM, RTGCPHYS GCPhysHypercallPage); 488 515 #endif /* IN_RING3 */ 489 516 490 517 VMM_INT_DECL(bool) GIMHvIsParavirtTscEnabled(PVM pVM); 491 VMM_INT_DECL(int) GIMHvUpdateParavirtTsc(PVM pVM, uint64_t u64Offset);492 518 VMM_INT_DECL(int) GIMHvHypercall(PVMCPU pVCpu, PCPUMCTX pCtx); 493 519 VMM_INT_DECL(int) GIMHvReadMsr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue); -
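The new MSR_GIM_HV_RESET_BIT / MSR_GIM_HV_RESET_IS_SET pair follows the usual define-the-bit, test-the-bit idiom: RT_BIT_64 names the bit once, and the IS_SET wrapper normalizes the masked value to a bool. A generic sketch of the idiom with stand-in MY_* names, not the IPRT macros:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define MY_BIT_64(bit)      (UINT64_C(1) << (bit))
    #define MY_RESET_BIT        MY_BIT_64(0)
    /* Normalized to bool so the result is safe in any boolean context. */
    #define MY_RESET_IS_SET(a)  ((bool)(((a) & MY_RESET_BIT) != 0))

    int main(void)
    {
        uint64_t uMsr = UINT64_C(0x1);
        assert(MY_RESET_IS_SET(uMsr));
        assert(!MY_RESET_IS_SET(uMsr & ~MY_RESET_BIT));
        return 0;
    }

Naming the bit once and testing through the wrapper keeps MSR read/write handlers free of raw masks, which matters as the Hyper-V MSR surface grows.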
trunk/src/VBox/VMM/include/GIMInternal.h
r51560 r51643 29 29 * @{ 30 30 */ 31 32 /** The saved state version. */ 33 #define GIM_SSM_VERSION 1 31 34 32 35 /** … … 89 92 90 93 #ifdef IN_RING3 91 VMM_INT_DECL(int) GIMR3Mmio2Unmap(PVM pVM, PGIMMMIO2REGION pRegion); 92 VMM_INT_DECL(int) GIMR3Mmio2Map(PVM pVM, PGIMMMIO2REGION pRegion, RTGCPHYS GCPhysRegion, const char *pszDesc); 94 VMMR3_INT_DECL(int) GIMR3Mmio2Unmap(PVM pVM, PGIMMMIO2REGION pRegion); 95 VMMR3_INT_DECL(int) GIMR3Mmio2Map(PVM pVM, PGIMMMIO2REGION pRegion, RTGCPHYS GCPhysRegion); 96 VMMR3_INT_DECL(int) GIMR3Mmio2HandlerPhysicalRegister(PVM pVM, PGIMMMIO2REGION pRegion); 97 VMMR3_INT_DECL(int) GIMR3Mmio2HandlerPhysicalDeregister(PVM pVM, PGIMMMIO2REGION pRegion); 93 98 #endif /* IN_RING3 */ 94 99 -
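GIM_SSM_VERSION 1 marks the first saved-state layout for GIM (the matching GIMR3HvSave/GIMR3HvLoad prototypes appear in the GIMHvInternal.h hunk above). The standard discipline is to store the unit version with the data and have the loader reject versions it does not understand rather than guess; a minimal skeleton with illustrative names, not the actual SSM API:

    #include <stdint.h>

    #define GIM_DEMO_SSM_VERSION 1

    /* Illustrative loader; real code reads fields through SSMR3* calls,
     * in exactly the order the saver wrote them. */
    static int gimDemoLoad(uint32_t uVersion)
    {
        if (uVersion != GIM_DEMO_SSM_VERSION)
            return -1; /* reject unknown layouts instead of guessing */
        /* ... read the saved fields here ... */
        return 0;
    }

    int main(void)
    {
        return gimDemoLoad(GIM_DEMO_SSM_VERSION); /* 0: version accepted */
    }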
trunk/src/VBox/VMM/include/HMInternal.h
r51220 r51643 5 5 6 6 /* 7 * Copyright (C) 2006-201 3Oracle Corporation7 * Copyright (C) 2006-2014 Oracle Corporation 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 313 313 /** Set when we've initialized VMX or SVM. */ 314 314 bool fInitialized; 315 316 315 /** Set if nested paging is enabled. */ 317 316 bool fNestedPaging; 318 319 317 /** Set if nested paging is allowed. */ 320 318 bool fAllowNestedPaging; 321 322 319 /** Set if large pages are enabled (requires nested paging). */ 323 320 bool fLargePages; 324 325 321 /** Set if we can support 64-bit guests or not. */ 326 322 bool fAllow64BitGuests; 327 328 323 /** Set if an IO-APIC is configured for this VM. */ 329 324 bool fHasIoApic; 330 331 325 /** Set when TPR patching is allowed. */ 332 326 bool fTprPatchingAllowed; 333 334 327 /** Set when we initialize VT-x or AMD-V once for all CPUs. */ 335 328 bool fGlobalInit; 336 337 329 /** Set when TPR patching is active. */ 338 330 bool fTPRPatchingActive; … … 341 333 /** Maximum ASID allowed. */ 342 334 uint32_t uMaxAsid; 343 344 335 /** The maximum number of resumes loops allowed in ring-0 (safety precaution). 345 336 * This number is set much higher when RTThreadPreemptIsPending is reliable. */ … … 352 343 /** Size of the guest patch memory block. */ 353 344 uint32_t cbGuestPatchMem; 354 uint32_t u Padding1;345 uint32_t u32Alignment0; 355 346 356 347 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) 357 348 /** 32 to 64 bits switcher entrypoint. */ 358 349 R0PTRTYPE(PFNHMSWITCHERHC) pfnHost32ToGuest64R0; 359 RTR0PTR uPadding2;350 RTR0PTR pvR0Alignment0; 360 351 #endif 361 352 … … 365 356 * CPU. */ 366 357 bool fSupported; 367 368 358 /** Set when we've enabled VMX. */ 369 359 bool fEnabled; 370 371 360 /** Set if VPID is supported. */ 372 361 bool fVpid; 373 374 362 /** Set if VT-x VPID is allowed. */ 375 363 bool fAllowVpid; 376 377 364 /** Set if unrestricted guest execution is in use (real and protected mode without paging). */ 378 365 bool fUnrestrictedGuest; 379 380 366 /** Set if unrestricted guest execution is allowed to be used. */ 381 367 bool fAllowUnrestricted; 382 383 368 /** Whether we're using the preemption timer or not. */ 384 369 bool fUsePreemptTimer; … … 388 373 /** Virtual address of the TSS page used for real mode emulation. */ 389 374 R3PTRTYPE(PVBOXTSS) pRealModeTSS; 390 391 375 /** Virtual address of the identity page table used for real mode and protected mode without paging emulation in EPT mode. */ 392 376 R3PTRTYPE(PX86PD) pNonPagingModeEPTPageTable; 393 377 378 /** Physical address of the APIC-access page. */ 379 RTHCPHYS HCPhysApicAccess; 394 380 /** R0 memory object for the APIC-access page. */ 395 381 RTR0MEMOBJ hMemObjApicAccess; 396 /** Physical address of the APIC-access page. */397 RTHCPHYS HCPhysApicAccess;398 382 /** Virtual address of the APIC-access page. */ 399 383 R0PTRTYPE(uint8_t *) pbApicAccess; 400 384 401 385 #ifdef VBOX_WITH_CRASHDUMP_MAGIC 386 RTHCPHYS HCPhysScratch; 402 387 RTR0MEMOBJ hMemObjScratch; 403 RTHCPHYS HCPhysScratch;404 388 R0PTRTYPE(uint8_t *) pbScratch; 405 389 #endif 406 390 407 391 /** Internal Id of which flush-handler to use for tagged-TLB entries. 
*/ 408 unsigned uFlushTaggedTlb; 409 410 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) 411 uint32_t u32Alignment; 412 #endif 392 uint32_t uFlushTaggedTlb; 393 uint32_t u32Alignment0; 413 394 /** Host CR4 value (set by ring-0 VMX init) */ 414 395 uint64_t u64HostCr4; … … 418 399 /** Whether the CPU supports VMCS fields for swapping EFER. */ 419 400 bool fSupportsVmcsEfer; 420 bool afAlignment1[7];401 uint8_t u8Alignment2[7]; 421 402 422 403 /** VMX MSR values */ … … 424 405 425 406 /** Flush types for invept & invvpid; they depend on capabilities. */ 426 VMX _FLUSH_EPTenmFlushEpt;427 VMX _FLUSH_VPIDenmFlushVpid;407 VMXFLUSHEPT enmFlushEpt; 408 VMXFLUSHVPID enmFlushVpid; 428 409 } vmx; 429 410 … … 439 420 /** Set when the hack to ignore VERR_SVM_IN_USE is active. */ 440 421 bool fIgnoreInUseError; 441 422 uint8_t u8Alignment0[4]; 423 424 /** Physical address of the IO bitmap (12kb). */ 425 RTHCPHYS HCPhysIOBitmap; 442 426 /** R0 memory object for the IO bitmap (12kb). */ 443 427 RTR0MEMOBJ hMemObjIOBitmap; 444 /** Physical address of the IO bitmap (12kb). */445 RTHCPHYS HCPhysIOBitmap;446 428 /** Virtual address of the IO bitmap. */ 447 429 R0PTRTYPE(void *) pvIOBitmap; … … 452 434 /** SVM revision. */ 453 435 uint32_t u32Rev; 454 455 436 /** SVM feature bits from cpuid 0x8000000a */ 456 437 uint32_t u32Features; … … 458 439 459 440 /** 460 * AVL tree with all patches (active or disabled) sorted by guest instruction address 441 * AVL tree with all patches (active or disabled) sorted by guest instruction 442 * address. 461 443 */ 462 444 AVLOU32TREE PatchTree; … … 488 470 #define VMCSCACHE_MAX_ENTRY 128 489 471 490 /* Structure for storing read and write VMCS actions. */ 472 /** 473 * Structure for storing read and write VMCS actions. 474 */ 491 475 typedef struct VMCSCACHE 492 476 { … … 546 530 /** Pointer to VMCSCACHE. */ 547 531 typedef VMCSCACHE *PVMCSCACHE; 532 AssertCompileSizeAlignment(VMCSCACHE, 8); 548 533 549 534 /** VMX StartVM function. */ … … 597 582 struct 598 583 { 599 /** Physical address of the VM control structure (VMCS). */600 RTHCPHYS HCPhysVmcs;601 /** R0 memory object for the VM control structure (VMCS). */602 RTR0MEMOBJ hMemObjVmcs;603 /** Virtual address of the VM control structure (VMCS). */604 R0PTRTYPE(void *) pvVmcs;605 584 /** Ring 0 handlers for VT-x. */ 606 585 PFNHMVMXSTARTVM pfnStartVM; 607 586 #if HC_ARCH_BITS == 32 608 uint32_t u32Alignment1; 609 #endif 610 587 uint32_t u32Alignment0; 588 #endif 611 589 /** Current VMX_VMCS32_CTRL_PIN_EXEC. */ 612 590 uint32_t u32PinCtls; … … 620 598 uint32_t u32EntryCtls; 621 599 622 /** Physical address of the virtual APIC page for TPR caching. */623 RTHCPHYS HCPhysVirtApic;624 /** R0 memory object for the virtual APIC page for TPR caching. */625 RTR0MEMOBJ hMemObjVirtApic;626 /** Virtual address of the virtual APIC page for TPR caching. */627 R0PTRTYPE(uint8_t *) pbVirtApic;628 #if HC_ARCH_BITS == 32629 uint32_t u32Alignment2;630 #endif631 632 600 /** Current CR0 mask. */ 633 601 uint32_t u32CR0Mask; … … 638 606 /** The updated-guest-state mask. */ 639 607 volatile uint32_t fUpdatedGuestState; 640 /** Current EPTP. */ 641 RTHCPHYS HCPhysEPTP; 608 uint32_t u32Alignment1; 609 610 /** Physical address of the VM control structure (VMCS). */ 611 RTHCPHYS HCPhysVmcs; 612 /** R0 memory object for the VM control structure (VMCS). */ 613 RTR0MEMOBJ hMemObjVmcs; 614 /** Virtual address of the VM control structure (VMCS). 
*/ 615 R0PTRTYPE(void *) pvVmcs; 616 617 /** Physical address of the virtual APIC page for TPR caching. */ 618 RTHCPHYS HCPhysVirtApic; 619 /** R0 memory object for the virtual APIC page for TPR caching. */ 620 RTR0MEMOBJ hMemObjVirtApic; 621 /** Virtual address of the virtual APIC page for TPR caching. */ 622 R0PTRTYPE(uint8_t *) pbVirtApic; 642 623 643 624 /** Physical address of the MSR bitmap. */ … … 665 646 R0PTRTYPE(void *) pvHostMsr; 666 647 648 /** Current EPTP. */ 649 RTHCPHYS HCPhysEPTP; 650 667 651 /** Number of guest/host MSR pairs in the auto-load/store area. */ 668 652 uint32_t cMsrs; 669 653 /** Whether the host MSR values are up-to-date in the auto-load/store area. */ 670 654 bool fUpdatedHostMsrs; 671 uint8_t u8Align [7];655 uint8_t u8Alignment0[3]; 672 656 673 657 /** Host LSTAR MSR value to restore lazily while leaving VT-x. */ … … 681 665 /** A mask of which MSRs have been swapped and need restoration. */ 682 666 uint32_t fRestoreHostMsrs; 683 uint32_t u32Alignment 3;667 uint32_t u32Alignment2; 684 668 685 669 /** The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */ … … 704 688 } RealMode; 705 689 690 /** VT-x error-reporting (mainly for ring-3 propagation). */ 706 691 struct 707 692 { … … 712 697 RTCPUID idEnteredCpu; 713 698 RTCPUID idCurrentCpu; 714 uint32_t u32 Padding;699 uint32_t u32Alignment0; 715 700 } LastError; 716 701 717 /** State of the VMCS. */702 /** Current state of the VMCS. */ 718 703 uint32_t uVmcsState; 719 704 /** Which host-state bits to restore before being preempted. */ … … 721 706 /** The host-state restoration structure. */ 722 707 VMXRESTOREHOST RestoreHost; 708 723 709 /** Set if guest was executing in real mode (extra checks). */ 724 710 bool fWasInRealMode; 725 uint8_t u8Align2[7]; 726 727 /** Alignment padding. */ 728 uint32_t u32Padding; 711 uint8_t u8Alignment1[7]; 729 712 } vmx; 730 713 731 714 struct 732 715 { 716 /** Ring 0 handlers for VT-x. */ 717 PFNHMSVMVMRUN pfnVMRun; 718 #if HC_ARCH_BITS == 32 719 uint32_t u32Alignment0; 720 #endif 721 722 /** Physical address of the host VMCB which holds additional host-state. */ 723 RTHCPHYS HCPhysVmcbHost; 733 724 /** R0 memory object for the host VMCB which holds additional host-state. */ 734 725 RTR0MEMOBJ hMemObjVmcbHost; 735 /** Physical address of the host VMCB which holds additional host-state. */736 RTHCPHYS HCPhysVmcbHost;737 726 /** Virtual address of the host VMCB which holds additional host-state. */ 738 727 R0PTRTYPE(void *) pvVmcbHost; 739 728 729 /** Physical address of the guest VMCB. */ 730 RTHCPHYS HCPhysVmcb; 740 731 /** R0 memory object for the guest VMCB. */ 741 732 RTR0MEMOBJ hMemObjVmcb; 742 /** Physical address of the guest VMCB. */743 RTHCPHYS HCPhysVmcb;744 733 /** Virtual address of the guest VMCB. */ 745 734 R0PTRTYPE(void *) pvVmcb; 746 735 747 /** Ring 0 handlers for VT-x. */ 748 PFNHMSVMVMRUN pfnVMRun; 749 736 /** Physical address of the MSR bitmap (8 KB). */ 737 RTHCPHYS HCPhysMsrBitmap; 750 738 /** R0 memory object for the MSR bitmap (8 KB). */ 751 739 RTR0MEMOBJ hMemObjMsrBitmap; 752 /** Physical address of the MSR bitmap (8 KB). */753 RTHCPHYS HCPhysMsrBitmap;754 740 /** Virtual address of the MSR bitmap. */ 755 741 R0PTRTYPE(void *) pvMsrBitmap; … … 758 744 * we should check if the VTPR changed on every VM-exit. */ 759 745 bool fSyncVTpr; 760 uint8_t u8Align[7]; 761 762 /** Alignment padding. */ 763 uint32_t u32Padding; 746 uint8_t u8Alignment0[7]; 764 747 } svm; 765 748 … … 791 774 /** Pending IO operation type. 
*/ 792 775 HMPENDINGIO enmType; 793 uint32_t u Padding;776 uint32_t u32Alignment0; 794 777 RTGCPTR GCPtrRip; 795 778 RTGCPTR GCPtrRipNext; … … 822 805 RTGCPTR aPages[HM_MAX_TLB_SHOOTDOWN_PAGES]; 823 806 uint32_t cPages; 824 uint32_t u32 Padding; /**< Explicit alignment padding. */807 uint32_t u32Alignment0; /**< Explicit alignment padding. */ 825 808 } TlbShootdown; 826 809 … … 931 914 STAMCOUNTER StatSwitchLongJmpToR3; 932 915 916 STAMCOUNTER StatTscOffsetAdjusted; 917 STAMCOUNTER StatTscParavirt; 933 918 STAMCOUNTER StatTscOffset; 934 919 STAMCOUNTER StatTscIntercept; … … 970 955 /** Pointer to HM VMCPU instance data. */ 971 956 typedef HMCPU *PHMCPU; 957 AssertCompileMemberAlignment(HMCPU, vmx, 8); 958 AssertCompileMemberAlignment(HMCPU, svm, 8); 959 AssertCompileMemberAlignment(HMCPU, Event, 8); 972 960 973 961 974 962 #ifdef IN_RING0 975 976 963 VMMR0DECL(PHMGLOBALCPUINFO) HMR0GetCurrentCpu(void); 977 964 VMMR0DECL(PHMGLOBALCPUINFO) HMR0GetCurrentCpuEx(RTCPUID idCpu); 978 965 979 966 980 # ifdef VBOX_STRICT967 # ifdef VBOX_STRICT 981 968 VMMR0DECL(void) HMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx); 982 969 VMMR0DECL(void) HMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg); 983 # else984 # define HMDumpRegs(a, b ,c) do { } while (0)985 # define HMR0DumpDescriptor(a, b, c) do { } while (0)986 # endif970 # else 971 # define HMDumpRegs(a, b ,c) do { } while (0) 972 # define HMR0DumpDescriptor(a, b, c) do { } while (0) 973 # endif /* VBOX_STRICT */ 987 974 988 975 # ifdef VBOX_WITH_KERNEL_USING_XMM … … 1004 991 */ 1005 992 DECLASM(uint64_t) HMR0Get64bitCR3(void); 1006 # endif 993 # endif /* VBOX_WITH_HYBRID_32BIT_KERNEL */ 1007 994 1008 995 #endif /* IN_RING0 */ -
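A large share of this HMInternal.h hunk is layout discipline rather than behavior: each RTHCPHYS now sits directly before its RTR0MEMOBJ/pointer siblings, padding members get explicit names (u32Alignment0, u8Alignment0[] and friends), and AssertCompileMemberAlignment pins the result at build time. The check can be open-coded portably; a sketch with a stand-in macro (MY_ASSERT_MEMBER_ALIGN, not the IPRT AssertCompile* family):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Fails to compile (negative array size) if the member is misaligned. */
    #define MY_ASSERT_MEMBER_ALIGN(type, member, align) \
        typedef char assert_align_##member[(offsetof(type, member) % (align)) == 0 ? 1 : -1]

    typedef struct DEMO
    {
        uint64_t HCPhysPage;    /* 8-byte member leads the group */
        void    *hMemObj;       /* handle/pointer members follow */
        uint8_t *pbPage;
        bool     fMapped;
        uint8_t  abPadding[7];  /* explicit, named padding */
    } DEMO;

    MY_ASSERT_MEMBER_ALIGN(DEMO, HCPhysPage, 8);
    MY_ASSERT_MEMBER_ALIGN(DEMO, hMemObj, 8);

Naming every pad byte makes gaps visible in the definition itself and keeps the ring-0 and ring-3 views of shared structures in sync, which is why the hunk renames the old uPadding/u32Padding members so methodically.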
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
r50953 r51643 7 7 8 8 /* 9 * Copyright (C) 2006-201 2Oracle Corporation9 * Copyright (C) 2006-2014 Oracle Corporation 10 10 * 11 11 * This file is part of VirtualBox Open Source Edition (OSE), as … … 408 408 /* hm - 32-bit gcc won't align uint64_t naturally, so check. */ 409 409 CHECK_MEMBER_ALIGNMENT(HM, uMaxAsid, 8); 410 CHECK_MEMBER_ALIGNMENT(HM, vmx.u64HostCr4, 8); 411 CHECK_MEMBER_ALIGNMENT(HM, vmx.Msrs.u64FeatureCtrl, 8); 412 CHECK_MEMBER_ALIGNMENT(HM, StatTprPatchSuccess, 8); 410 CHECK_MEMBER_ALIGNMENT(HM, vmx, 8); 411 CHECK_MEMBER_ALIGNMENT(HM, vmx.Msrs, 8); 412 CHECK_MEMBER_ALIGNMENT(HM, svm, 8); 413 CHECK_MEMBER_ALIGNMENT(HM, PatchTree, 8); 414 CHECK_MEMBER_ALIGNMENT(HM, aPatches, 8); 415 CHECK_MEMBER_ALIGNMENT(HMCPU, vmx, 8); 416 CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.pfnStartVM, 8); 417 CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.HCPhysVmcs, 8); 418 CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.LastError, 8); 419 CHECK_MEMBER_ALIGNMENT(HMCPU, svm, 8); 420 CHECK_MEMBER_ALIGNMENT(HMCPU, svm.pfnVMRun, 8); 421 CHECK_MEMBER_ALIGNMENT(HMCPU, Event, 8); 422 CHECK_MEMBER_ALIGNMENT(HMCPU, Event.u64IntInfo, 8); 423 CHECK_MEMBER_ALIGNMENT(HMCPU, DisState, 8); 413 424 CHECK_MEMBER_ALIGNMENT(HMCPU, StatEntry, 8); 414 CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.HCPhysVmcs, sizeof(RTHCPHYS));415 CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.u32PinCtls, 8);416 CHECK_MEMBER_ALIGNMENT(HMCPU, DisState, 8);417 CHECK_MEMBER_ALIGNMENT(HMCPU, Event.u64IntInfo, 8);418 425 419 426 /* Make sure the set is large enough and has the correct size. */
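The testcase additions mirror the HMInternal.h reshuffle: as the comment in the hunk says, 32-bit gcc won't naturally 8-align uint64_t members, so alignment is asserted instead of assumed. A rough stand-alone equivalent of one CHECK_MEMBER_ALIGNMENT line (the macro below is illustrative, not the tstVMStructSize one):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct SAMPLE
    {
        uint32_t u32;
        uint64_t u64Stat;  /* offset 8 on most ABIs, may be 4 on 32-bit gcc */
    } SAMPLE;

    static unsigned g_cErrors = 0;

    #define CHECK_ALIGN(type, member, align) \
        do { \
            if (offsetof(type, member) % (align)) \
            { \
                printf("error! %s::%s at offset %u, not %u-byte aligned\n", \
                       #type, #member, (unsigned)offsetof(type, member), \
                       (unsigned)(align)); \
                g_cErrors++; \
            } \
        } while (0)

    int main(void)
    {
        CHECK_ALIGN(SAMPLE, u64Stat, 8);
        printf("%u error(s)\n", g_cErrors);
        return g_cErrors ? 1 : 0;
    }

On a 32-bit gcc build this can legitimately report an error, which is exactly the situation the explicit padding members added to HMInternal.h are there to prevent.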
