Index: /trunk/src/VBox/VMM/VMMR0/HMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMR0.cpp	(revision 48217)
+++ /trunk/src/VBox/VMM/VMMR0/HMR0.cpp	(revision 48218)
@@ -91,5 +91,5 @@
     DECLR0CALLBACKMEMBER(int,  pfnRunGuestCode,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
     DECLR0CALLBACKMEMBER(int,  pfnEnableCpu,(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage,
-                                             bool fEnabledByHost));
+                                             bool fEnabledByHost, void *pvArg));
     DECLR0CALLBACKMEMBER(int,  pfnDisableCpu,(PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage));
     DECLR0CALLBACKMEMBER(int,  pfnInitVM,(PVM pVM));
@@ -243,7 +243,7 @@
 
 static DECLCALLBACK(int) hmR0DummyEnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage,
-                                            bool fEnabledBySystem)
-{
-    NOREF(pCpu); NOREF(pVM); NOREF(pvCpuPage); NOREF(HCPhysCpuPage); NOREF(fEnabledBySystem);
+                                            bool fEnabledBySystem, void *pvArg)
+{
+    NOREF(pCpu); NOREF(pVM); NOREF(pvCpuPage); NOREF(HCPhysCpuPage); NOREF(fEnabledBySystem); NOREF(pvArg);
     return VINF_SUCCESS;
 }
@@ -908,5 +908,5 @@
     int rc;
     if (g_HvmR0.vmx.fSupported && g_HvmR0.vmx.fUsingSUPR0EnableVTx)
-        rc = g_HvmR0.pfnEnableCpu(pCpu, pVM, NULL /* pvCpuPage */, NIL_RTHCPHYS, true);
+        rc = g_HvmR0.pfnEnableCpu(pCpu, pVM, NULL /* pvCpuPage */, NIL_RTHCPHYS, true, &g_HvmR0.vmx.Msrs);
     else
     {
@@ -914,5 +914,9 @@
         void    *pvCpuPage     = RTR0MemObjAddress(pCpu->hMemObj);
         RTHCPHYS HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
-        rc = g_HvmR0.pfnEnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false);
+
+        if (g_HvmR0.vmx.fSupported)
+            rc = g_HvmR0.pfnEnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false, &g_HvmR0.vmx.Msrs);
+        else
+            rc = g_HvmR0.pfnEnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false, NULL /* pvArg */);
     }
     AssertRC(rc);
@@ -1767,5 +1771,5 @@
         void           *pvCpuPage     = RTR0MemObjAddress(pCpu->hMemObj);
         RTHCPHYS        HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
-        VMXR0EnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false);
+        VMXR0EnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false, &g_HvmR0.vmx.Msrs);
     }
 }
Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 48217)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 48218)
@@ -301,6 +301,8 @@
  * @param   pvCpuPage       Pointer to the global CPU page.
  * @param   HCPhysCpuPage   Physical address of the global CPU page.
- */
-VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost)
+ * @param   pvArg           Unused on AMD-V.
+ */
+VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
+                              void *pvArg)
 {
     AssertReturn(!fEnabledByHost, VERR_INVALID_PARAMETER);
@@ -308,4 +310,5 @@
                  && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
     AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
+    NOREF(pvArg);
 
     /*
Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.h
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.h	(revision 48217)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.h	(revision 48218)
@@ -42,5 +42,6 @@
 VMMR0DECL(int)  SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu);
 VMMR0DECL(void) SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit);
-VMMR0DECL(int)  SVMR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage, bool fEnabledBySystem);
+VMMR0DECL(int)  SVMR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage, bool fEnabledBySystem,
+                               void *pvArg);
 VMMR0DECL(int)  SVMR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
 VMMR0DECL(int)  SVMR0InitVM(PVM pVM);
Index: /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 48217)
+++ /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 48218)
@@ -314,5 +314,5 @@
 *   Internal Functions                                                         *
 *******************************************************************************/
-static void               hmR0VmxFlushEpt(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush);
+static void               hmR0VmxFlushEpt(PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush);
 static void               hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr);
 static int                hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr,
@@ -1009,8 +1009,11 @@
  * @param   fEnabledByHost  Set if SUPR0EnableVTx() or similar was used to
  *                          enable VT-x on the host.
- */
-VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost)
+ * @param   pvMsrs          Opaque pointer to VMXMSRS struct.
+ */
+VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
+                              void *pvMsrs)
 {
     AssertReturn(pCpu, VERR_INVALID_PARAMETER);
+    AssertReturn(pvMsrs, VERR_INVALID_PARAMETER);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
 
@@ -1023,26 +1026,15 @@
 
     /*
-     * Flush all EPTP tagged-TLB entries (in case any other hypervisor have been using EPTPs) so that
-     * we can avoid an explicit flush while using new VPIDs. We would still need to flush
-     * each time while reusing a VPID after hitting the MaxASID limit once.
+     * Flush all EPT tagged-TLB entries (in case VirtualBox or any other hypervisor has been using EPTPs) so
+     * we don't retain any stale guest-physical mappings which won't get invalidated when flushing by VPID.
      */
-    if (   pVM
-        && pVM->hm.s.fNestedPaging)
-    {
-        /* We require ALL_CONTEXT flush-type to be available on the CPU. See hmR0VmxSetupTaggedTlb(). */
-        Assert(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS);
-        hmR0VmxFlushEpt(pVM, NULL /* pVCpu */, VMX_FLUSH_EPT_ALL_CONTEXTS);
+    PVMXMSRS pMsrs = (PVMXMSRS)pvMsrs;
+    if (pMsrs->u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
+    {
+        hmR0VmxFlushEpt(NULL /* pVCpu */, VMX_FLUSH_EPT_ALL_CONTEXTS);
         pCpu->fFlushAsidBeforeUse = false;
     }
     else
-    {
-        /** @todo This is still not perfect. If on host resume (pVM is NULL or a VM
-         *        without Nested Paging triggered this function) we still have the risk
-         *        of potentially running with stale TLB-entries from other hypervisors
-         *        when later we use a VM with NestedPaging. To fix this properly we will
-         *        have to pass '&g_HvmR0' (see HMR0.cpp) to this function and read
-         *        'u64EptVpidCaps' from it. Sigh. */
         pCpu->fFlushAsidBeforeUse = true;
-    }
 
     /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
@@ -1126,14 +1118,14 @@
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU (can be NULL depending on @a
  *                      enmFlush).
  * @param   enmFlush    Type of flush.
- */
-static void hmR0VmxFlushEpt(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush)
-{
-    AssertPtr(pVM);
-    Assert(pVM->hm.s.fNestedPaging);
-
+ *
+ * @remarks Caller is responsible for making sure this function is called only
+ *          when NestedPaging is supported and providing @a enmFlush that is
+ *          supported by the CPU.
+ */
+static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush)
+{
     uint64_t descriptor[2];
     if (enmFlush == VMX_FLUSH_EPT_ALL_CONTEXTS)
@@ -1351,5 +1343,5 @@
          * invalidated. We don't need to flush-by-VPID here as flushing by EPT covers it. See @bugref{6568}.
          */
-        hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
+        hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
         STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
         HMVMX_SET_TAGGED_TLB_FLUSHED();
@@ -1367,5 +1359,5 @@
          * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information". See @bugref{6568}.
          */
-        hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
+        hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
         STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
         HMVMX_SET_TAGGED_TLB_FLUSHED();
@@ -1389,5 +1381,5 @@
         }
         else
-            hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
+            hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
 
         HMVMX_SET_TAGGED_TLB_FLUSHED();
@@ -1459,5 +1451,5 @@
     if (pVCpu->hm.s.fForceTLBFlush)
     {
-        hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
+        hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
         pVCpu->hm.s.fForceTLBFlush = false;
     }
@@ -1471,5 +1463,5 @@
             /* We cannot flush individual entries without VPID support. Flush using EPT. */
             STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
-            hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
+            hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
         }
         else
Index: /trunk/src/VBox/VMM/VMMR0/HMVMXR0.h
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMVMXR0.h	(revision 48217)
+++ /trunk/src/VBox/VMM/VMMR0/HMVMXR0.h	(revision 48218)
@@ -31,5 +31,6 @@
 VMMR0DECL(int)  VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu);
 VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit);
-VMMR0DECL(int)  VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys, bool fEnabledBySystem);
+VMMR0DECL(int)  VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys, bool fEnabledBySystem,
+                               void *pvMsrs);
 VMMR0DECL(int)  VMXR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
 VMMR0DECL(int)  VMXR0GlobalInit(void);
