Index: /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 48236)
+++ /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 48237)
@@ -1265,16 +1265,16 @@
  * @param   pVM             Pointer to the VM.
  * @param   pVCpu           Pointer to the VMCPU.
+ * @param   pCpu            Pointer to the global HM CPU struct.
  *
  * @remarks Called with interrupts disabled.
  */
-static void hmR0VmxFlushTaggedTlbNone(PVM pVM, PVMCPU pVCpu)
-{
+static void hmR0VmxFlushTaggedTlbNone(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
+{
+    AssertPtr(pVCpu);
+    AssertPtr(pCpu);
     NOREF(pVM);
-    AssertPtr(pVCpu);
+
     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
-
-    PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
-    AssertPtr(pCpu);
 
     pVCpu->hm.s.TlbShootdown.cPages = 0;
@@ -1291,4 +1291,5 @@
  * @param    pVM            Pointer to the VM.
  * @param    pVCpu          Pointer to the VMCPU.
+ * @param    pCpu           Pointer to the global HM CPU struct.
  * @remarks All references to "ASID" in this function pertains to "VPID" in
  *          Intel's nomenclature. The reason is, to avoid confusion in compare
@@ -1297,5 +1298,5 @@
  * @remarks Called with interrupts disabled.
  */
-static void hmR0VmxFlushTaggedTlbBoth(PVM pVM, PVMCPU pVCpu)
+static void hmR0VmxFlushTaggedTlbBoth(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
 {
 #ifdef VBOX_WITH_STATISTICS
@@ -1312,4 +1313,5 @@
 
     AssertPtr(pVM);
+    AssertPtr(pCpu);
     AssertPtr(pVCpu);
     AssertMsg(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid,
@@ -1317,6 +1319,4 @@
                "fNestedPaging=%RTbool fVpid=%RTbool", pVM->hm.s.fNestedPaging, pVM->hm.s.vmx.fVpid));
 
-    PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
-    AssertPtr(pCpu);
 
     /*
@@ -1416,16 +1416,15 @@
  * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCpu        Pointer to the global HM CPU struct.
  *
  * @remarks Called with interrupts disabled.
  */
-static void hmR0VmxFlushTaggedTlbEpt(PVM pVM, PVMCPU pVCpu)
+static void hmR0VmxFlushTaggedTlbEpt(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
 {
     AssertPtr(pVM);
     AssertPtr(pVCpu);
+    AssertPtr(pCpu);
     AssertMsg(pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with NestedPaging disabled."));
     AssertMsg(!pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID enabled."));
-
-    PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
-    AssertPtr(pCpu);
 
     /*
@@ -1481,15 +1480,15 @@
  * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCpu        Pointer to the global HM CPU struct.
  *
  * @remarks Called with interrupts disabled.
  */
-static void hmR0VmxFlushTaggedTlbVpid(PVM pVM, PVMCPU pVCpu)
+static void hmR0VmxFlushTaggedTlbVpid(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
 {
     AssertPtr(pVM);
     AssertPtr(pVCpu);
+    AssertPtr(pCpu);
     AssertMsg(pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTlbVpid cannot be invoked with VPID disabled."));
     AssertMsg(!pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTlbVpid cannot be invoked with NestedPaging enabled"));
-
-    PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
 
     /*
@@ -1577,15 +1576,16 @@
  * Flushes the guest TLB entry based on CPU capabilities.
  *
- * @param pVCpu     Pointer to the VMCPU.
- */
-DECLINLINE(void) hmR0VmxFlushTaggedTlb(PVMCPU pVCpu)
+ * @param   pVCpu     Pointer to the VMCPU.
+ * @param   pCpu      Pointer to the global HM CPU struct.
+ */
+DECLINLINE(void) hmR0VmxFlushTaggedTlb(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
 {
     PVM pVM = pVCpu->CTX_SUFF(pVM);
     switch (pVM->hm.s.vmx.uFlushTaggedTlb)
     {
-        case HMVMX_FLUSH_TAGGED_TLB_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVM, pVCpu); break;
-        case HMVMX_FLUSH_TAGGED_TLB_EPT:      hmR0VmxFlushTaggedTlbEpt(pVM, pVCpu);  break;
-        case HMVMX_FLUSH_TAGGED_TLB_VPID:     hmR0VmxFlushTaggedTlbVpid(pVM, pVCpu); break;
-        case HMVMX_FLUSH_TAGGED_TLB_NONE:     hmR0VmxFlushTaggedTlbNone(pVM, pVCpu); break;
+        case HMVMX_FLUSH_TAGGED_TLB_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVM, pVCpu, pCpu); break;
+        case HMVMX_FLUSH_TAGGED_TLB_EPT:      hmR0VmxFlushTaggedTlbEpt(pVM, pVCpu, pCpu);  break;
+        case HMVMX_FLUSH_TAGGED_TLB_VPID:     hmR0VmxFlushTaggedTlbVpid(pVM, pVCpu, pCpu); break;
+        case HMVMX_FLUSH_TAGGED_TLB_NONE:     hmR0VmxFlushTaggedTlbNone(pVM, pVCpu, pCpu); break;
         default:
             AssertMsgFailed(("Invalid flush-tag function identifier\n"));
@@ -4466,8 +4466,8 @@
 DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
 {
-    uint32_t        aParam[6];
+    uint32_t         aParam[6];
     PHMGLOBALCPUINFO pCpu          = NULL;
-    RTHCPHYS        HCPhysCpuPage = 0;
-    int             rc            = VERR_INTERNAL_ERROR_5;
+    RTHCPHYS         HCPhysCpuPage = 0;
+    int              rc            = VERR_INTERNAL_ERROR_5;
 
     pCpu = HMR0GetCurrentCpu();
@@ -4852,5 +4852,5 @@
         /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
         pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
-        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);               AssertRC(rc);
+        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);              AssertRC(rc);
         STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
     }
@@ -4951,5 +4951,5 @@
 {
     int rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
-    AssertRC(rc);
+    AssertRCReturn(rc, rc);
     if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
     {
@@ -5417,5 +5417,5 @@
             case MSR_K8_TSC_AUX:        CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value);  break;
             case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value;             break;
-            case MSR_K6_EFER:          /* EFER can't be changed without causing a VM-exit. */    break;
+            case MSR_K6_EFER:           /* EFER can't be changed without causing a VM-exit. */   break;
             default:
             {
@@ -5793,6 +5793,6 @@
         return VINF_SUCCESS;
 
-    /* Though we can longjmp to ring-3 due to log-flushes here and get recalled again on the ring-3 callback path,
-       there is no real need to. */
+    /* Though we can longjmp to ring-3 due to log-flushes here and get recalled
+       again on the ring-3 callback path, there is no real need to. */
     if (VMMRZCallRing3IsEnabled(pVCpu))
         VMMR0LogFlushDisable(pVCpu);
@@ -5898,5 +5898,6 @@
         if (VMCPU_FF_IS_PENDING(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
         {
-            int rc2 = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
+            int rc2 = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4,
+                                 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
             if (rc2 != VINF_SUCCESS)
             {
@@ -5908,5 +5909,4 @@
 
         /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
-        /* -XXX- what was that about single stepping?  */
         if (   VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
             || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
@@ -5941,5 +5941,4 @@
     }
 
-    /* Paranoia. */
     return VINF_SUCCESS;
 }
@@ -6298,5 +6297,5 @@
 
     /*
-     * Clear the X86_EFL_TF if necessary .
+     * Clear the X86_EFL_TF if necessary.
      */
     if (pVCpu->hm.s.fClearTrapFlag)
@@ -7452,6 +7451,8 @@
         pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[0x80];
 
+    PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
+    RTCPUID idCurrentCpu  = pCpu->idCpu;
     if (   pVmxTransient->fUpdateTscOffsettingAndPreemptTimer
-        || HMR0GetCurrentCpu()->idCpu != pVCpu->hm.s.idLastCpu)
+        || idCurrentCpu != pVCpu->hm.s.idLastCpu)
     {
         hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu, pMixedCtx);
@@ -7460,7 +7461,6 @@
 
     ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);    /* Used for TLB-shootdowns, set this across the world switch. */
-    hmR0VmxFlushTaggedTlb(pVCpu);                               /* Invalidate the appropriate guest entries from the TLB. */
-
-    RTCPUID idCurrentCpu = HMR0GetCurrentCpu()->idCpu;
+    hmR0VmxFlushTaggedTlb(pVCpu, pCpu);                         /* Invalidate the appropriate guest entries from the TLB. */
+
     Assert(idCurrentCpu == pVCpu->hm.s.idLastCpu);
     pVCpu->hm.s.vmx.LastError.idCurrentCpu = idCurrentCpu;      /* Update the error reporting info. with the current host CPU. */
