Index: /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 55814)
+++ /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 55815)
@@ -843,5 +843,5 @@
 {
     AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER);
-    AssertPtrReturn(ppVirt, VERR_INVALID_PARAMETER);
+    AssertPtrReturn(ppVirt,  VERR_INVALID_PARAMETER);
     AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);
 
@@ -1262,13 +1262,15 @@
  * auto-load/store MSR area in the VMCS.
  *
- * @returns true if the MSR was added -and- its value was updated, false
- *          otherwise.
- * @param   pVCpu           Pointer to the VMCPU.
- * @param   uMsr            The MSR.
- * @param   uGuestMsr       Value of the guest MSR.
- * @param   fUpdateHostMsr  Whether to update the value of the host MSR if
- *                          necessary.
- */
-static bool hmR0VmxAddAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr, uint64_t uGuestMsrValue, bool fUpdateHostMsr)
+ * @returns VBox status code.
+ * @param   pVCpu               Pointer to the VMCPU.
+ * @param   uMsr                The MSR.
+ * @param   uGuestMsr           Value of the guest MSR.
+ * @param   fUpdateHostMsr      Whether to update the value of the host MSR if
+ *                              necessary.
+ * @param   pfAddedAndUpdated   Where to store whether the MSR was added -and-
+ *                              its value was updated. Optional, can be NULL.
+ */
+static int hmR0VmxAddAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr, uint64_t uGuestMsrValue, bool fUpdateHostMsr,
+                                      bool *pfAddedAndUpdated)
 {
     PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
@@ -1287,5 +1289,5 @@
         ++cMsrs;
         int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
-        AssertRC(rc);
+        AssertMsgRCReturn(rc, ("hmR0VmxAddAutoLoadStoreMsr: Insufficient space to add MSR %u\n", uMsr), rc);
 
         /* Now that we're swapping MSRs during the world-switch, allow the guest to read/write them without causing VM-exits. */
@@ -1320,5 +1322,7 @@
     }
 
-    return fUpdatedMsrValue;
+    if (pfAddedAndUpdated)
+        *pfAddedAndUpdated = fUpdatedMsrValue;
+    return VINF_SUCCESS;
 }
 
@@ -4776,9 +4780,11 @@
         if (pVM->hm.s.fAllow64BitGuests)
         {
-            hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR,          pMixedCtx->msrLSTAR,        false /* fUpdateHostMsr */);
-            hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR,           pMixedCtx->msrSTAR,         false /* fUpdateHostMsr */);
-            hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK,        pMixedCtx->msrSFMASK,       false /* fUpdateHostMsr */);
-            hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE, false /* fUpdateHostMsr */);
-# ifdef DEBUG
+            int rc = VINF_SUCCESS;
+            rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR,          pMixedCtx->msrLSTAR,        false, NULL);
+            rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR,           pMixedCtx->msrSTAR,         false, NULL);
+            rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK,        pMixedCtx->msrSFMASK,       false, NULL);
+            rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE, false, NULL);
+            AssertRCReturn(rc, rc);
+#ifdef DEBUG
             PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
             for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++)
@@ -4832,5 +4838,8 @@
             else
             {
-                hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, pMixedCtx->msrEFER, false /* fUpdateHostMsr */);
+                int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, pMixedCtx->msrEFER, false /* fUpdateHostMsr */,
+                                                    NULL /* pfAddedAndUpdated */);
+                AssertRCReturn(rc, rc);
+
                 /* We need to intercept reads too, see @bugref{7386} comment #16. */
                 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
@@ -6791,9 +6800,9 @@
      * We assume all general purpose registers other than RSP are available.
      *
-     * RIP is a must as it will be incremented or otherwise changed.
+     * RIP is a must, as it will be incremented or otherwise changed.
      *
      * RFLAGS are always required to figure the CPL.
      *
-     * RSP isn't always required, however it's a GPR so frequently required.
+     * RSP isn't always required, however it's a GPR, so frequently required.
      *
      * SS and CS are the only segment register needed if IEM doesn't do memory
@@ -6819,5 +6828,5 @@
 
 /**
- * Ensures that we've got a complete basic context.
+ * Ensures that we've got a complete basic guest-context.
  *
  * This excludes the FPU, SSE, AVX, and similar extended state.  The interface
@@ -7099,5 +7108,5 @@
     /*
      * !!! IMPORTANT !!!
-     * If you modify code here, make sure to check whether hmR0VmxCallRing3Callback() needs to be updated too.
+     * If you modify code here, check whether hmR0VmxCallRing3Callback() needs to be updated too.
      */
 
@@ -7364,6 +7373,6 @@
         /*
          * !!! IMPORTANT !!!
-         * If you modify code here, make sure to check whether hmR0VmxLeave() and hmR0VmxLeaveSession() needs
-         * to be updated too. This is a stripped down version which gets out ASAP trying to not trigger any assertion.
+         * If you modify code here, check whether hmR0VmxLeave() and hmR0VmxLeaveSession() needs to be updated too.
+         * This is a stripped down version which gets out ASAP, trying to not trigger any further assertions.
          */
         RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER; \
@@ -8787,10 +8796,14 @@
         if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
         {
+            bool fMsrUpdated;
             int rc2 = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
             AssertRC(rc2);
             Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS));
-            bool fMsrUpdated = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMR0GetGuestTscAux(pVCpu),
-                                                          true /* fUpdateHostMsr */);
+
+            rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMR0GetGuestTscAux(pVCpu), true /* fUpdateHostMsr */,
+                                             &fMsrUpdated);
+            AssertRC(rc2);
             Assert(fMsrUpdated || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
+
             /* Finally, mark that all host MSR values are updated so we don't redo it without leaving VT-x. See @bugref{6956}. */
             pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
