Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 49878)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 49879)
@@ -138,5 +138,5 @@
 /** @name VMCB Clean Bits.
  *
- * These flags are used for VMCB-state caching. A set VMCB Clean Bit indicates
+ * These flags are used for VMCB-state caching. A set VMCB Clean bit indicates
  * AMD-V doesn't need to reload the corresponding value(s) from the VMCB in
  * memory.
@@ -1054,5 +1054,5 @@
 /**
  * Adds an exception to the intercept exception bitmap in the VMCB and updates
- * the corresponding VMCB Clean Bit.
+ * the corresponding VMCB Clean bit.
  *
  * @param   pVmcb       Pointer to the VM control block.
@@ -1071,5 +1071,5 @@
 /**
  * Removes an exception from the intercept-exception bitmap in the VMCB and
- * updates the corresponding VMCB Clean Bit.
+ * updates the corresponding VMCB Clean bit.
  *
  * @param   pVmcb       Pointer to the VM control block.
@@ -1203,5 +1203,5 @@
                 enmShwPagingMode = PGMGetHostMode(pVM);
 
-            pVmcb->ctrl.u64NestedPagingCR3  = PGMGetNestedCR3(pVCpu, enmShwPagingMode);
+            pVmcb->ctrl.u64NestedPagingCR3 = PGMGetNestedCR3(pVCpu, enmShwPagingMode);
             pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
             Assert(pVmcb->ctrl.u64NestedPagingCR3);
@@ -1286,4 +1286,5 @@
         HMSVM_LOAD_SEG_REG(GS, gs);
 
+        pVmcb->guest.u8CPL = pCtx->ss.Attr.n.u2Dpl;
         pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;
         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);
@@ -1784,5 +1785,4 @@
     pVmcb->guest.u64RSP    = pCtx->rsp;
     pVmcb->guest.u64RFlags = pCtx->eflags.u32;
-    pVmcb->guest.u8CPL     = pCtx->ss.Attr.n.u2Dpl;
     pVmcb->guest.u64RAX    = pCtx->rax;
 
@@ -2836,8 +2836,8 @@
     /*
      * Re-enable nested paging (automatically disabled on every VM-exit). See AMD spec. 15.25.3 "Enabling Nested Paging".
-     * We avoid changing the corresponding VMCB Clean Bit as we're not changing it to a different value since the previous run.
+     * We avoid changing the corresponding VMCB Clean bit as we're not changing it to a different value since the previous run.
      */
     /** @todo The above assumption could be wrong. It's not documented what
-     *        should be done wrt to the VMCB Clean Bit, but we'll find out the
+     *        should be done w.r.t. the VMCB Clean bit, but we'll find out the
      *        hard way. */
     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
@@ -2949,5 +2949,5 @@
     }
 
-    /* If we've migrating CPUs, mark the VMCB clean bits as dirty. */
+    /* If we're migrating CPUs, mark the VMCB Clean bits as dirty. */
     if (HMR0GetCurrentCpu()->idCpu != pVCpu->hm.s.idLastCpu)
         pVmcb->ctrl.u64VmcbCleanBits = 0;
@@ -3000,5 +3000,5 @@
         hmR0SvmSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
 
-    /* If VMCB Clean Bits isn't supported by the CPU, simply mark all state-bits as dirty, indicating (re)load-from-VMCB. */
+    /* If VMCB Clean bits aren't supported by the CPU, simply mark all state-bits as dirty, indicating (re)load-from-VMCB. */
     if (!(pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN))
         pVmcb->ctrl.u64VmcbCleanBits = 0;
