Index: /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 45509)
+++ /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 45510)
@@ -210,6 +210,6 @@
 static int                hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu);
 #endif
-#if 0
-DECLINLINE(int)           hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, unsigned rcReason);
+#if 1
+DECLINLINE(int)           hmR0VmxHandleExit(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);
 #endif
 
@@ -934,11 +934,11 @@
  *
  * @param   pVCpu       Pointer to the VMCPU.
- * @param   ulMSR       The MSR value.
+ * @param   uMsr        The MSR number.
  * @param   enmRead     Whether reading this MSR causes a VM-exit.
  * @param   enmWrite    Whether writing this MSR causes a VM-exit.
  */
-static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, unsigned ulMsr, VMXMSREXITREAD enmRead, VMXMSREXITWRITE enmWrite)
-{
-    unsigned ulBit;
+static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, VMXMSREXITREAD enmRead, VMXMSREXITWRITE enmWrite)
+{
+    int32_t iBit;
     uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
 
@@ -950,32 +950,32 @@
      * 0xc00 - 0xfff - High MSR write bits
      */
-    if (ulMsr <= 0x00001FFF)
+    if (uMsr <= 0x00001FFF)
     {
         /* Pentium-compatible MSRs */
-        ulBit = ulMsr;
-    }
-    else if (   ulMsr >= 0xC0000000
-             && ulMsr <= 0xC0001FFF)
+        iBit = uMsr;
+    }
+    else if (   uMsr >= 0xC0000000
+             && uMsr <= 0xC0001FFF)
     {
         /* AMD Sixth Generation x86 Processor MSRs */
-        ulBit = (ulMsr - 0xC0000000);
+        iBit = (uMsr - 0xC0000000);
         pbMsrBitmap += 0x400;
     }
     else
     {
-        AssertMsgFailed(("Invalid MSR %lx\n", ulMsr));
+        AssertMsgFailed(("hmR0VmxSetMsrPermission: Invalid MSR %#RX32\n", uMsr));
         return;
     }
 
-    Assert(ulBit <= 0x1fff);
+    Assert(iBit <= 0x1fff);
     if (enmRead == VMXMSREXIT_INTERCEPT_READ)
-        ASMBitSet(pbMsrBitmap, ulBit);
+        ASMBitSet(pbMsrBitmap, iBit);
     else
-        ASMBitClear(pbMsrBitmap, ulBit);
+        ASMBitClear(pbMsrBitmap, iBit);
 
     if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
-        ASMBitSet(pbMsrBitmap + 0x800, ulBit);
+        ASMBitSet(pbMsrBitmap + 0x800, iBit);
     else
-        ASMBitClear(pbMsrBitmap + 0x800, ulBit);
+        ASMBitClear(pbMsrBitmap + 0x800, iBit);
 }
 
@@ -2209,5 +2209,5 @@
 
     PVMXMSR  pHostMsr           = (PVMXMSR)pVCpu->hm.s.vmx.pvHostMsr;
-    unsigned idxHostMsr         = 0;
+    uint32_t cHostMsrs          = 0;
     uint32_t u32HostExtFeatures = pVM->hm.s.cpuid.u32AMDFeatureEDX;
 
@@ -2225,5 +2225,5 @@
 #endif
             pHostMsr->u64Value = ASMRdMsr(MSR_K6_EFER);
-        pHostMsr++; idxHostMsr++;
+        pHostMsr++; cHostMsrs++;
     }
 
@@ -2234,13 +2234,13 @@
         pHostMsr->u32Reserved  = 0;
         pHostMsr->u64Value     = ASMRdMsr(MSR_K6_STAR);              /* legacy syscall eip, cs & ss */
-        pHostMsr++; idxHostMsr++;
+        pHostMsr++; cHostMsrs++;
         pHostMsr->u32IndexMSR  = MSR_K8_LSTAR;
         pHostMsr->u32Reserved  = 0;
         pHostMsr->u64Value     = ASMRdMsr(MSR_K8_LSTAR);             /* 64 bits mode syscall rip */
-        pHostMsr++; idxHostMsr++;
+        pHostMsr++; cHostMsrs++;
         pHostMsr->u32IndexMSR  = MSR_K8_SF_MASK;
         pHostMsr->u32Reserved  = 0;
         pHostMsr->u64Value     = ASMRdMsr(MSR_K8_SF_MASK);           /* syscall flag mask */
-        pHostMsr++; idxHostMsr++;
+        pHostMsr++; cHostMsrs++;
         /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208}  */
 #if 0
@@ -2248,5 +2248,5 @@
         pMsr->u32Reserved = 0;
         pMsr->u64Value    = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);         /* swapgs exchange value */
-        pHostMsr++; idxHostMsr++;
+        pHostMsr++; cHostMsrs++;
 #endif
     }
@@ -2254,11 +2254,11 @@
 
     /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
-    if (RT_UNLIKELY(idxHostMsr > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)))
-    {
-        LogRel(("idxHostMsr=%u Cpu=%u\n", idxHostMsr, (unsigned)MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)));
+    if (RT_UNLIKELY(cHostMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)))
+    {
+        LogRel(("cHostMsrs=%u Cpu=%u\n", cHostMsrs, (unsigned)MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)));
         return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
     }
 
-    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, idxHostMsr);
+    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cHostMsrs);
 
     /*
@@ -3564,5 +3564,5 @@
     {
         PVMXMSR  pGuestMsr  = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
-        unsigned cGuestMsrs = 0;
+        uint32_t cGuestMsrs = 0;
 
         /* See Intel spec. 4.1.4 "Enumeration of Paging Features by CPUID". */
@@ -4442,5 +4442,5 @@
 
     /* Make sure there are no duplicates. */
-    for (unsigned i = 0; i < pCache->Write.cValidEntries; i++)
+    for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
     {
         if (pCache->Write.aField[i] == idxField)
@@ -5102,5 +5102,5 @@
         return VINF_SUCCESS;
 
-    for (unsigned i = 0; i < pVCpu->hm.s.vmx.cGuestMsrs; i++)
+    for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cGuestMsrs; i++)
     {
         PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
@@ -6662,5 +6662,5 @@
     VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
     int          rc     = VERR_INTERNAL_ERROR_5;
-    unsigned     cLoops = 0;
+    uint32_t     cLoops = 0;
 
     for (;; cLoops++)
@@ -6725,5 +6725,5 @@
 
 #if 1
-DECLINLINE(int) hmR0VmxHandleExit(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, unsigned rcReason)
+DECLINLINE(int) hmR0VmxHandleExit(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
 {
     int rc;
@@ -7891,5 +7891,5 @@
                 for (unsigned i = 0; i < 4; i++)
                 {
-                    unsigned uBPLen = s_aIOSize[X86_DR7_GET_LEN(pMixedCtx->dr[7], i)];
+                    uint32_t uBPLen = s_aIOSize[X86_DR7_GET_LEN(pMixedCtx->dr[7], i)];
                     if (   (   uIOPort >= pMixedCtx->dr[i]
                             && uIOPort < pMixedCtx->dr[i] + uBPLen)
@@ -8036,5 +8036,5 @@
 
     /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Phyiscal Addresses" */
-    unsigned uAccessType = VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
+    uint32_t uAccessType = VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
     switch (uAccessType)
     {
@@ -8467,5 +8467,5 @@
 
     PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
-    unsigned int cbOp = 0;
+    uint32_t cbOp     = 0;
     PVM pVM           = pVCpu->CTX_SUFF(pVM);
     rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
