Index: /trunk/include/VBox/gvm.h
===================================================================
--- /trunk/include/VBox/gvm.h	(revision 22889)
+++ /trunk/include/VBox/gvm.h	(revision 22890)
@@ -42,5 +42,5 @@
 typedef struct GVMCPU
 {
-    /** VCPU id (0 - (pVM->cCPUs - 1). */
+    /** VCPU id (0 - (pVM->cCpus - 1)). */
     VMCPUID         idCpu;
 
@@ -84,5 +84,5 @@
     PVM             pVM;
     /** Number of Virtual CPUs, i.e. how many entries there are in aCpus.
-     * Same same as PVM::cCPUs. */
+     * Same as VM::cCpus. */
     uint32_t        cCpus;
     uint32_t        padding;
Index: /trunk/include/VBox/vm.h
===================================================================
--- /trunk/include/VBox/vm.h	(revision 22889)
+++ /trunk/include/VBox/vm.h	(revision 22890)
@@ -698,5 +698,5 @@
     uint32_t                    hSelf;
     /** Number of virtual CPUs. */
-    uint32_t                    cCPUs;
+    uint32_t                    cCpus;
 
     /** Size of the VM structure including the VMCPU array. */
Index: /trunk/include/VBox/vm.mac
===================================================================
--- /trunk/include/VBox/vm.mac	(revision 22889)
+++ /trunk/include/VBox/vm.mac	(revision 22890)
@@ -56,5 +56,5 @@
     .pVMRC                  RTRCPTR_RES 1
     .hSelf                  resd 1
-    .cCPUs                  resd 1
+    .cCpus                  resd 1
     .cbSelf                 resd 1
     .offVMCPU               resd 1
Index: /trunk/include/VBox/vmapi.h
===================================================================
--- /trunk/include/VBox/vmapi.h	(revision 22889)
+++ /trunk/include/VBox/vmapi.h	(revision 22890)
@@ -349,5 +349,5 @@
 
 
-VMMR3DECL(int)  VMR3Create(uint32_t cCPUs, PFNVMATERROR pfnVMAtError, void *pvUserVM, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM, PVM *ppVM);
+VMMR3DECL(int)  VMR3Create(uint32_t cCpus, PFNVMATERROR pfnVMAtError, void *pvUserVM, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM, PVM *ppVM);
 VMMR3DECL(int)  VMR3PowerOn(PVM pVM);
 VMMR3DECL(int)  VMR3Suspend(PVM pVM);
Index: /trunk/src/VBox/VMM/CPUM.cpp
===================================================================
--- /trunk/src/VBox/VMM/CPUM.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/CPUM.cpp	(revision 22890)
@@ -137,5 +137,5 @@
 
     /* Calculate the offset from CPUMCPU to CPUM. */
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -320,5 +320,5 @@
                                        | X86_CPUID_FEATURE_ECX_SSE3
                                        /* Can't properly emulate monitor & mwait with guest SMP; force the guest to use hlt for idling VCPUs. */
-                                       | ((pVM->cCPUs == 1) ? X86_CPUID_FEATURE_ECX_MONITOR : 0)
+                                       | ((pVM->cCpus == 1) ? X86_CPUID_FEATURE_ECX_MONITOR : 0)
                                        //| X86_CPUID_FEATURE_ECX_CPLDS - no CPL qualified debug store.
                                        //| X86_CPUID_FEATURE_ECX_VMX   - not virtualized.
@@ -388,8 +388,8 @@
     pCPUM->aGuestCpuIdStd[1].ebx &= 0x0000ffff;
 #ifdef VBOX_WITH_MULTI_CORE
-    if (pVM->cCPUs > 1)
+    if (pVM->cCpus > 1)
     {
         /* If CPUID Fn0000_0001_EDX[HTT] = 1 then LogicalProcessorCount is the number of threads per CPU core times the number of CPU cores per processor */
-        pCPUM->aGuestCpuIdStd[1].ebx |= (pVM->cCPUs << 16);
+        pCPUM->aGuestCpuIdStd[1].ebx |= (pVM->cCpus << 16);
         pCPUM->aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_HTT;  /* necessary for hyper-threading *or* multi-core CPUs */
     }
@@ -423,11 +423,11 @@
     pCPUM->aGuestCpuIdStd[4].eax = pCPUM->aGuestCpuIdStd[4].ebx = 0;
 #ifdef VBOX_WITH_MULTI_CORE
-    if (    pVM->cCPUs > 1
+    if (    pVM->cCpus > 1
         &&  pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_INTEL)
     {
-        AssertReturn(pVM->cCPUs <= 64, VERR_TOO_MANY_CPUS);
+        AssertReturn(pVM->cCpus <= 64, VERR_TOO_MANY_CPUS);
         /* One logical processor with possibly multiple cores. */
         /* See  http://www.intel.com/Assets/PDF/appnote/241618.pdf p. 29 */
-        pCPUM->aGuestCpuIdStd[4].eax |= ((pVM->cCPUs - 1) << 26);   /* 6 bits only -> 64 cores! */
+        pCPUM->aGuestCpuIdStd[4].eax |= ((pVM->cCpus - 1) << 26);   /* 6 bits only -> 64 cores! */
     }
 #endif
@@ -517,10 +517,10 @@
         pCPUM->aGuestCpuIdExt[8].ecx = 0;
 #ifdef VBOX_WITH_MULTI_CORE
-        if (    pVM->cCPUs > 1
+        if (    pVM->cCpus > 1
             &&  pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
         {
             /* Legacy method to determine the number of cores. */
             pCPUM->aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_CMPL;
-            pCPUM->aGuestCpuIdExt[8].ecx |= (pVM->cCPUs - 1); /* NC: Number of CPU cores - 1; 8 bits */
+            pCPUM->aGuestCpuIdExt[8].ecx |= (pVM->cCpus - 1); /* NC: Number of CPU cores - 1; 8 bits */
 
         }
@@ -565,5 +565,5 @@
      * of processors from (cpuid(4).eax >> 26) + 1.
      */
-    if (pVM->cCPUs == 1)
+    if (pVM->cCpus == 1)
         pCPUM->aGuestCpuIdStd[4].eax = 0;
 
@@ -686,10 +686,10 @@
 {
     LogFlow(("CPUMR3Relocate\n"));
-    for (unsigned i=0;i<pVM->cCPUs;i++)
-    {
-        PVMCPU pVCpu  = &pVM->aCpus[i];
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
+    {
         /*
          * Switcher pointers.
          */
+        PVMCPU pVCpu = &pVM->aCpus[i];
         pVCpu->cpum.s.pHyperCoreRC = MMHyperCCToRC(pVM, pVCpu->cpum.s.pHyperCoreR3);
         Assert(pVCpu->cpum.s.pHyperCoreRC != NIL_RTRCPTR);
@@ -726,8 +726,8 @@
 {
 #ifdef VBOX_WITH_CRASHDUMP_MAGIC
-    for (unsigned i=0;i<pVM->cCPUs;i++)
-    {
-        PVMCPU pVCpu  = &pVM->aCpus[i];
-        PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
+    {
+        PVMCPU   pVCpu = &pVM->aCpus[i];
+        PCPUMCTX pCtx  = CPUMQueryGuestCtxPtr(pVCpu);
 
         memset(pVCpu->cpum.s.aMagic, 0, sizeof(pVCpu->cpum.s.aMagic));
@@ -825,5 +825,5 @@
 VMMR3DECL(void) CPUMR3Reset(PVM pVM)
 {
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         CPUMR3ResetCpu(&pVM->aCpus[i]);
@@ -853,5 +853,5 @@
      * Save.
      */
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -860,6 +860,6 @@
     }
 
-    SSMR3PutU32(pSSM, pVM->cCPUs);
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    SSMR3PutU32(pSSM, pVM->cCpus);
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -1019,5 +1019,5 @@
      * Restore.
      */
-    for (VMCPUID i = 0; i < pVM->cCPUs; i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU   pVCpu = &pVM->aCpus[i];
@@ -1046,18 +1046,21 @@
         if (uVersion >= CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR)
         {
-            int rc = SSMR3GetU32(pSSM, &pVM->cCPUs);
+            /** @todo r=bird: cCPUs: Why are we doing this?!? cCpus is a config value that
+             *        cannot be changed by a saved state. If the saved one differs we
+             *        fail. */
+            int rc = SSMR3GetU32(pSSM, &pVM->cCpus);
             AssertRCReturn(rc, rc);
         }
 
-        if (    !pVM->cCPUs
-            ||  pVM->cCPUs > VMM_MAX_CPU_COUNT
+        if (    !pVM->cCpus
+            ||  pVM->cCpus > VMM_MAX_CPU_COUNT
             ||  (   uVersion == CPUM_SAVED_STATE_VERSION_VER2_0
-                 && pVM->cCPUs != 1))
+                 && pVM->cCpus != 1))
         {
-            AssertMsgFailed(("Unexpected number of VMCPUs (%d)\n", pVM->cCPUs));
+            AssertMsgFailed(("Unexpected number of VMCPUs (%u)\n", pVM->cCpus));
             return VERR_SSM_UNEXPECTED_DATA;
         }
 
-        for (VMCPUID i = 0; i < pVM->cCPUs; i++)
+        for (VMCPUID i = 0; i < pVM->cCpus; i++)
         {
             SSMR3GetMem(pSSM, &pVM->aCpus[i].cpum.s.Guest, sizeof(pVM->aCpus[i].cpum.s.Guest));
Index: /trunk/src/VBox/VMM/DBGF.cpp
===================================================================
--- /trunk/src/VBox/VMM/DBGF.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/DBGF.cpp	(revision 22890)
@@ -1075,5 +1075,5 @@
     AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
     AssertReturn(RTSemPongIsSpeaker(&pVM->dbgf.s.PingPong), VERR_SEM_OUT_OF_TURN);
-    AssertReturn(idCpu < pVM->cCPUs, VERR_INVALID_PARAMETER);
+    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
 
     /*
Index: /trunk/src/VBox/VMM/DBGFAddr.cpp
===================================================================
--- /trunk/src/VBox/VMM/DBGFAddr.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/DBGFAddr.cpp	(revision 22890)
@@ -103,5 +103,5 @@
 VMMR3DECL(int) DBGFR3AddrFromSelOff(PVM pVM, VMCPUID idCpu, PDBGFADDRESS pAddress, RTSEL Sel, RTUINTPTR off)
 {
-    AssertReturn(idCpu < pVM->cCPUs, VERR_INVALID_PARAMETER);
+    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
 
     pAddress->Sel = Sel;
@@ -258,5 +258,5 @@
     AssertReturn(DBGFADDRESS_IS_VALID(pAddress), VERR_INVALID_PARAMETER);
     VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_STATE);
-    AssertReturn(idCpu < pVM->cCPUs, VERR_INVALID_PARAMETER);
+    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
 
     /*
@@ -324,5 +324,5 @@
     AssertReturn(DBGFADDRESS_IS_VALID(pAddress), VERR_INVALID_PARAMETER);
     VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_STATE);
-    AssertReturn(idCpu < pVM->cCPUs, VERR_INVALID_PARAMETER);
+    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
 
     /*
@@ -438,5 +438,5 @@
     AssertReturn(DBGFADDRESS_IS_VALID(pAddress), VERR_INVALID_PARAMETER);
     VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_STATE);
-    AssertReturn(idCpu < pVM->cCPUs, VERR_INVALID_PARAMETER);
+    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
 
     /*
Index: /trunk/src/VBox/VMM/DBGFDisas.cpp
===================================================================
--- /trunk/src/VBox/VMM/DBGFDisas.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/DBGFDisas.cpp	(revision 22890)
@@ -551,5 +551,5 @@
 {
     VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
-    AssertReturn(idCpu < pVM->cCPUs, VERR_INVALID_CPU_ID);
+    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
 
     /*
Index: /trunk/src/VBox/VMM/DBGFMem.cpp
===================================================================
--- /trunk/src/VBox/VMM/DBGFMem.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/DBGFMem.cpp	(revision 22890)
@@ -122,5 +122,5 @@
 VMMR3DECL(int) DBGFR3MemScan(PVM pVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, RTGCUINTPTR cbRange, const uint8_t *pabNeedle, size_t cbNeedle, PDBGFADDRESS pHitAddress)
 {
-    AssertReturn(idCpu < pVM->cCPUs, VERR_INVALID_PARAMETER);
+    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
 
     PVMREQ pReq;
@@ -207,5 +207,5 @@
 VMMR3DECL(int) DBGFR3MemRead(PVM pVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void *pvBuf, size_t cbRead)
 {
-    AssertReturn(idCpu < pVM->cCPUs, VERR_INVALID_PARAMETER);
+    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
     if ((pAddress->fFlags & DBGFADDRESS_FLAGS_TYPE_MASK) == DBGFADDRESS_FLAGS_RING0)
     {
@@ -295,5 +295,5 @@
         return VERR_INVALID_PARAMETER;
     memset(pszBuf, 0, cchBuf);
-    AssertReturn(idCpu < pVM->cCPUs, VERR_INVALID_PARAMETER);
+    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
 
     /*
@@ -381,5 +381,5 @@
 VMMR3DECL(int) DBGFR3MemWrite(PVM pVM, VMCPUID idCpu, PCDBGFADDRESS pAddress, void const *pvBuf, size_t cbWrite)
 {
-    AssertReturn(idCpu < pVM->cCPUs, VERR_INVALID_PARAMETER);
+    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
 
     PVMREQ pReq;
@@ -481,5 +481,5 @@
 VMMR3DECL(int) DBGFR3SelQueryInfo(PVM pVM, VMCPUID idCpu, RTSEL Sel, uint32_t fFlags, PDBGFSELINFO pSelInfo)
 {
-    AssertReturn(idCpu < pVM->cCPUs, VERR_INVALID_PARAMETER);
+    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
     AssertReturn(!(fFlags & ~(DBGFSELQI_FLAGS_DT_GUEST | DBGFSELQI_FLAGS_DT_SHADOW | DBGFSELQI_FLAGS_DT_ADJ_64BIT_MODE)), VERR_INVALID_PARAMETER);
     AssertReturn(    (fFlags & (DBGFSELQI_FLAGS_DT_SHADOW | DBGFSELQI_FLAGS_DT_ADJ_64BIT_MODE))
Index: /trunk/src/VBox/VMM/DBGFStack.cpp
===================================================================
--- /trunk/src/VBox/VMM/DBGFStack.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/DBGFStack.cpp	(revision 22890)
@@ -433,5 +433,5 @@
     *ppFirstFrame = NULL;
     VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
-    AssertReturn(idCpu < pVM->cCPUs, VERR_INVALID_CPU_ID);
+    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
     if (pAddrFrame)
         AssertReturn(DBGFR3AddrIsValid(pVM, pAddrFrame), VERR_INVALID_PARAMETER);
Index: /trunk/src/VBox/VMM/EM.cpp
===================================================================
--- /trunk/src/VBox/VMM/EM.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/EM.cpp	(revision 22890)
@@ -137,5 +137,5 @@
         return rc;
 
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -428,8 +428,7 @@
 {
     LogFlow(("EMR3Relocate\n"));
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
-
         if (pVCpu->em.s.pStatsR3)
             pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
@@ -446,8 +445,7 @@
 {
     LogFlow(("EMR3Reset: \n"));
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
-
         pVCpu->em.s.fForceRAW = false;
     }
@@ -495,5 +493,5 @@
 static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
 {
-    for (VMCPUID i = 0; i < pVM->cCPUs; i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -536,5 +534,5 @@
      * Load the saved state.
      */
-    for (VMCPUID i = 0; i < pVM->cCPUs; i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -1365,5 +1363,5 @@
         {
             /* Try not to cause deadlocks. */
-            if (    pVM->cCPUs == 1
+            if (    pVM->cCpus == 1
                 ||  (   !PGMIsLockOwner(pVM)
                      && !IOMIsLockOwner(pVM))
Index: /trunk/src/VBox/VMM/EMHwaccm.cpp
===================================================================
--- /trunk/src/VBox/VMM/EMHwaccm.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/EMHwaccm.cpp	(revision 22890)
@@ -518,5 +518,5 @@
         uint32_t cpl = CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx));
 
-        if (pVM->cCPUs == 1)
+        if (pVM->cCpus == 1)
         {
             if (pCtx->eflags.Bits.u1VM)
Index: /trunk/src/VBox/VMM/HWACCM.cpp
===================================================================
--- /trunk/src/VBox/VMM/HWACCM.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/HWACCM.cpp	(revision 22890)
@@ -398,5 +398,5 @@
     LogFlow(("HWACCMR3InitCPU\n"));
 
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -414,5 +414,5 @@
      * Statistics.
      */
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -573,5 +573,5 @@
 #ifdef VBOX_WITH_CRASHDUMP_MAGIC
     /* Magic marker for searching in crash dumps. */
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -610,5 +610,5 @@
 
     /* Reinit the paging mode to force the new shadow mode. */
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -648,5 +648,5 @@
             LogRel(("HWACCM: The host kernel does not support VT-x!\n"));
 #endif
-            if (   pVM->cCPUs > 1
+            if (   pVM->cCpus > 1
                 || VMMIsHwVirtExtForced(pVM))
                 return rc;
@@ -981,5 +981,5 @@
             AssertRelease(MSR_IA32_VMX_MISC_MAX_MSR(pVM->hwaccm.s.vmx.msr.vmx_misc) >= 512);
 
-            for (unsigned i=0;i<pVM->cCPUs;i++)
+            for (VMCPUID i = 0; i < pVM->cCpus; i++)
             {
                 LogRel(("HWACCM: VCPU%d: MSR bitmap physaddr      = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pMSRBitmapPhys));
@@ -1203,5 +1203,5 @@
     if (VMR3GetState(pVM) == VMSTATE_LOADING)
     {
-        for (unsigned i=0;i<pVM->cCPUs;i++)
+        for (VMCPUID i = 0; i < pVM->cCpus; i++)
         {
             PVMCPU pVCpu = &pVM->aCpus[i];
@@ -1351,5 +1351,5 @@
 VMMR3DECL(int) HWACCMR3TermCPU(PVM pVM)
 {
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -1394,5 +1394,5 @@
         hwaccmR3DisableRawMode(pVM);
 
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -1535,5 +1535,5 @@
         return VERR_NOT_SUPPORTED;
 
-    if (pVM->cCPUs > 1)
+    if (pVM->cCpus > 1)
     {
         /* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */
@@ -2318,5 +2318,5 @@
 VMMR3DECL(void) HWACCMR3CheckError(PVM pVM, int iStatusCode)
 {
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         switch(iStatusCode)
@@ -2367,5 +2367,5 @@
     Log(("hwaccmR3Save:\n"));
 
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         /*
@@ -2463,5 +2463,5 @@
         return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
     }
-    for (VMCPUID i = 0; i < pVM->cCPUs; i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hwaccm.s.Event.fPending);
Index: /trunk/src/VBox/VMM/MMHyper.cpp
===================================================================
--- /trunk/src/VBox/VMM/MMHyper.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/MMHyper.cpp	(revision 22890)
@@ -80,6 +80,6 @@
     if (rc == VERR_CFGM_NO_PARENT || rc == VERR_CFGM_VALUE_NOT_FOUND)
     {
-        if (pVM->cCPUs > 1)
-            cbHyperHeap = _2M + pVM->cCPUs * _64K;
+        if (pVM->cCpus > 1)
+            cbHyperHeap = _2M + pVM->cCpus * _64K;
         else
             cbHyperHeap = VMMIsHwVirtExtForced(pVM)
@@ -109,5 +109,5 @@
          * Map the VM structure into the hypervisor space.
          */
-        AssertRelease(pVM->cbSelf == RT_UOFFSETOF(VM, aCpus[pVM->cCPUs]));
+        AssertRelease(pVM->cbSelf == RT_UOFFSETOF(VM, aCpus[pVM->cCpus]));
         RTGCPTR GCPtr;
         rc = MMR3HyperMapPages(pVM, pVM, pVM->pVMR0, RT_ALIGN_Z(pVM->cbSelf, PAGE_SIZE) >> PAGE_SHIFT, pVM->paVMPagesR3, "VM", &GCPtr);
@@ -115,5 +115,5 @@
         {
             pVM->pVMRC = (RTRCPTR)GCPtr;
-            for (uint32_t i = 0; i < pVM->cCPUs; i++)
+            for (VMCPUID i = 0; i < pVM->cCpus; i++)
                 pVM->aCpus[i].pVMRC = pVM->pVMRC;
 
@@ -320,5 +320,5 @@
             RTGCINTPTR offDelta = GCPtrNew - GCPtrOld;
             pVM->pVMRC                          += offDelta;
-            for (uint32_t i = 0; i < pVM->cCPUs; i++)
+            for (VMCPUID i = 0; i < pVM->cCpus; i++)
                 pVM->aCpus[i].pVMRC              = pVM->pVMRC;
 
Index: /trunk/src/VBox/VMM/PATM/CSAM.cpp
===================================================================
--- /trunk/src/VBox/VMM/PATM/CSAM.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/PATM/CSAM.cpp	(revision 22890)
@@ -563,5 +563,5 @@
     int rc;
     R3PTRTYPE(void *) pHCPtr;
-    Assert(pVM->cCPUs == 1);
+    Assert(pVM->cCpus == 1);
     PVMCPU pVCpu = VMMGetCpu0(pVM);
 
@@ -612,5 +612,5 @@
     RTGCUINTPTR32 pInstrGC = (uintptr_t)pCpu->apvUserData[2];
     int           orgsize  = size;
-    Assert(pVM->cCPUs == 1);
+    Assert(pVM->cCpus == 1);
     PVMCPU        pVCpu = VMMGetCpu0(pVM);
 
@@ -1087,5 +1087,5 @@
     R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
     int rc2;
-    Assert(pVM->cCPUs == 1);
+    Assert(pVM->cCpus == 1);
     PVMCPU pVCpu = VMMGetCpu0(pVM);
 
@@ -1363,5 +1363,5 @@
     uint32_t val[5];
     int      rc;
-    Assert(pVM->cCPUs == 1);
+    Assert(pVM->cCpus == 1);
     PVMCPU pVCpu = VMMGetCpu0(pVM);
 
@@ -1431,5 +1431,5 @@
     RTGCPHYS GCPhys = 0;
     uint64_t fFlags = 0;
-    Assert(pVM->cCPUs == 1 || !CSAMIsEnabled(pVM));
+    Assert(pVM->cCpus == 1 || !CSAMIsEnabled(pVM));
 
     if (!CSAMIsEnabled(pVM))
@@ -1624,5 +1624,5 @@
     int          rc;
     bool         ret;
-    Assert(pVM->cCPUs == 1);
+    Assert(pVM->cCpus == 1);
     PVMCPU pVCpu = VMMGetCpu0(pVM);
 
@@ -1727,5 +1727,5 @@
     int          rc;
     bool         fMonitorInvalidation;
-    Assert(pVM->cCPUs == 1);
+    Assert(pVM->cCpus == 1);
     PVMCPU pVCpu = VMMGetCpu0(pVM);
 
@@ -1873,5 +1873,5 @@
 {
     PCSAMPAGEREC pPageRec;
-    Assert(pVM->cCPUs == 1);
+    Assert(pVM->cCpus == 1);
     PVMCPU pVCpu = VMMGetCpu0(pVM);
 
@@ -2194,5 +2194,5 @@
 static int csamR3FlushDirtyPages(PVM pVM)
 {
-    Assert(pVM->cCPUs == 1);
+    Assert(pVM->cCpus == 1);
     PVMCPU pVCpu = VMMGetCpu0(pVM);
 
@@ -2245,5 +2245,5 @@
 static int csamR3FlushCodePages(PVM pVM)
 {
-    Assert(pVM->cCPUs == 1);
+    Assert(pVM->cCpus == 1);
     PVMCPU pVCpu = VMMGetCpu0(pVM);
 
@@ -2289,5 +2289,5 @@
 VMMR3DECL(int) CSAMR3CheckGates(PVM pVM, uint32_t iGate, uint32_t cGates)
 {
-    Assert(pVM->cCPUs == 1);
+    Assert(pVM->cCpus == 1);
     PVMCPU      pVCpu = VMMGetCpu0(pVM);
     uint16_t    cbIDT;
Index: /trunk/src/VBox/VMM/PATM/PATM.cpp
===================================================================
--- /trunk/src/VBox/VMM/PATM/PATM.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/PATM/PATM.cpp	(revision 22890)
@@ -6005,5 +6005,5 @@
     PVMCPU           pVCpu = VMMGetCpu0(pVM);
 
-    Assert(pVM->cCPUs == 1);
+    Assert(pVM->cCpus == 1);
 
     pNewEip   = 0;
Index: /trunk/src/VBox/VMM/PDM.cpp
===================================================================
--- /trunk/src/VBox/VMM/PDM.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/PDM.cpp	(revision 22890)
@@ -637,5 +637,5 @@
      * Save interrupt and DMA states.
      */
-    for (unsigned idCpu = 0; idCpu < pVM->cCPUs; idCpu++)
+    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     {
         PVMCPU pVCpu = &pVM->aCpus[idCpu];
@@ -680,5 +680,5 @@
              ));
 #ifdef LOG_ENABLED
-    for (unsigned idCpu=0;idCpu<pVM->cCPUs;idCpu++)
+    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     {
         PVMCPU pVCpu = &pVM->aCpus[idCpu];
@@ -698,5 +698,5 @@
 
     /* Clear the FFs. */
-    for (unsigned idCpu=0;idCpu<pVM->cCPUs;idCpu++)
+    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     {
         PVMCPU pVCpu = &pVM->aCpus[idCpu];
@@ -741,5 +741,5 @@
      * Load the interrupt and DMA states.
      */
-    for (VMCPUID idCpu = 0; idCpu < pVM->cCPUs; idCpu++)
+    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     {
         PVMCPU pVCpu = &pVM->aCpus[idCpu];
@@ -960,5 +960,5 @@
      * Clear all pending interrupts and DMA operations.
      */
-    for (unsigned idCpu=0;idCpu<pVM->cCPUs;idCpu++)
+    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     {
         PVMCPU pVCpu = &pVM->aCpus[idCpu];
Index: /trunk/src/VBox/VMM/PDMDevHlp.cpp
===================================================================
--- /trunk/src/VBox/VMM/PDMDevHlp.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/PDMDevHlp.cpp	(revision 22890)
@@ -2355,5 +2355,5 @@
              pDevIns->pDevReg->szDeviceName, pDevIns->iInstance));
 
-    if (pVM->cCPUs > 1)
+    if (pVM->cCpus > 1)
     {
         /* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */
@@ -2382,5 +2382,5 @@
              pDevIns->pDevReg->szDeviceName, pDevIns->iInstance));
 
-    if (pVM->cCPUs > 1)
+    if (pVM->cCpus > 1)
     {
         /* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */
Index: /trunk/src/VBox/VMM/PDMDevMiscHlp.cpp
===================================================================
--- /trunk/src/VBox/VMM/PDMDevMiscHlp.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/PDMDevMiscHlp.cpp	(revision 22890)
@@ -151,5 +151,5 @@
     PVMCPU pVCpu = &pVM->aCpus[idCpu];
 
-    AssertReturnVoid(idCpu < pVM->cCPUs);
+    AssertReturnVoid(idCpu < pVM->cCpus);
 
     LogFlow(("pdmR3ApicHlp_SetInterruptFF: caller='%s'/%d: VM_FF_INTERRUPT(%d) %d -> 1\n",
@@ -183,5 +183,5 @@
     PVMCPU pVCpu = &pVM->aCpus[idCpu];
 
-    AssertReturnVoid(idCpu < pVM->cCPUs);
+    AssertReturnVoid(idCpu < pVM->cCpus);
 
     LogFlow(("pdmR3ApicHlp_ClearInterruptFF: caller='%s'/%d: VM_FF_INTERRUPT(%d) %d -> 0\n",
Index: /trunk/src/VBox/VMM/PGM.cpp
===================================================================
--- /trunk/src/VBox/VMM/PGM.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/PGM.cpp	(revision 22890)
@@ -1198,5 +1198,5 @@
 
     /* Init the per-CPU part. */
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -1340,8 +1340,7 @@
     if (RT_SUCCESS(rc))
     {
-        for (unsigned i=0;i<pVM->cCPUs;i++)
+        for (VMCPUID i = 0; i < pVM->cCpus; i++)
         {
             PVMCPU pVCpu = &pVM->aCpus[i];
-
             rc = PGMR3ChangeMode(pVM, pVCpu, PGMMODE_REAL);
             if (RT_FAILURE(rc))
@@ -1426,5 +1425,5 @@
      * Force a recalculation of modes and switcher so everyone gets notified.
      */
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -1674,8 +1673,8 @@
      * Common - stats
      */
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU  pVCpu = &pVM->aCpus[i];
-        PPGMCPU pPGM = &pVCpu->pgm.s;
+        PPGMCPU pPGM  = &pVCpu->pgm.s;
 
 #define PGM_REG_COUNTER(a, b, c) \
@@ -2009,5 +2008,5 @@
 
     /* Shadow, guest and both mode switch & relocation for each VCPU. */
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU  pVCpu = &pVM->aCpus[i];
@@ -2180,8 +2179,7 @@
      * Important to clean up the amd64 case.
      */
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU  pVCpu = &pVM->aCpus[i];
-
         rc = PGM_GST_PFN(Exit, pVCpu)(pVCpu);
         AssertRC(rc);
@@ -2196,5 +2194,5 @@
      * Switch mode back to real mode. (before resetting the pgm pool!)
      */
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU  pVCpu = &pVM->aCpus[i];
@@ -2211,5 +2209,5 @@
     pgmR3PoolReset(pVM);
 
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU  pVCpu = &pVM->aCpus[i];
@@ -2439,8 +2437,7 @@
     SSMR3PutStruct(pSSM, pPGM, &s_aPGMFields[0]);
 
-    for (i=0;i<pVM->cCPUs;i++)
-    {
-        PVMCPU pVCpu = &pVM->aCpus[i];
-
+    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+    {
+        PVMCPU pVCpu = &pVM->aCpus[idCpu];
         SSMR3PutStruct(pSSM, &pVCpu->pgm.s, &s_aPGMCpuFields[0]);
     }
@@ -2690,5 +2687,5 @@
         AssertLogRelRCReturn(rc, rc);
 
-        for (VMCPUID i = 0; i < pVM->cCPUs; i++)
+        for (VMCPUID i = 0; i < pVM->cCpus; i++)
         {
             rc = SSMR3GetStruct(pSSM, &pVM->aCpus[i].pgm.s, &s_aPGMCpuFields[0]);
@@ -2698,5 +2695,5 @@
     else if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
     {
-        AssertRelease(pVM->cCPUs == 1);
+        AssertRelease(pVM->cCpus == 1);
 
         PGMOLD pgmOld;
@@ -2714,5 +2711,5 @@
     else
     {
-        AssertRelease(pVM->cCPUs == 1);
+        AssertRelease(pVM->cCpus == 1);
 
         SSMR3GetBool(pSSM,      &pPGM->fMappingsFixed);
@@ -3066,5 +3063,5 @@
          * We require a full resync now.
          */
-        for (VMCPUID i = 0; i < pVM->cCPUs; i++)
+        for (VMCPUID i = 0; i < pVM->cCpus; i++)
         {
             PVMCPU pVCpu = &pVM->aCpus[i];
@@ -3077,5 +3074,5 @@
         pgmR3HandlerPhysicalUpdateAll(pVM);
 
-        for (VMCPUID i = 0; i < pVM->cCPUs; i++)
+        for (VMCPUID i = 0; i < pVM->cCpus; i++)
         {
             PVMCPU pVCpu = &pVM->aCpus[i];
Index: /trunk/src/VBox/VMM/PGMMap.cpp
===================================================================
--- /trunk/src/VBox/VMM/PGMMap.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/PGMMap.cpp	(revision 22890)
@@ -214,5 +214,5 @@
     }
 
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -271,5 +271,5 @@
             MMHyperFree(pVM, pCur);
 
-            for (unsigned i=0;i<pVM->cCPUs;i++)
+            for (VMCPUID i = 0; i < pVM->cCpus; i++)
             {
                 PVMCPU pVCpu = &pVM->aCpus[i];
@@ -521,5 +521,5 @@
 
     /* Only applies to VCPU 0 as we don't support SMP guests with raw mode. */
-    Assert(pVM->cCPUs == 1);
+    Assert(pVM->cCpus == 1);
 
     PVMCPU pVCpu = &pVM->aCpus[0];
@@ -653,8 +653,8 @@
     pVM->pgm.s.cbMappingFixed    = cb;
 
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
-        pVCpu->pgm.s.fSyncFlags       &= ~PGM_SYNC_MONITOR_CR3;
+        pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
         VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
     }
@@ -689,9 +689,8 @@
     pVM->pgm.s.GCPtrMappingFixed = MM_HYPER_AREA_ADDRESS;
     pVM->pgm.s.cbMappingFixed    = cb;
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
-
-        pVCpu->pgm.s.fSyncFlags       &= ~PGM_SYNC_MONITOR_CR3;
+        pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
         VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
     }
@@ -718,8 +717,7 @@
     pVM->pgm.s.GCPtrMappingFixed = 0;
     pVM->pgm.s.cbMappingFixed    = 0;
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
-
         VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
     }
@@ -1161,5 +1159,5 @@
 
     /* Raw mode only which implies one VCPU. */
-    Assert(pVM->cCPUs == 1);
+    Assert(pVM->cCpus == 1);
 
     pMapping->aGCPtrConflicts[pMapping->cConflicts & (PGMMAPPING_CONFLICT_MAX-1)] = GCPtrOldMapping;
@@ -1237,5 +1235,5 @@
 
     /* Raw mode only which implies one VCPU. */
-    Assert(pVM->cCPUs == 1);
+    Assert(pVM->cCpus == 1);
     PVMCPU pVCpu = VMMGetCpu(pVM);
 
Index: /trunk/src/VBox/VMM/SELM.cpp
===================================================================
--- /trunk/src/VBox/VMM/SELM.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/SELM.cpp	(revision 22890)
@@ -398,5 +398,5 @@
     LogFlow(("SELMR3Relocate\n"));
 
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
Index: /trunk/src/VBox/VMM/TM.cpp
===================================================================
--- /trunk/src/VBox/VMM/TM.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/TM.cpp	(revision 22890)
@@ -201,5 +201,5 @@
 
     pVM->tm.s.offVM = RT_OFFSETOF(VM, tm.s);
-    pVM->tm.s.idTimerCpu = pVM->cCPUs - 1; /* The last CPU. */
+    pVM->tm.s.idTimerCpu = pVM->cCpus - 1; /* The last CPU. */
     pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL].enmClock        = TMCLOCK_VIRTUAL;
     pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL].u64Expire       = INT64_MAX;
@@ -627,5 +627,5 @@
 #endif /* VBOX_WITH_STATISTICS */
 
-    for (VMCPUID i = 0; i < pVM->cCPUs; i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
         STAMR3RegisterF(pVM, &pVM->aCpus[i].tm.s.offTSCRawSrc, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS, "TSC offset relative the raw source", "/TM/TSC/offCPU%u", i);
 
@@ -1046,5 +1046,5 @@
     LogFlow(("tmR3Save:\n"));
 #ifdef VBOX_STRICT
-    for (VMCPUID i = 0; i < pVM->cCPUs; i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -1072,9 +1072,8 @@
     SSMR3PutU64(pSSM, TMCLOCK_FREQ_REAL);
 
-    for (VMCPUID i = 0; i < pVM->cCPUs; i++)
+    /* the cpu tick clock. */
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
-
-        /* the cpu tick clock. */
         SSMR3PutU64(pSSM, TMCpuTickGet(pVCpu));
     }
@@ -1098,5 +1097,5 @@
     Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
 #ifdef VBOX_STRICT
-    for (VMCPUID i = 0; i < pVM->cCPUs; i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -1161,5 +1160,5 @@
 
     /* the cpu tick clock. */
-    for (VMCPUID i = 0; i < pVM->cCPUs; i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -1748,9 +1747,9 @@
      * (fRunningQueues is only used as an indicator.)
      */
-    Assert(pVM->tm.s.idTimerCpu < pVM->cCPUs);
+    Assert(pVM->tm.s.idTimerCpu < pVM->cCpus);
     PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
     if (VMMGetCpu(pVM) != pVCpuDst)
     {
-        Assert(pVM->cCPUs > 1);
+        Assert(pVM->cCpus > 1);
         return;
     }
@@ -2686,5 +2685,5 @@
     const uint64_t u64Real        = TMRealGet(pVM);
 
-    for (unsigned i = 0; i < pVM->cCPUs; i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU   pVCpu  = &pVM->aCpus[i];
Index: /trunk/src/VBox/VMM/TRPM.cpp
===================================================================
--- /trunk/src/VBox/VMM/TRPM.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/TRPM.cpp	(revision 22890)
@@ -467,5 +467,5 @@
     pVM->trpm.s.offTRPMCPU         = RT_OFFSETOF(VM, aCpus[0].trpm) - RT_OFFSETOF(VM, trpm);
 
-    for (VMCPUID i = 0; i < pVM->cCPUs; i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -744,5 +744,5 @@
      * Reinitialize other members calling the relocator to get things right.
      */
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -776,8 +776,7 @@
      * Active and saved traps.
      */
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PTRPMCPU pTrpmCpu = &pVM->aCpus[i].trpm.s;
-
         SSMR3PutUInt(pSSM,      pTrpmCpu->uActiveVector);
         SSMR3PutUInt(pSSM,      pTrpmCpu->enmActiveType);
@@ -849,5 +848,5 @@
     if (uVersion == TRPM_SAVED_STATE_VERSION)
     {
-        for (VMCPUID i = 0; i < pVM->cCPUs; i++)
+        for (VMCPUID i = 0; i < pVM->cCpus; i++)
         {
             PTRPMCPU pTrpmCpu = &pVM->aCpus[i].trpm.s;
@@ -1237,5 +1236,5 @@
 {
     /* Only valid in raw mode which implies 1 VCPU */
-    Assert(PATMIsEnabled(pVM) && pVM->cCPUs == 1);
+    Assert(PATMIsEnabled(pVM) && pVM->cCpus == 1);
     PVMCPU pVCpu = &pVM->aCpus[0];
 
@@ -1360,5 +1359,5 @@
 {
     /* Only valid in raw mode which implies 1 VCPU */
-    Assert(PATMIsEnabled(pVM) && pVM->cCPUs == 1);
+    Assert(PATMIsEnabled(pVM) && pVM->cCpus == 1);
     PVMCPU pVCpu = &pVM->aCpus[0];
 
Index: /trunk/src/VBox/VMM/VM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VM.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/VM.cpp	(revision 22890)
@@ -124,6 +124,6 @@
 *   Internal Functions                                                         *
 *******************************************************************************/
-static int               vmR3CreateUVM(uint32_t cCPUs, PUVM *ppUVM);
-static int               vmR3CreateU(PUVM pUVM, uint32_t cCPUs, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM);
+static int               vmR3CreateUVM(uint32_t cCpus, PUVM *ppUVM);
+static int               vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM);
 static int               vmR3InitRing3(PVM pVM, PUVM pUVM);
 static int               vmR3InitVMCpu(PVM pVM);
@@ -182,5 +182,5 @@
  * @returns 0 on success.
  * @returns VBox error code on failure.
- * @param   cCPUs               Number of virtual CPUs for the new VM.
+ * @param   cCpus               Number of virtual CPUs for the new VM.
  * @param   pfnVMAtError        Pointer to callback function for setting VM
  *                              errors. This was added as an implicit call to
@@ -195,7 +195,8 @@
  * @param   ppVM                Where to store the 'handle' of the created VM.
  */
-VMMR3DECL(int)   VMR3Create(uint32_t cCPUs, PFNVMATERROR pfnVMAtError, void *pvUserVM, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM, PVM *ppVM)
-{
-    LogFlow(("VMR3Create: cCPUs=%RU32 pfnVMAtError=%p pvUserVM=%p  pfnCFGMConstructor=%p pvUserCFGM=%p ppVM=%p\n", cCPUs, pfnVMAtError, pvUserVM, pfnCFGMConstructor, pvUserCFGM, ppVM));
+VMMR3DECL(int)   VMR3Create(uint32_t cCpus, PFNVMATERROR pfnVMAtError, void *pvUserVM, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM, PVM *ppVM)
+{
+    LogFlow(("VMR3Create: cCpus=%RU32 pfnVMAtError=%p pvUserVM=%p  pfnCFGMConstructor=%p pvUserCFGM=%p ppVM=%p\n",
+             cCpus, pfnVMAtError, pvUserVM, pfnCFGMConstructor, pvUserCFGM, ppVM));
 
     /*
@@ -216,5 +217,5 @@
      * Validate input.
      */
-    AssertLogRelMsgReturn(cCPUs > 0 && cCPUs <= VMM_MAX_CPU_COUNT, ("%RU32\n", cCPUs), VERR_TOO_MANY_CPUS);
+    AssertLogRelMsgReturn(cCpus > 0 && cCpus <= VMM_MAX_CPU_COUNT, ("%RU32\n", cCpus), VERR_TOO_MANY_CPUS);
 
     /*
@@ -223,5 +224,5 @@
      */
     PUVM pUVM = NULL;                   /* shuts up gcc */
-    int rc = vmR3CreateUVM(cCPUs, &pUVM);
+    int rc = vmR3CreateUVM(cCpus, &pUVM);
     if (RT_FAILURE(rc))
         return rc;
@@ -246,5 +247,5 @@
             PVMREQ pReq;
             rc = VMR3ReqCallU(pUVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT, 0, (PFNRT)vmR3CreateU, 4,
-                              pUVM, cCPUs, pfnCFGMConstructor, pvUserCFGM);
+                              pUVM, cCpus, pfnCFGMConstructor, pvUserCFGM);
             if (RT_SUCCESS(rc))
             {
@@ -555,9 +556,9 @@
         AssertRelease(pVM->pVMR0 == CreateVMReq.pVMR0);
         AssertRelease(pVM->pSession == pUVM->vm.s.pSession);
-        AssertRelease(pVM->cCPUs == cCpus);
+        AssertRelease(pVM->cCpus == cCpus);
         AssertRelease(pVM->offVMCPU == RT_UOFFSETOF(VM, aCpus));
 
-        Log(("VMR3Create: Created pUVM=%p pVM=%p pVMR0=%p hSelf=%#x cCPUs=%RU32\n",
-             pUVM, pVM, pVM->pVMR0, pVM->hSelf, pVM->cCPUs));
+        Log(("VMR3Create: Created pUVM=%p pVM=%p pVMR0=%p hSelf=%#x cCpus=%RU32\n",
+             pUVM, pVM, pVM->pVMR0, pVM->hSelf, pVM->cCpus));
 
         /*
@@ -566,5 +567,5 @@
         pVM->pUVM = pUVM;
 
-        for (uint32_t i = 0; i < pVM->cCPUs; i++)
+        for (VMCPUID i = 0; i < pVM->cCpus; i++)
         {
             pVM->aCpus[i].pUVCpu        = &pUVM->aCpus[i];
@@ -610,5 +611,5 @@
                 if (RT_SUCCESS(rc) && cCPUsCfg != cCpus)
                 {
-                    AssertLogRelMsgFailed(("Configuration error: \"NumCPUs\"=%RU32 and VMR3CreateVM::cCPUs=%RU32 does not match!\n",
+                    AssertLogRelMsgFailed(("Configuration error: \"NumCPUs\"=%RU32 and VMR3CreateVM::cCpus=%RU32 does not match!\n",
                                            cCPUsCfg, cCpus));
                     rc = VERR_INVALID_PARAMETER;
@@ -756,5 +757,5 @@
      * Register the other EMTs with GVM.
      */
-    for (VMCPUID idCpu = 1; idCpu < pVM->cCPUs; idCpu++)
+    for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
     {
         PVMREQ pReq;
@@ -789,11 +790,11 @@
         STAM_REG(pVM, &pVM->StatSwitcherRstrRegs,   STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/RstrRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
 
-        for (unsigned iCpu=0;iCpu<pVM->cCPUs;iCpu++)
+        for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
         {
-            rc = STAMR3RegisterF(pVM, &pUVM->aCpus[iCpu].vm.s.StatHaltYield,  STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling halted state yielding.", "/PROF/VM/CPU%d/Halt/Yield", iCpu);
+            rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltYield,  STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling halted state yielding.", "/PROF/VM/CPU%d/Halt/Yield", idCpu);
             AssertRC(rc);
-            rc = STAMR3RegisterF(pVM, &pUVM->aCpus[iCpu].vm.s.StatHaltBlock,  STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling halted state blocking.", "/PROF/VM/CPU%d/Halt/Block", iCpu);
+            rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlock,  STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling halted state blocking.", "/PROF/VM/CPU%d/Halt/Block", idCpu);
             AssertRC(rc);
-            rc = STAMR3RegisterF(pVM, &pUVM->aCpus[iCpu].vm.s.StatHaltTimers, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling halted state timer tasks.", "/PROF/VM/CPU%d/Halt/Timers", iCpu);
+            rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltTimers, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling halted state timer tasks.", "/PROF/VM/CPU%d/Halt/Timers", idCpu);
             AssertRC(rc);
         }
@@ -1788,5 +1789,5 @@
 
         /* Inform all other VCPUs too. */
-        for (VMCPUID idCpu = 1; idCpu < pVM->cCPUs; idCpu++)
+        for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
         {
             /*
Index: /trunk/src/VBox/VMM/VMM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMM.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/VMM.cpp	(revision 22890)
@@ -248,5 +248,5 @@
 #endif
 
-    for (VMCPUID idCpu = 0; idCpu < pVM->cCPUs; idCpu++)
+    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     {
         PVMCPU pVCpu = &pVM->aCpus[idCpu];
@@ -306,5 +306,5 @@
 
 # ifdef VBOX_WITH_R0_LOGGING
-        for (VMCPUID i = 0; i < pVM->cCPUs; i++)
+        for (VMCPUID i = 0; i < pVM->cCpus; i++)
         {
             PVMCPU pVCpu = &pVM->aCpus[i];
@@ -404,5 +404,5 @@
 
 #ifdef VBOX_WITH_STATISTICS
-    for (VMCPUID i = 0; i < pVM->cCPUs; i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.CallRing3JmpBufR0.cbUsedMax,  STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,      "Max amount of stack used.", "/VMM/Stack/CPU%u/Max", i);
@@ -437,5 +437,5 @@
     int rc = VINF_SUCCESS;
 
-    for (VMCPUID idCpu = 0; idCpu < pVM->cCPUs; idCpu++)
+    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     {
         PVMCPU pVCpu = &pVM->aCpus[idCpu];
@@ -559,5 +559,5 @@
         return VINF_SUCCESS;
 
-    AssertReturn(pVM->cCPUs == 1, VERR_RAW_MODE_INVALID_SMP);
+    AssertReturn(pVM->cCpus == 1, VERR_RAW_MODE_INVALID_SMP);
 
     /*
@@ -681,5 +681,5 @@
      * Make the two stack guard pages present again.
      */
-    for (VMCPUID i = 0; i < pVM->cCPUs; i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         MMR3HyperSetGuard(pVM, pVM->aCpus[i].vmm.s.pbEMTStackR3 - PAGE_SIZE,      PAGE_SIZE, false /*fSet*/);
@@ -728,5 +728,5 @@
      * The stack.
      */
-    for (VMCPUID i = 0; i < pVM->cCPUs; i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -814,5 +814,5 @@
      * in ring-0. Only initialize it once.
      */
-    for (VMCPUID i = 0; i < pVM->cCPUs; i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU       pVCpu = &pVM->aCpus[i];
@@ -922,5 +922,5 @@
      * be running. This avoids breaking the saved state version. :-)
      */
-    for (VMCPUID i = 1; i < pVM->cCPUs; i++)
+    for (VMCPUID i = 1; i < pVM->cCpus; i++)
         SSMR3PutBool(pSSM, VMCPUSTATE_IS_STARTED(VMCPU_GET_STATE(&pVM->aCpus[i])));
 
@@ -969,5 +969,5 @@
     /* Restore the VMCPU states. VCPU 0 is always started. */
     VMCPU_SET_STATE(&pVM->aCpus[0], VMCPUSTATE_STARTED);
-    for (VMCPUID i = 1; i < pVM->cCPUs; i++)
+    for (VMCPUID i = 1; i < pVM->cCpus; i++)
     {
         bool fStarted;
@@ -1129,5 +1129,5 @@
     Log2(("VMMR3RawRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
 
-    AssertReturn(pVM->cCPUs == 1, VERR_RAW_MODE_INVALID_SMP);
+    AssertReturn(pVM->cCpus == 1, VERR_RAW_MODE_INVALID_SMP);
 
     /*
@@ -1289,5 +1289,5 @@
 VMMR3DECL(void) VMMR3SendSipi(PVM pVM, VMCPUID idCpu,  uint32_t uVector)
 {
-    AssertReturnVoid(idCpu < pVM->cCPUs);
+    AssertReturnVoid(idCpu < pVM->cCpus);
 
     PVMREQ pReq;
@@ -1305,5 +1305,5 @@
 VMMR3DECL(void) VMMR3SendInitIpi(PVM pVM, VMCPUID idCpu)
 {
-    AssertReturnVoid(idCpu < pVM->cCPUs);
+    AssertReturnVoid(idCpu < pVM->cCpus);
 
     PVMREQ pReq;
@@ -1379,9 +1379,9 @@
 
     /* Shortcut for the uniprocessor case. */
-    if (pVM->cCPUs == 1)
+    if (pVM->cCpus == 1)
         return pfnHandler(pVM, pvUser);
 
     RTCritSectEnter(&pVM->vmm.s.CritSectSync);
-    for (VMCPUID idCpu = 0; idCpu < pVM->cCPUs; idCpu++)
+    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     {
         if (idCpu != pVCpu->idCpu)
@@ -1393,5 +1393,5 @@
     }
     /* Wait until all other VCPUs are waiting for us. */
-    while (RTCritSectGetWaiters(&pVM->vmm.s.CritSectSync) != (int32_t)(pVM->cCPUs - 1))
+    while (RTCritSectGetWaiters(&pVM->vmm.s.CritSectSync) != (int32_t)(pVM->cCpus - 1))
         RTThreadSleep(1);
 
@@ -1410,5 +1410,5 @@
 {
     uint32_t cReturned = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsReturned);
-    if (cReturned == pVM->cCPUs - 1U)
+    if (cReturned == pVM->cCpus - 1U)
     {
         int rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousDoneCaller);
@@ -1438,5 +1438,5 @@
      */
     uint32_t cEntered = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsEntered);
-    if (cEntered != pVM->cCPUs)
+    if (cEntered != pVM->cCpus)
     {
         if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE)
@@ -1514,5 +1514,5 @@
      */
     uint32_t cDone = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsDone);
-    if (    cDone != pVM->cCPUs
+    if (    cDone != pVM->cCpus
         &&  (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE)
     {
@@ -1587,5 +1587,5 @@
 
     int rc;
-    if (pVM->cCPUs == 1)
+    if (pVM->cCpus == 1)
         /*
          * Shortcut for the single EMT case.
@@ -1706,5 +1706,5 @@
 {
     /* Raw mode implies 1 VCPU. */
-    AssertReturn(pVM->cCPUs == 1, VERR_RAW_MODE_INVALID_SMP);
+    AssertReturn(pVM->cCpus == 1, VERR_RAW_MODE_INVALID_SMP);
     PVMCPU pVCpu = &pVM->aCpus[0];
 
@@ -1832,5 +1832,5 @@
 {
     Log(("VMMR3ResumeHyper: eip=%RRv esp=%RRv\n", CPUMGetHyperEIP(pVCpu), CPUMGetHyperESP(pVCpu)));
-    AssertReturn(pVM->cCPUs == 1, VERR_RAW_MODE_INVALID_SMP);
+    AssertReturn(pVM->cCpus == 1, VERR_RAW_MODE_INVALID_SMP);
 
     /*
@@ -2109,5 +2109,5 @@
      * Per CPU flags.
      */
-    for (VMCPUID i = 0; i < pVM->cCPUs; i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         const uint32_t fLocalForcedActions = pVM->aCpus[i].fLocalForcedActions;
Index: /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp	(revision 22890)
@@ -1073,5 +1073,5 @@
 
     if (    iLeaf == 1
-        &&  pVM->cCPUs > 1)
+        &&  pVM->cCpus > 1)
     {
         /* Bits 31-24: Initial APIC ID */
@@ -1311,8 +1311,7 @@
             break;
     }
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
-
         pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
     }
@@ -1431,8 +1430,7 @@
             break;
     }
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
-
         pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
     }
Index: /trunk/src/VBox/VMM/VMMAll/HWACCMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/HWACCMAll.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/VMMAll/HWACCMAll.cpp	(revision 22890)
@@ -116,5 +116,5 @@
     VMCPUID idCurCpu = VMMGetCpuId(pVM);
 
-    for (unsigned idCpu = 0; idCpu < pVM->cCPUs; idCpu++)
+    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     {
         PVMCPU pVCpu = &pVM->aCpus[idCpu];
@@ -154,10 +154,10 @@
 VMMDECL(int) HWACCMFlushTLBOnAllVCpus(PVM pVM)
 {
-    if (pVM->cCPUs == 1)
+    if (pVM->cCpus == 1)
         return HWACCMFlushTLB(&pVM->aCpus[0]);
 
     VMCPUID idThisCpu = VMMGetCpuId(pVM);
 
-    for (unsigned idCpu = 0; idCpu < pVM->cCPUs; idCpu++)
+    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     {
         PVMCPU pVCpu = &pVM->aCpus[idCpu];
@@ -231,5 +231,5 @@
         VMCPUID idThisCpu = VMMGetCpuId(pVM);
 
-        for (unsigned idCpu = 0; idCpu < pVM->cCPUs; idCpu++)
+        for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
         {
             PVMCPU pVCpu = &pVM->aCpus[idCpu];
Index: /trunk/src/VBox/VMM/VMMAll/IOMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IOMAll.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/VMMAll/IOMAll.cpp	(revision 22890)
@@ -50,5 +50,5 @@
 int iomLock(PVM pVM)
 {
-    Assert(pVM->cCPUs == 1 || !PGMIsLockOwner(pVM));
+    Assert(pVM->cCpus == 1 || !PGMIsLockOwner(pVM));
     int rc = PDMCritSectEnter(&pVM->iom.s.EmtLock, VERR_SEM_BUSY);
     return rc;
Index: /trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp	(revision 22890)
@@ -493,5 +493,5 @@
     PVM pVM = pCritSect->s.CTX_SUFF(pVM);
     AssertPtr(pVM);
-    Assert(idCpu < pVM->cCPUs);
+    Assert(idCpu < pVM->cCpus);
     return pCritSect->s.Core.NativeThreadOwner == pVM->aCpus[idCpu].hNativeThread
         && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
Index: /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAll.cpp	(revision 22890)
@@ -460,5 +460,5 @@
 # ifdef IN_RING0
     /* Note: hack alert for difficult to reproduce problem. */
-    if (    pVM->cCPUs > 1
+    if (    pVM->cCpus > 1
         &&  rc == VERR_PAGE_TABLE_NOT_PRESENT)
     {
@@ -2518,5 +2518,5 @@
 
     /* Only applies to raw mode -> 1 VPCU */
-    Assert(pVM->cCPUs == 1);
+    Assert(pVM->cCpus == 1);
     PVMCPU pVCpu = &pVM->aCpus[0];
 
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h	(revision 22889)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h	(revision 22890)
@@ -856,5 +856,5 @@
                         uint64_t fPageShw;
                         rc = PGMShwGetPage(pVCpu, pvFault, &fPageShw, NULL);
-                        AssertMsg((RT_SUCCESS(rc) && (fPageShw & X86_PTE_RW)) || pVM->cCPUs > 1 /* new monitor can be installed/page table flushed between the trap exit and PGMTrap0eHandler */, ("rc=%Rrc fPageShw=%RX64\n", rc, fPageShw));
+                        AssertMsg((RT_SUCCESS(rc) && (fPageShw & X86_PTE_RW)) || pVM->cCpus > 1 /* new monitor can be installed/page table flushed between the trap exit and PGMTrap0eHandler */, ("rc=%Rrc fPageShw=%RX64\n", rc, fPageShw));
 #   endif /* VBOX_STRICT */
                         STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeOutOfSync, c);
@@ -1729,5 +1729,5 @@
     if (!PdeDst.n.u1Present)
     {
-        AssertMsg(pVM->cCPUs > 1, ("%Unexpected missing PDE p=%llx\n", pPdeDst, (uint64_t)PdeDst.u));
+        AssertMsg(pVM->cCpus > 1, ("%Unexpected missing PDE p=%llx\n", pPdeDst, (uint64_t)PdeDst.u));
         Log(("CPU%d: SyncPage: Pde at %RGv changed behind our back!\n", GCPtrPage));
         return VINF_SUCCESS;    /* force the instruction to be executed again. */
@@ -2255,5 +2255,5 @@
                 else
                 /* Check for stale TLB entry; only applies to the SMP guest case. */
-                if (    pVM->cCPUs > 1
+                if (    pVM->cCpus > 1
                     &&  pPdeDst->n.u1Write
                     &&  pPdeDst->n.u1Accessed)
@@ -2395,5 +2395,5 @@
                         else
                         /* Check for stale TLB entry; only applies to the SMP guest case. */
-                        if (    pVM->cCPUs > 1
+                        if (    pVM->cCpus > 1
                             &&  pPteDst->n.u1Write == 1
                             &&  pPteDst->n.u1Accessed == 1)
@@ -4471,5 +4471,5 @@
 
     /* Clean up the old CR3 root. */
-    if (    pOldShwPageCR3 
+    if (    pOldShwPageCR3
         &&  pOldShwPageCR3 != pNewShwPageCR3    /* @todo can happen due to incorrect syncing between REM & PGM; find the real cause */)
     {
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllGst.h	(revision 22889)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllGst.h	(revision 22890)
@@ -484,5 +484,5 @@
     STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3HandlerVirtualUpdate), a);
 
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PGMHVUSTATE State;
@@ -509,5 +509,5 @@
         RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, true, pgmHandlerVirtualResetOne, pVM);
 
-        for (unsigned i=0;i<pVM->cCPUs;i++)
+        for (VMCPUID i = 0; i < pVM->cCpus; i++)
         {
             PVMCPU pVCpu = &pVM->aCpus[i];
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp	(revision 22890)
@@ -1542,5 +1542,5 @@
     for (unsigned iPage = 0; iPage < pVirt->cPages; iPage++, GCPtr += PAGE_SIZE)
     {
-        for (unsigned i=0;i<pVM->cCPUs;i++)
+        for (VMCPUID i = 0; i < pVM->cCpus; i++)
         {
             PVMCPU pVCpu = &pVM->aCpus[i];
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp	(revision 22890)
@@ -222,5 +222,5 @@
 
     if (    !pgmMapAreMappingsEnabled(&pVM->pgm.s)
-        ||  pVM->cCPUs > 1)
+        ||  pVM->cCpus > 1)
         return;
 
@@ -390,5 +390,5 @@
 
     if (    !pgmMapAreMappingsEnabled(&pVM->pgm.s)
-        ||  pVM->cCPUs > 1)
+        ||  pVM->cCpus > 1)
         return;
 
@@ -602,5 +602,5 @@
         return;
 
-    Assert(pVM->cCPUs == 1);
+    Assert(pVM->cCpus == 1);
 
     /* This only applies to raw mode where we only support 1 VCPU. */
@@ -636,5 +636,5 @@
      */
     if (    !pgmMapAreMappingsEnabled(&pVM->pgm.s)
-        ||  pVM->cCPUs > 1)
+        ||  pVM->cCpus > 1)
         return VINF_SUCCESS;
 
@@ -672,5 +672,5 @@
      */
     if (    !pgmMapAreMappingsEnabled(&pVM->pgm.s)
-        ||  pVM->cCPUs > 1)
+        ||  pVM->cCpus > 1)
         return VINF_SUCCESS;
 
@@ -705,5 +705,5 @@
         return false;
 
-    Assert(pVM->cCPUs == 1);
+    Assert(pVM->cCpus == 1);
 
     /* This only applies to raw mode where we only support 1 VCPU. */
@@ -801,5 +801,5 @@
         return VINF_SUCCESS;
 
-    Assert(pVM->cCPUs == 1);
+    Assert(pVM->cCpus == 1);
 
     /* This only applies to raw mode where we only support 1 VCPU. */
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp	(revision 22890)
@@ -885,5 +885,5 @@
             return false;
     }
-    if (    (    (pDis->param1.flags & USE_REG_GEN32) 
+    if (    (    (pDis->param1.flags & USE_REG_GEN32)
              ||  (pDis->param1.flags & USE_REG_GEN64))
         &&  (pDis->param1.base.reg_gen == USE_REG_ESP))
@@ -1226,5 +1226,5 @@
             if (fReused)
                 goto flushPage;
-                
+
             /* A mov instruction to change the first page table entry will be remembered so we can detect
              * full page table changes early on. This will reduce the amount of unnecessary traps we'll take.
@@ -1315,5 +1315,5 @@
         &&  !fForcedFlush
         &&  pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT
-        &&  (   fNotReusedNotForking 
+        &&  (   fNotReusedNotForking
              || (   !pgmPoolMonitorIsReused(pVM, pVCpu, pRegFrame, pDis, pvFault)
                  && !pgmPoolMonitorIsForking(pPool, pDis, GCPhysFault & PAGE_OFFSET_MASK))
@@ -1362,13 +1362,13 @@
             {
                 rc = PGMShwModifyPage(pVCpu, pvFault, 1, X86_PTE_RW, ~(uint64_t)X86_PTE_RW);
-                AssertMsg(rc == VINF_SUCCESS 
+                AssertMsg(rc == VINF_SUCCESS
                         /* In the SMP case the page table might be removed while we wait for the PGM lock in the trap handler. */
-                        ||  rc == VERR_PAGE_TABLE_NOT_PRESENT 
-                        ||  rc == VERR_PAGE_NOT_PRESENT, 
+                        ||  rc == VERR_PAGE_TABLE_NOT_PRESENT
+                        ||  rc == VERR_PAGE_NOT_PRESENT,
                         ("PGMShwModifyPage -> GCPtr=%RGv rc=%d\n", pvFault, rc));
 
                 pgmPoolAddDirtyPage(pVM, pPool, pPage);
                 pPage->pvDirtyFault = pvFault;
-     
+
                 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), a);
                 pgmUnlock(pVM);
@@ -1388,5 +1388,5 @@
      * interpret then. This may be a bit risky, in which case
      * the reuse detection must be fixed.
-     */       
+     */
     rc = pgmPoolAccessHandlerFlush(pVM, pVCpu, pPool, pPage, pDis, pRegFrame, GCPhysFault, pvFault);
     if (rc == VINF_EM_RAW_EMULATE_INSTR && fReused)
@@ -1423,5 +1423,5 @@
             RTHCPHYS HCPhys = -1;
             int rc = PGMPhysGCPhys2HCPhys(pPool->CTX_SUFF(pVM), pGstPT->a[i].u & X86_PTE_PAE_PG_MASK, &HCPhys);
-            if (    rc != VINF_SUCCESS 
+            if (    rc != VINF_SUCCESS
                 ||  (pShwPT->a[i].u & X86_PTE_PAE_PG_MASK) != HCPhys)
             {
@@ -1555,9 +1555,9 @@
     RTHCPHYS HCPhys;
     rc = PGMShwGetPage(VMMGetCpu(pVM), pPage->pvDirtyFault, &fFlags, &HCPhys);
-    AssertMsg(      (   rc == VINF_SUCCESS 
+    AssertMsg(      (   rc == VINF_SUCCESS
                      && (!(fFlags & X86_PTE_RW) || HCPhys != pPage->Core.Key))
               /* In the SMP case the page table might be removed while we wait for the PGM lock in the trap handler. */
-              ||    rc == VERR_PAGE_TABLE_NOT_PRESENT 
-              ||    rc == VERR_PAGE_NOT_PRESENT, 
+              ||    rc == VERR_PAGE_TABLE_NOT_PRESENT
+              ||    rc == VERR_PAGE_NOT_PRESENT,
               ("PGMShwGetPage -> GCPtr=%RGv rc=%d flags=%RX64\n", pPage->pvDirtyFault, rc, fFlags));
 #endif
@@ -1828,5 +1828,5 @@
 
     /*
-     * Found a usable page, flush it and return. 
+     * Found a usable page, flush it and return.
      */
     return pgmPoolFlushPage(pPool, pPage);
@@ -2635,8 +2635,7 @@
 
     /* Clear the PGM_SYNC_CLEAR_PGM_POOL flag on all VCPUs to prevent redundant flushes. */
-    for (unsigned idCpu = 0; idCpu < pVM->cCPUs; idCpu++)
+    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     {
         PVMCPU pVCpu = &pVM->aCpus[idCpu];
-
         pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_CLEAR_PGM_POOL;
     }
@@ -4911,5 +4910,5 @@
      * including the root page.
      */
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -5067,10 +5066,10 @@
     }
 
-    for (unsigned i=0;i<pVM->cCPUs;i++)
-    {
-        PVMCPU pVCpu = &pVM->aCpus[i];
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
+    {
         /*
          * Re-enter the shadowing mode and assert Sync CR3 FF.
          */
+        PVMCPU pVCpu = &pVM->aCpus[i];
         pgmR3ReEnterShadowModeAfterPoolFlush(pVM, pVCpu);
         VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
Index: /trunk/src/VBox/VMM/VMMAll/REMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/REMAll.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/VMMAll/REMAll.cpp	(revision 22890)
@@ -211,5 +211,5 @@
 VMMDECL(void) REMNotifyHandlerPhysicalFlushIfAlmostFull(PVM pVM, PVMCPU pVCpu)
 {
-    Assert(pVM->cCPUs == 1);
+    Assert(pVM->cCpus == 1);
 
     /*
Index: /trunk/src/VBox/VMM/VMMAll/SELMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/SELMAll.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/VMMAll/SELMAll.cpp	(revision 22890)
@@ -56,5 +56,5 @@
 VMMDECL(RTGCPTR) SELMToFlatBySel(PVM pVM, RTSEL Sel, RTGCPTR Addr)
 {
-    Assert(pVM->cCPUs == 1 && !CPUMIsGuestInLongMode(VMMGetCpu(pVM)));    /* DON'T USE! */
+    Assert(pVM->cCpus == 1 && !CPUMIsGuestInLongMode(VMMGetCpu(pVM)));    /* DON'T USE! */
 
     /** @todo check the limit. */
@@ -835,5 +835,5 @@
 VMMDECL(int) SELMValidateAndConvertCSAddrGCTrap(PVM pVM, X86EFLAGS eflags, RTSEL SelCPL, RTSEL SelCS, RTGCPTR Addr, PRTGCPTR ppvFlat, uint32_t *pcBits)
 {
-    Assert(pVM->cCPUs == 1);
+    Assert(pVM->cCpus == 1);
     PVMCPU pVCpu = &pVM->aCpus[0];
 
@@ -996,5 +996,5 @@
 VMMDECL(int) SELMGetRing1Stack(PVM pVM, uint32_t *pSS, PRTGCPTR32 pEsp)
 {
-    Assert(pVM->cCPUs == 1);
+    Assert(pVM->cCpus == 1);
     PVMCPU pVCpu = &pVM->aCpus[0];
 
Index: /trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp	(revision 22890)
@@ -828,5 +828,5 @@
 {
     uint32_t c = ASMAtomicDecU32(&pVM->tm.s.cVirtualTicking);
-    AssertMsgReturn(c < pVM->cCPUs, ("%u vs %u\n", c, pVM->cCPUs), VERR_INTERNAL_ERROR);
+    AssertMsgReturn(c < pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_INTERNAL_ERROR);
     if (c == 0)
     {
@@ -848,5 +848,5 @@
 {
     uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cVirtualTicking);
-    AssertMsgReturn(c <= pVM->cCPUs, ("%u vs %u\n", c, pVM->cCPUs), VERR_INTERNAL_ERROR);
+    AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_INTERNAL_ERROR);
     if (c == 1)
     {
Index: /trunk/src/VBox/VMM/VMMAll/TRPMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/TRPMAll.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/VMMAll/TRPMAll.cpp	(revision 22890)
@@ -366,5 +366,5 @@
     PVM pVM = pVCpu->CTX_SUFF(pVM);
     X86EFLAGS eflags;
-    Assert(pVM->cCPUs == 1);
+    Assert(pVM->cCpus == 1);
 
     STAM_PROFILE_ADV_START(&pVM->trpm.s.CTX_SUFF_Z(StatForwardProf), a);
Index: /trunk/src/VBox/VMM/VMMAll/VMMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/VMMAll.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/VMMAll/VMMAll.cpp	(revision 22890)
@@ -65,5 +65,5 @@
 
 #elif defined(IN_RING0)
-    if (pVM->cCPUs == 1)
+    if (pVM->cCpus == 1)
         return 0;
     return HWACCMR0GetVMCPUId(pVM);
@@ -88,9 +88,9 @@
     if (idCpu == NIL_VMCPUID)
         return NULL;
-    Assert(idCpu < pVM->cCPUs);
+    Assert(idCpu < pVM->cCpus);
     return &pVM->aCpus[VMR3GetVMCPUId(pVM)];
 
 #elif defined(IN_RING0)
-    if (pVM->cCPUs == 1)
+    if (pVM->cCpus == 1)
         return &pVM->aCpus[0];
     return HWACCMR0GetVMCPU(pVM);
@@ -110,5 +110,5 @@
 VMMDECL(PVMCPU) VMMGetCpu0(PVM pVM)
 {
-    Assert(pVM->cCPUs == 1);
+    Assert(pVM->cCpus == 1);
     return &pVM->aCpus[0];
 }
@@ -125,5 +125,5 @@
 VMMDECL(PVMCPU) VMMGetCpuById(PVM pVM, RTCPUID idCpu)
 {
-    AssertReturn(idCpu < pVM->cCPUs, NULL);
+    AssertReturn(idCpu < pVM->cCpus, NULL);
     return &pVM->aCpus[idCpu];
 }
Index: /trunk/src/VBox/VMM/VMMGC/PDMGCDevice.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMGC/PDMGCDevice.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/VMMGC/PDMGCDevice.cpp	(revision 22890)
@@ -418,5 +418,5 @@
     PVMCPU pVCpu = &pVM->aCpus[idCpu];
 
-    AssertReturnVoid(idCpu < pVM->cCPUs);
+    AssertReturnVoid(idCpu < pVM->cCpus);
 
     LogFlow(("pdmRCApicHlp_SetInterruptFF: caller=%p/%d: VM_FF_INTERRUPT %d -> 1\n",
@@ -447,5 +447,5 @@
     PVMCPU pVCpu = &pVM->aCpus[idCpu];
 
-    AssertReturnVoid(idCpu < pVM->cCPUs);
+    AssertReturnVoid(idCpu < pVM->cCpus);
 
     LogFlow(("pdmRCApicHlp_ClearInterruptFF: caller=%p/%d: VM_FF_INTERRUPT %d -> 0\n",
Index: /trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp	(revision 22890)
@@ -144,5 +144,5 @@
     if (u32DR7 & X86_DR7_ENABLED_MASK)
     {
-        for (unsigned i=0;i<pVM->cCPUs;i++)
+        for (VMCPUID i = 0; i < pVM->cCpus; i++)
             pVM->aCpus[i].cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HOST;
         Log(("CPUMR0Init: host uses debug registers (dr7=%x)\n", u32DR7));
Index: /trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp	(revision 22890)
@@ -610,5 +610,5 @@
                             pVM->hSelf      = iHandle;
                             pVM->cbSelf     = cbVM;
-                            pVM->cCPUs      = cCpus;
+                            pVM->cCpus      = cCpus;
                             pVM->offVMCPU   = RT_UOFFSETOF(VM, aCpus);
 
@@ -1100,5 +1100,5 @@
         return rc;
 
-    AssertReturn(idCpu < pVM->cCPUs, VERR_INVALID_CPU_ID);
+    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
     AssertReturn(pGVM->aCpus[idCpu].hEMT == NIL_RTNATIVETHREAD, VERR_ACCESS_DENIED);
 
Index: /trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp	(revision 22890)
@@ -847,5 +847,5 @@
     }
 
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -932,5 +932,5 @@
     ASMAtomicWriteBool(&pCpu->fInUse, true);
 
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         /* On first entry we'll sync everything. */
@@ -1196,5 +1196,5 @@
 
     /** @todo optimize for large number of VCPUs when that becomes more common. */
-    for (unsigned idCpu=0;idCpu<pVM->cCPUs;idCpu++)
+    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     {
         PVMCPU pVCpu = &pVM->aCpus[idCpu];
Index: /trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp	(revision 22890)
@@ -175,5 +175,5 @@
 
     /* Allocate VMCBs for all guest CPUs. */
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -223,5 +223,5 @@
 VMMR0DECL(int) SVMR0TermVM(PVM pVM)
 {
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -274,5 +274,5 @@
     Assert(pVM->hwaccm.s.svm.fSupported);
 
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU    pVCpu = &pVM->aCpus[i];
@@ -2778,5 +2778,5 @@
 
     /* @todo This code is not guest SMP safe (hyper stack and switchers) */
-    AssertReturn(pVM->cCPUs == 1, VERR_TOO_MANY_CPUS);
+    AssertReturn(pVM->cCpus == 1, VERR_TOO_MANY_CPUS);
     Assert(pfnHandler);
 
Index: /trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp	(revision 22890)
@@ -216,5 +216,5 @@
 
     /* Allocate VMCBs for all guest CPUs. */
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -299,5 +299,5 @@
 VMMR0DECL(int) VMXR0TermVM(PVM pVM)
 {
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -374,5 +374,5 @@
     AssertReturn(pVM, VERR_INVALID_PARAMETER);
 
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
@@ -4447,5 +4447,5 @@
 
     /* @todo This code is not guest SMP safe (hyper stack and switchers) */
-    AssertReturn(pVM->cCPUs == 1, VERR_TOO_MANY_CPUS);
+    AssertReturn(pVM->cCpus == 1, VERR_TOO_MANY_CPUS);
     AssertReturn(pVM->hwaccm.s.pfnHost32ToGuest64R0, VERR_INTERNAL_ERROR);
     Assert(pVCpu->hwaccm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hwaccm.s.vmx.VMCSCache.Write.aField));
Index: /trunk/src/VBox/VMM/VMMR0/PDMR0Device.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/PDMR0Device.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/VMMR0/PDMR0Device.cpp	(revision 22890)
@@ -433,5 +433,5 @@
     PVMCPU pVCpu = &pVM->aCpus[idCpu];
 
-    AssertReturnVoid(idCpu < pVM->cCPUs);
+    AssertReturnVoid(idCpu < pVM->cCpus);
 
     LogFlow(("pdmR0ApicHlp_SetInterruptFF: CPU%d=caller=%p/%d: VM_FF_INTERRUPT %d -> 1 (CPU%d)\n",
@@ -481,5 +481,5 @@
     PVMCPU pVCpu = &pVM->aCpus[idCpu];
 
-    AssertReturnVoid(idCpu < pVM->cCPUs);
+    AssertReturnVoid(idCpu < pVM->cCpus);
 
     LogFlow(("pdmR0ApicHlp_ClearInterruptFF: caller=%p/%d: VM_FF_INTERRUPT %d -> 0\n",
Index: /trunk/src/VBox/VMM/VMMR0/PGMR0DynMap.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/PGMR0DynMap.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/VMMR0/PGMR0DynMap.cpp	(revision 22890)
@@ -355,5 +355,5 @@
      * Initialize the auto sets.
      */
-    VMCPUID idCpu = pVM->cCPUs;
+    VMCPUID idCpu = pVM->cCpus;
     AssertReturn(idCpu > 0 && idCpu <= VMM_MAX_CPU_COUNT, VERR_INTERNAL_ERROR);
     while (idCpu-- > 0)
@@ -443,5 +443,5 @@
          * Clean up and check the auto sets.
          */
-        VMCPUID idCpu = pVM->cCPUs;
+        VMCPUID idCpu = pVM->cCpus;
         while (idCpu-- > 0)
         {
Index: /trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/VMMR0.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/VMMR0/VMMR0.cpp	(revision 22890)
@@ -512,5 +512,5 @@
 VMMR0DECL(void) VMMR0EntryFast(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
 {
-    if (RT_UNLIKELY(idCpu >= pVM->cCPUs))
+    if (RT_UNLIKELY(idCpu >= pVM->cCpus))
         return;
     PVMCPU pVCpu = &pVM->aCpus[idCpu];
@@ -531,5 +531,5 @@
                 bool        fVTxDisabled;
 
-                if (RT_UNLIKELY(pVM->cCPUs > 1))
+                if (RT_UNLIKELY(pVM->cCpus > 1))
                 {
                     pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_INVALID_SMP;
@@ -728,7 +728,7 @@
         }
 
-        if (RT_UNLIKELY(idCpu >= pVM->cCPUs && idCpu != NIL_VMCPUID))
-        {
-            SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu (%u vs cCPUs=%u)\n", idCpu, pVM->cCPUs);
+        if (RT_UNLIKELY(idCpu >= pVM->cCpus && idCpu != NIL_VMCPUID))
+        {
+            SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu (%u vs cCpus=%u)\n", idCpu, pVM->cCpus);
             return VERR_INVALID_PARAMETER;
         }
@@ -1090,5 +1090,5 @@
     if (    VALID_PTR(pVM)
         &&  pVM->pVMR0
-        &&  idCpu < pVM->cCPUs)
+        &&  idCpu < pVM->cCpus)
     {
         switch (enmOperation)
Index: /trunk/src/VBox/VMM/testcase/tstMMHyperHeap.cpp
===================================================================
--- /trunk/src/VBox/VMM/testcase/tstMMHyperHeap.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/testcase/tstMMHyperHeap.cpp	(revision 22890)
@@ -73,6 +73,6 @@
     pVM->pUVM = pUVM;
 
-    pVM->cCPUs = NUM_CPUS;
-    pVM->cbSelf = RT_UOFFSETOF(VM, aCpus[pVM->cCPUs]);
+    pVM->cCpus = NUM_CPUS;
+    pVM->cbSelf = RT_UOFFSETOF(VM, aCpus[pVM->cCpus]);
 
     rc = STAMR3InitUVM(pUVM);
Index: /trunk/src/VBox/VMM/testcase/tstSSM.cpp
===================================================================
--- /trunk/src/VBox/VMM/testcase/tstSSM.cpp	(revision 22889)
+++ /trunk/src/VBox/VMM/testcase/tstSSM.cpp	(revision 22890)
@@ -654,5 +654,5 @@
                     pVM->pVMR3 = pVM;
                     pVM->pUVM = pUVM;
-                    pVM->cCPUs = 1;
+                    pVM->cCpus = 1;
                     pVM->aCpus[0].pVMR3 = pVM;
                     pVM->aCpus[0].hNativeThread = RTThreadNativeSelf();
Index: /trunk/src/recompiler/VBoxRecompiler.c
===================================================================
--- /trunk/src/recompiler/VBoxRecompiler.c	(revision 22889)
+++ /trunk/src/recompiler/VBoxRecompiler.c	(revision 22890)
@@ -668,5 +668,5 @@
     uint32_t fRawRing0 = false;
     uint32_t u32Sep;
-    unsigned i;
+    uint32_t i;
     int rc;
     PREM pRem;
@@ -777,8 +777,7 @@
      * Sync the whole CPU state when executing code in the recompiler.
      */
-    for (i=0;i<pVM->cCPUs;i++)
+    for (i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
-
         CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
     }
@@ -2827,5 +2826,5 @@
 
 #ifdef VBOX_STRICT
-        if (pVM->cCPUs == 1)
+        if (pVM->cCpus == 1)
         {
             /* Check that all records are now on the free list. */
