Index: /trunk/include/VBox/sup.h
===================================================================
--- /trunk/include/VBox/sup.h	(revision 64280)
+++ /trunk/include/VBox/sup.h	(revision 64281)
@@ -272,5 +272,5 @@
     /** CPU group number (always zero, except on windows). */
     uint16_t                iCpuGroup;
-    /** CPU group number (same as iCpuSet, except on windows). */
+    /** CPU group member number (same as iCpuSet, except on windows). */
     uint16_t                iCpuGroupMember;
     /** The APIC ID of this CPU. */
@@ -289,4 +289,24 @@
 typedef SUPGIPCPU *PSUPGIPCPU;
 
+/**
+ * CPU group information.
+ * @remarks Windows only.
+ */
+typedef struct SUPGIPCPUGROUP
+{
+    /** Current number of CPUs in this group. */
+    uint16_t volatile       cMembers;
+    /** Maximum number of CPUs in the group. */
+    uint16_t                cMaxMembers;
+    /** The CPU set index of the members. This table has cMaxMembers entries.
+     * @note For various reasons, entries from cMembers up to cMaxMembers may
+     *       change as the host OS makes dynamic assignments during CPU
+     *       hotplugging. */
+    int16_t                 aiCpuSetIdxs[1];
+} SUPGIPCPUGROUP;
+/** Pointer to a GIP CPU group structure. */
+typedef SUPGIPCPUGROUP *PSUPGIPCPUGROUP;
+/** Pointer to a const GIP CPU group structure. */
+typedef SUPGIPCPUGROUP const *PCSUPGIPCPUGROUP;
 
 /**
@@ -358,4 +378,11 @@
 /** @} */
 
+/** @def SUPGIP_MAX_CPU_GROUPS
+ * Maximum number of CPU groups.  */
+#if RTCPUSET_MAX_CPUS >= 256
+# define SUPGIP_MAX_CPU_GROUPS 256
+#else
+# define SUPGIP_MAX_CPU_GROUPS RTCPUSET_MAX_CPUS
+#endif
 
 /**
@@ -419,7 +446,9 @@
     /** CPU set index to CPU table index. */
     uint16_t            aiCpuFromCpuSetIdx[RTCPUSET_MAX_CPUS];
-    /** Table indexed by CPU group index to get the CPU set index of the first
-     *  CPU. */
-    uint16_t            aiFirstCpuSetIdxFromCpuGroup[RTCPUSET_MAX_CPUS];
+    /** Table indexed by CPU group containing offsets to SUPGIPCPUGROUP
+     * structures; invalid entries are set to UINT16_MAX.  The offsets are relative
+     * to the start of this structure.
+     * @note Windows only. The other hosts set all entries to UINT16_MAX! */
+    uint16_t            aoffCpuGroup[SUPGIP_MAX_CPU_GROUPS];
 
     /** Array of per-cpu data.
@@ -450,5 +479,5 @@
  * Upper 16 bits is the major version. Major version is only changed with
  * incompatible changes in the GIP. */
-#define SUPGLOBALINFOPAGE_VERSION   0x00070000
+#define SUPGLOBALINFOPAGE_VERSION   0x00080000
 
 /**
Index: /trunk/include/iprt/mangling.h
===================================================================
--- /trunk/include/iprt/mangling.h	(revision 64280)
+++ /trunk/include/iprt/mangling.h	(revision 64281)
@@ -1303,4 +1303,6 @@
 # define RTMpGetCurFrequency                            RT_MANGLER(RTMpGetCurFrequency)
 # define RTMpGetDescription                             RT_MANGLER(RTMpGetDescription)
+# define RTMpGetCpuGroupCounts                          RT_MANGLER(RTMpGetCpuGroupCounts)
+# define RTMpGetMaxCpuGroupCount                        RT_MANGLER(RTMpGetMaxCpuGroupCount)
 # define RTMpGetMaxCpuId                                RT_MANGLER(RTMpGetMaxCpuId)
 # define RTMpGetMaxFrequency                            RT_MANGLER(RTMpGetMaxFrequency)
@@ -1326,4 +1328,5 @@
 # define RTMpOnSpecific                                 RT_MANGLER(RTMpOnSpecific)             /* r0drv */
 # define RTMpPokeCpu                                    RT_MANGLER(RTMpPokeCpu)                /* r0drv */
+# define RTMpSetIndexFromCpuGroupMember                 RT_MANGLER(RTMpSetIndexFromCpuGroupMember)
 # define RTMsgError                                     RT_MANGLER(RTMsgError)
 # define RTMsgErrorExit                                 RT_MANGLER(RTMsgErrorExit)
Index: /trunk/include/iprt/mp.h
===================================================================
--- /trunk/include/iprt/mp.h	(revision 64280)
+++ /trunk/include/iprt/mp.h	(revision 64281)
@@ -88,4 +88,35 @@
  */
 RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu);
+
+/**
+ * Translates an NT process group member to a CPU set index.
+ *
+ * @returns CPU set index, -1 if not valid.
+ * @param   idxGroup        The CPU group.
+ * @param   idxMember       The CPU group member number.
+ *
+ * @remarks Only available on Windows.
+ */
+RTDECL(int) RTMpSetIndexFromCpuGroupMember(uint32_t idxGroup, uint32_t idxMember);
+
+/**
+ * Gets the member numbers for a CPU group.
+ *
+ * @returns Maximum number of group members.
+ * @param   idxGroup        The CPU group.
+ * @param   pcActive        Where to return the number of active members.
+ *
+ * @remarks Only available on Windows.
+ */
+RTDECL(uint32_t) RTMpGetCpuGroupCounts(uint32_t idxGroup, uint32_t *pcActive);
+
+/**
+ * Get the maximum number of CPU groups.
+ *
+ * @returns Maximum number of CPU groups.
+ *
+ * @remarks Only available on Windows.
+ */
+RTDECL(uint32_t) RTMpGetMaxCpuGroupCount(void);
 
 /**
Index: /trunk/include/iprt/nt/nt.h
===================================================================
--- /trunk/include/iprt/nt/nt.h	(revision 64280)
+++ /trunk/include/iprt/nt/nt.h	(revision 64281)
@@ -2454,4 +2454,6 @@
 typedef  ULONG   (NTAPI *PFNKEQUERYMAXIMUMPROCESSORCOUNTEX)(USHORT GroupNumber);
 typedef  USHORT  (NTAPI *PFNKEQUERYMAXIMUMGROUPCOUNT)(VOID);
+typedef  ULONG   (NTAPI *PFNKEQUERYACTIVEPROCESSORCOUNT)(KAFFINITY *pfActiveProcessors);
+typedef  ULONG   (NTAPI *PFNKEQUERYACTIVEPROCESSORCOUNTEX)(USHORT GroupNumber);
 typedef  NTSTATUS (NTAPI *PFNKEQUERYLOGICALPROCESSORRELATIONSHIP)(PROCESSOR_NUMBER *pProcNumber,
                                                                   LOGICAL_PROCESSOR_RELATIONSHIP RelationShipType,
Index: /trunk/src/VBox/HostDrivers/Support/SUPDrvGip.cpp
===================================================================
--- /trunk/src/VBox/HostDrivers/Support/SUPDrvGip.cpp	(revision 64280)
+++ /trunk/src/VBox/HostDrivers/Support/SUPDrvGip.cpp	(revision 64281)
@@ -1320,5 +1320,5 @@
     pGip->aCPUs[i].iCpuGroupMember = iCpuSet;
 #ifdef RT_OS_WINDOWS
-    pGip->aCPUs[i].iCpuGroup = supdrvOSGipGetGroupFromCpu(pDevExt, idCpu, &pGip->aCPUs[i].iCpuGroupMember);
+    supdrvOSGipInitGroupBitsForCpu(pDevExt, pGip, &pGip->aCPUs[i]);
 #endif
 
@@ -1744,4 +1744,5 @@
  * Initializes the GIP data.
  *
+ * @returns VBox status code.
  * @param   pDevExt             Pointer to the device instance data.
  * @param   pGip                Pointer to the read-write kernel mapping of the GIP.
@@ -1751,10 +1752,13 @@
  * @param   uUpdateIntervalNS   The update interval in nanoseconds.
  * @param   cCpus               The CPU count.
- */
-static void supdrvGipInit(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip, RTHCPHYS HCPhys,
-                          uint64_t u64NanoTS, unsigned uUpdateHz, unsigned uUpdateIntervalNS, unsigned cCpus)
-{
-    size_t const    cbGip = RT_ALIGN_Z(RT_OFFSETOF(SUPGLOBALINFOPAGE, aCPUs[cCpus]), PAGE_SIZE);
-    unsigned        i;
+ * @param   cbGipCpuGroups      The supdrvOSGipGetGroupTableSize return value we
+ *                              used when allocating the GIP structure.
+ */
+static int supdrvGipInit(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip, RTHCPHYS HCPhys,
+                         uint64_t u64NanoTS, unsigned uUpdateHz, unsigned uUpdateIntervalNS,
+                         unsigned cCpus, size_t cbGipCpuGroups)
+{
+    size_t const cbGip = RT_ALIGN_Z(RT_OFFSETOF(SUPGLOBALINFOPAGE, aCPUs[cCpus]) + cbGipCpuGroups, PAGE_SIZE);
+    unsigned i;
 #ifdef DEBUG_DARWIN_GIP
     OSDBGPRINT(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d cCpus=%u\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz, cCpus));
@@ -1794,12 +1798,12 @@
     for (i = 0; i < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx); i++)
         pGip->aiCpuFromCpuSetIdx[i] = UINT16_MAX;
-    pGip->aiFirstCpuSetIdxFromCpuGroup[0] = 0;
-    for (i = 1; i < RT_ELEMENTS(pGip->aiFirstCpuSetIdxFromCpuGroup); i++)
-        pGip->aiFirstCpuSetIdxFromCpuGroup[i] = UINT16_MAX;
-#ifdef RT_OS_WINDOWS
-    supdrvOSInitGipGroupTable(pDevExt, pGip);
-#endif
+    for (i = 0; i < RT_ELEMENTS(pGip->aoffCpuGroup); i++)
+        pGip->aoffCpuGroup[i] = UINT16_MAX;
     for (i = 0; i < cCpus; i++)
         supdrvGipInitCpu(pGip, &pGip->aCPUs[i], u64NanoTS, 0 /*uCpuHz*/);
+#ifdef RT_OS_WINDOWS
+    int rc = supdrvOSInitGipGroupTable(pDevExt, pGip, cbGipCpuGroups);
+    AssertRCReturn(rc, rc);
+#endif
 
     /*
@@ -1809,4 +1813,6 @@
     pDevExt->HCPhysGip = HCPhys;
     pDevExt->cGipUsers = 0;
+
+    return VINF_SUCCESS;
 }
 
@@ -1821,4 +1827,6 @@
 {
     PSUPGLOBALINFOPAGE  pGip;
+    size_t              cbGip;
+    size_t              cbGipCpuGroups;
     RTHCPHYS            HCPhysGip;
     uint32_t            u32SystemResolution;
@@ -1862,5 +1870,7 @@
      * Allocate a contiguous set of pages with a default kernel mapping.
      */
-    rc = RTR0MemObjAllocCont(&pDevExt->GipMemObj, RT_UOFFSETOF(SUPGLOBALINFOPAGE, aCPUs[cCpus]), false /*fExecutable*/);
+    cbGipCpuGroups = supdrvOSGipGetGroupTableSize(pDevExt);
+    cbGip = RT_UOFFSETOF(SUPGLOBALINFOPAGE, aCPUs[cCpus]) + cbGipCpuGroups;
+    rc = RTR0MemObjAllocCont(&pDevExt->GipMemObj, cbGip, false /*fExecutable*/);
     if (RT_FAILURE(rc))
     {
@@ -1884,8 +1894,9 @@
         u32Interval += u32SystemResolution - uMod;
 
-    supdrvGipInit(pDevExt, pGip, HCPhysGip, RTTimeSystemNanoTS(), RT_NS_1SEC / u32Interval /*=Hz*/, u32Interval, cCpus);
-
-    /*
-     * Important sanity check...
+    rc = supdrvGipInit(pDevExt, pGip, HCPhysGip, RTTimeSystemNanoTS(), RT_NS_1SEC / u32Interval /*=Hz*/, u32Interval,
+                       cCpus, cbGipCpuGroups);
+
+    /*
+     * Important sanity check...  (Sets rc)
      */
     if (RT_UNLIKELY(   pGip->enmUseTscDelta == SUPGIPUSETSCDELTA_ZERO_CLAIMED
@@ -1894,10 +1905,11 @@
     {
         OSDBGPRINT(("supdrvGipCreate: Host-OS/user claims the TSC-deltas are zero but we detected async. TSC! Bad.\n"));
-        return VERR_INTERNAL_ERROR_2;
+        rc = VERR_INTERNAL_ERROR_2;
     }
 
     /* It doesn't make sense to do TSC-delta detection on systems we detect as async. */
-    AssertReturn(   pGip->u32Mode != SUPGIPMODE_ASYNC_TSC
-                 || pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ZERO_CLAIMED, VERR_INTERNAL_ERROR_3);
+    AssertStmt(   pGip->u32Mode != SUPGIPMODE_ASYNC_TSC
+               || pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ZERO_CLAIMED,
+               rc = VERR_INTERNAL_ERROR_3);
 
     /*
@@ -1912,108 +1924,111 @@
      * array with more reasonable values.
      */
-    if (pGip->u32Mode == SUPGIPMODE_INVARIANT_TSC)
-    {
-        rc = supdrvGipInitMeasureTscFreq(pGip, true /*fRough*/); /* cannot fail */
-        supdrvGipInitStartTimerForRefiningInvariantTscFreq(pDevExt);
-    }
-    else
-        rc = supdrvGipInitMeasureTscFreq(pGip, false /*fRough*/);
     if (RT_SUCCESS(rc))
     {
-        /*
-         * Start TSC-delta measurement thread before we start getting MP
-         * events that will try kick it into action (includes the
-         * RTMpOnAll/supdrvGipInitOnCpu call below).
-         */
-        RTCpuSetEmpty(&pDevExt->TscDeltaCpuSet);
-        RTCpuSetEmpty(&pDevExt->TscDeltaObtainedCpuSet);
-#ifdef SUPDRV_USE_TSC_DELTA_THREAD
-        if (pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_ZERO_CLAIMED)
-            rc = supdrvTscDeltaThreadInit(pDevExt);
-#endif
+        if (pGip->u32Mode == SUPGIPMODE_INVARIANT_TSC)
+        {
+            rc = supdrvGipInitMeasureTscFreq(pGip, true /*fRough*/); /* cannot fail */
+            supdrvGipInitStartTimerForRefiningInvariantTscFreq(pDevExt);
+        }
+        else
+            rc = supdrvGipInitMeasureTscFreq(pGip, false /*fRough*/);
         if (RT_SUCCESS(rc))
         {
-            rc = RTMpNotificationRegister(supdrvGipMpEvent, pDevExt);
+            /*
+             * Start TSC-delta measurement thread before we start getting MP
+             * events that will try kick it into action (includes the
+             * RTMpOnAll/supdrvGipInitOnCpu call below).
+             */
+            RTCpuSetEmpty(&pDevExt->TscDeltaCpuSet);
+            RTCpuSetEmpty(&pDevExt->TscDeltaObtainedCpuSet);
+    #ifdef SUPDRV_USE_TSC_DELTA_THREAD
+            if (pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_ZERO_CLAIMED)
+                rc = supdrvTscDeltaThreadInit(pDevExt);
+    #endif
             if (RT_SUCCESS(rc))
             {
-                /*
-                 * Do GIP initialization on all online CPUs.  Wake up the
-                 * TSC-delta thread afterwards.
-                 */
-                rc = RTMpOnAll(supdrvGipInitOnCpu, pDevExt, pGip);
+                rc = RTMpNotificationRegister(supdrvGipMpEvent, pDevExt);
                 if (RT_SUCCESS(rc))
                 {
-#ifdef SUPDRV_USE_TSC_DELTA_THREAD
-                    supdrvTscDeltaThreadStartMeasurement(pDevExt, true /* fForceAll */);
-#else
-                    uint16_t iCpu;
-                    if (pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_ZERO_CLAIMED)
+                    /*
+                     * Do GIP initialization on all online CPUs.  Wake up the
+                     * TSC-delta thread afterwards.
+                     */
+                    rc = RTMpOnAll(supdrvGipInitOnCpu, pDevExt, pGip);
+                    if (RT_SUCCESS(rc))
                     {
-                        /*
-                         * Measure the TSC deltas now that we have MP notifications.
-                         */
-                        int cTries = 5;
-                        do
+    #ifdef SUPDRV_USE_TSC_DELTA_THREAD
+                        supdrvTscDeltaThreadStartMeasurement(pDevExt, true /* fForceAll */);
+    #else
+                        uint16_t iCpu;
+                        if (pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_ZERO_CLAIMED)
                         {
-                            rc = supdrvTscMeasureInitialDeltas(pDevExt);
-                            if (   rc != VERR_TRY_AGAIN
-                                && rc != VERR_CPU_OFFLINE)
-                                break;
-                        } while (--cTries > 0);
-                        for (iCpu = 0; iCpu < pGip->cCpus; iCpu++)
-                            Log(("supdrvTscDeltaInit: cpu[%u] delta %lld\n", iCpu, pGip->aCPUs[iCpu].i64TSCDelta));
+                            /*
+                             * Measure the TSC deltas now that we have MP notifications.
+                             */
+                            int cTries = 5;
+                            do
+                            {
+                                rc = supdrvTscMeasureInitialDeltas(pDevExt);
+                                if (   rc != VERR_TRY_AGAIN
+                                    && rc != VERR_CPU_OFFLINE)
+                                    break;
+                            } while (--cTries > 0);
+                            for (iCpu = 0; iCpu < pGip->cCpus; iCpu++)
+                                Log(("supdrvTscDeltaInit: cpu[%u] delta %lld\n", iCpu, pGip->aCPUs[iCpu].i64TSCDelta));
+                        }
+                        else
+                        {
+                            for (iCpu = 0; iCpu < pGip->cCpus; iCpu++)
+                                AssertMsg(!pGip->aCPUs[iCpu].i64TSCDelta, ("iCpu=%u %lld mode=%d\n", iCpu, pGip->aCPUs[iCpu].i64TSCDelta, pGip->u32Mode));
+                        }
+                        if (RT_SUCCESS(rc))
+    #endif
+                        {
+                            /*
+                             * Create the timer.
+                             * If CPU_ALL isn't supported we'll have to fall back to synchronous mode.
+                             */
+                            if (pGip->u32Mode == SUPGIPMODE_ASYNC_TSC)
+                            {
+                                rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, RTTIMER_FLAGS_CPU_ALL,
+                                                     supdrvGipAsyncTimer, pDevExt);
+                                if (rc == VERR_NOT_SUPPORTED)
+                                {
+                                    OSDBGPRINT(("supdrvGipCreate: omni timer not supported, falling back to synchronous mode\n"));
+                                    pGip->u32Mode = SUPGIPMODE_SYNC_TSC;
+                                }
+                            }
+                            if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
+                                rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, 0 /* fFlags */,
+                                                     supdrvGipSyncAndInvariantTimer, pDevExt);
+                            if (RT_SUCCESS(rc))
+                            {
+                                /*
+                                 * We're good.
+                                 */
+                                Log(("supdrvGipCreate: %u ns interval.\n", u32Interval));
+                                supdrvGipReleaseHigherTimerFrequencyFromSystem(pDevExt);
+
+                                g_pSUPGlobalInfoPage = pGip;
+                                return VINF_SUCCESS;
+                            }
+
+                            OSDBGPRINT(("supdrvGipCreate: failed create GIP timer at %u ns interval. rc=%Rrc\n", u32Interval, rc));
+                            Assert(!pDevExt->pGipTimer);
+                        }
                     }
                     else
-                    {
-                        for (iCpu = 0; iCpu < pGip->cCpus; iCpu++)
-                            AssertMsg(!pGip->aCPUs[iCpu].i64TSCDelta, ("iCpu=%u %lld mode=%d\n", iCpu, pGip->aCPUs[iCpu].i64TSCDelta, pGip->u32Mode));
-                    }
-                    if (RT_SUCCESS(rc))
-#endif
-                    {
-                        /*
-                         * Create the timer.
-                         * If CPU_ALL isn't supported we'll have to fall back to synchronous mode.
-                         */
-                        if (pGip->u32Mode == SUPGIPMODE_ASYNC_TSC)
-                        {
-                            rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, RTTIMER_FLAGS_CPU_ALL,
-                                                 supdrvGipAsyncTimer, pDevExt);
-                            if (rc == VERR_NOT_SUPPORTED)
-                            {
-                                OSDBGPRINT(("supdrvGipCreate: omni timer not supported, falling back to synchronous mode\n"));
-                                pGip->u32Mode = SUPGIPMODE_SYNC_TSC;
-                            }
-                        }
-                        if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
-                            rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, 0 /* fFlags */,
-                                                 supdrvGipSyncAndInvariantTimer, pDevExt);
-                        if (RT_SUCCESS(rc))
-                        {
-                            /*
-                             * We're good.
-                             */
-                            Log(("supdrvGipCreate: %u ns interval.\n", u32Interval));
-                            supdrvGipReleaseHigherTimerFrequencyFromSystem(pDevExt);
-
-                            g_pSUPGlobalInfoPage = pGip;
-                            return VINF_SUCCESS;
-                        }
-
-                        OSDBGPRINT(("supdrvGipCreate: failed create GIP timer at %u ns interval. rc=%Rrc\n", u32Interval, rc));
-                        Assert(!pDevExt->pGipTimer);
-                    }
+                        OSDBGPRINT(("supdrvGipCreate: RTMpOnAll failed. rc=%Rrc\n", rc));
                 }
                 else
-                    OSDBGPRINT(("supdrvGipCreate: RTMpOnAll failed. rc=%Rrc\n", rc));
+                    OSDBGPRINT(("supdrvGipCreate: failed to register MP event notfication. rc=%Rrc\n", rc));
             }
             else
-                OSDBGPRINT(("supdrvGipCreate: failed to register MP event notfication. rc=%Rrc\n", rc));
+                OSDBGPRINT(("supdrvGipCreate: supdrvTscDeltaInit failed. rc=%Rrc\n", rc));
         }
         else
-            OSDBGPRINT(("supdrvGipCreate: supdrvTscDeltaInit failed. rc=%Rrc\n", rc));
-    }
-    else
-        OSDBGPRINT(("supdrvGipCreate: supdrvTscMeasureInitialDeltas failed. rc=%Rrc\n", rc));
+            OSDBGPRINT(("supdrvGipCreate: supdrvTscMeasureInitialDeltas failed. rc=%Rrc\n", rc));
+    }
 
     /* Releases timer frequency increase too. */
Index: /trunk/src/VBox/HostDrivers/Support/SUPDrvIOC.h
===================================================================
--- /trunk/src/VBox/HostDrivers/Support/SUPDrvIOC.h	(revision 64280)
+++ /trunk/src/VBox/HostDrivers/Support/SUPDrvIOC.h	(revision 64281)
@@ -215,5 +215,5 @@
  *          - nothing.
  */
-#define SUPDRV_IOC_VERSION                              0x00270000
+#define SUPDRV_IOC_VERSION                              0x00280000
 
 /** SUP_IOCTL_COOKIE. */
Index: /trunk/src/VBox/HostDrivers/Support/SUPDrvInternal.h
===================================================================
--- /trunk/src/VBox/HostDrivers/Support/SUPDrvInternal.h	(revision 64280)
+++ /trunk/src/VBox/HostDrivers/Support/SUPDrvInternal.h	(revision 64281)
@@ -801,4 +801,14 @@
 
 /**
+ * Called during GIP initialization to calculate the CPU group table size.
+ *
+ * This is currently only implemented on windows [lazy bird].
+ *
+ * @returns Number of bytes needed for SUPGIPCPUGROUP structures.
+ * @param   pDevExt             The device globals.
+ */
+size_t VBOXCALL supdrvOSGipGetGroupTableSize(PSUPDRVDEVEXT pDevExt);
+
+/**
  * Called during GIP initialization to set up the group table and group count.
  *
@@ -808,9 +818,12 @@
  * @param   pGip                The GIP which group table needs initialization.
  *                              It's only partially initialized at this point.
- */
-void VBOXCALL   supdrvOSInitGipGroupTable(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip);
-
-/**
- * Gets the CPU group and member indexes for the given CPU ID.
+ * @param   cbGipCpuGroups      What supdrvOSGipGetGroupTableSize returned.
+ */
+int VBOXCALL    supdrvOSInitGipGroupTable(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip, size_t cbGipCpuGroups);
+
+/**
+ * Initializes the group related members when a CPU is added to the GIP.
+ *
+ * This is called both during GIP initialization and during a CPU online event.
  *
  * This is currently only implemented on windows [lazy bird].
@@ -821,5 +834,5 @@
  * @param   piCpuGroupMember    Where to return the group member number.
  */
-uint16_t VBOXCALL supdrvOSGipGetGroupFromCpu(PSUPDRVDEVEXT pDevExt, RTCPUID idCpu, uint16_t *piCpuGroupMember);
+void VBOXCALL supdrvOSGipInitGroupBitsForCpu(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip, PSUPGIPCPU pGipCpu);
 
 void VBOXCALL   supdrvOSObjInitCreator(PSUPDRVOBJ pObj, PSUPDRVSESSION pSession);
Index: /trunk/src/VBox/HostDrivers/Support/testcase/tstGIP-2.cpp
===================================================================
--- /trunk/src/VBox/HostDrivers/Support/testcase/tstGIP-2.cpp	(revision 64280)
+++ /trunk/src/VBox/HostDrivers/Support/testcase/tstGIP-2.cpp	(revision 64281)
@@ -137,10 +137,11 @@
                      g_pSUPGlobalInfoPage->u32Version,
                      g_pSUPGlobalInfoPage->fGetGipCpu);
-            RTPrintf("tstGIP-2: cCpus=%d  cPossibleCpus=%d cPossibleCpuGroups=%d cPresentCpus=%d cOnlineCpus=%d\n",
+            RTPrintf("tstGIP-2: cCpus=%d  cPossibleCpus=%d cPossibleCpuGroups=%d cPresentCpus=%d cOnlineCpus=%d idCpuMax=%#x\n",
                      g_pSUPGlobalInfoPage->cCpus,
                      g_pSUPGlobalInfoPage->cPossibleCpus,
                      g_pSUPGlobalInfoPage->cPossibleCpuGroups,
                      g_pSUPGlobalInfoPage->cPresentCpus,
-                     g_pSUPGlobalInfoPage->cOnlineCpus);
+                     g_pSUPGlobalInfoPage->cOnlineCpus,
+                     g_pSUPGlobalInfoPage->idCpuMax);
             RTPrintf("tstGIP-2: u32UpdateHz=%RU32  u32UpdateIntervalNS=%RU32  u64NanoTSLastUpdateHz=%RX64  u64CpuHz=%RU64  uCpuHzRef=%RU64\n",
                      g_pSUPGlobalInfoPage->u32UpdateHz,
@@ -149,4 +150,12 @@
                      g_pSUPGlobalInfoPage->u64CpuHz,
                      uCpuHzRef);
+            for (uint32_t iCpu = 0; iCpu < g_pSUPGlobalInfoPage->cCpus; iCpu++)
+                if (g_pSUPGlobalInfoPage->aCPUs[iCpu].enmState != SUPGIPCPUSTATE_INVALID)
+                {
+                    SUPGIPCPU const *pGipCpu = &g_pSUPGlobalInfoPage->aCPUs[iCpu];
+                    RTPrintf("tstGIP-2: aCPU[%u]: enmState=%d iCpuSet=%u idCpu=%#010x iCpuGroup=%u iCpuGroupMember=%u idApic=%#x\n",
+                             iCpu, pGipCpu->enmState, pGipCpu->iCpuSet, pGipCpu->idCpu, pGipCpu->iCpuGroup,
+                             pGipCpu->iCpuGroupMember, pGipCpu->idApic);
+                }
 
             RTPrintf(fHex
Index: /trunk/src/VBox/HostDrivers/Support/win/SUPDrv-win.cpp
===================================================================
--- /trunk/src/VBox/HostDrivers/Support/win/SUPDrv-win.cpp	(revision 64280)
+++ /trunk/src/VBox/HostDrivers/Support/win/SUPDrv-win.cpp	(revision 64281)
@@ -1711,68 +1711,98 @@
 
 
-void VBOXCALL supdrvOSInitGipGroupTable(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip)
+size_t VBOXCALL supdrvOSGipGetGroupTableSize(PSUPDRVDEVEXT pDevExt)
 {
     NOREF(pDevExt);
+    uint32_t cMaxCpus = RTMpGetCount();
+    uint32_t cGroups  = RTMpGetMaxCpuGroupCount();
+
+    return cGroups * RT_OFFSETOF(SUPGIPCPUGROUP, aiCpuSetIdxs)
+         + RT_SIZEOFMEMB(SUPGIPCPUGROUP, aiCpuSetIdxs[0]) * cMaxCpus;
+}
+
+
+int VBOXCALL supdrvOSInitGipGroupTable(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip, size_t cbGipCpuGroups)
+{
+    Assert(cbGipCpuGroups > 0); NOREF(pDevExt); /* cbGipCpuGroups is consumed below. */
+
+    unsigned const  cGroups = RTMpGetMaxCpuGroupCount();
+    AssertReturn(cGroups > 0 && cGroups <= RT_ELEMENTS(pGip->aoffCpuGroup), VERR_INTERNAL_ERROR_2);
+    pGip->cPossibleCpuGroups = cGroups;
+
+    PSUPGIPCPUGROUP pGroup = (PSUPGIPCPUGROUP)&pGip->aCPUs[pGip->cCpus];
+    for (uint32_t idxGroup = 0; idxGroup < cGroups; idxGroup++)
+    {
+        uint32_t cActive  = 0;
+        uint32_t cMax     = RTMpGetCpuGroupCounts(idxGroup, &cActive);
+        uint32_t cbNeeded = RT_OFFSETOF(SUPGIPCPUGROUP, aiCpuSetIdxs[cMax]);
+        AssertReturn(cbNeeded <= cbGipCpuGroups, VERR_INTERNAL_ERROR_3);
+        AssertReturn(cActive <= cMax, VERR_INTERNAL_ERROR_4);
+
+        pGip->aoffCpuGroup[idxGroup] = (uint16_t)((uintptr_t)pGroup - (uintptr_t)pGip);
+        pGroup->cMembers    = (uint16_t)cActive;
+        pGroup->cMaxMembers = (uint16_t)cMax;
+        for (uint32_t idxMember = 0; idxMember < cMax; idxMember++)
+        {
+            pGroup->aiCpuSetIdxs[idxMember] = (int16_t)RTMpSetIndexFromCpuGroupMember(idxGroup, idxMember);
+            Assert((unsigned)pGroup->aiCpuSetIdxs[idxMember] < pGip->cPossibleCpus);
+        }
+
+        /* advance. */
+        cbGipCpuGroups -= cbNeeded;
+        pGroup = (PSUPGIPCPUGROUP)&pGroup->aiCpuSetIdxs[cMax];
+    }
+
+    return VINF_SUCCESS;
+}
+
+
+void VBOXCALL supdrvOSGipInitGroupBitsForCpu(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip, PSUPGIPCPU pGipCpu)
+{
+    NOREF(pDevExt);
 
     /*
-     * The indexes are assigned in group order (see initterm-r0drv-nt.cpp).
+     * Translate the CPU index into a group and member.
      */
-    if (   g_pfnKeQueryMaximumGroupCount
-        && g_pfnKeGetProcessorIndexFromNumber)
-    {
-        unsigned cGroups = g_pfnKeQueryMaximumGroupCount();
-        AssertStmt(cGroups > 0, cGroups = 1);
-        AssertStmt(cGroups < RT_ELEMENTS(pGip->aiFirstCpuSetIdxFromCpuGroup),
-                   cGroups = RT_ELEMENTS(pGip->aiFirstCpuSetIdxFromCpuGroup));
-        pGip->cPossibleCpuGroups = cGroups;
-
-        KEPROCESSORINDEX idxCpuMin = 0;
-        for (unsigned iGroup = 0; iGroup < cGroups; iGroup++)
-        {
-            PROCESSOR_NUMBER ProcNum;
-            ProcNum.Group    = (USHORT)iGroup;
-            ProcNum.Number   = 0;
-            ProcNum.Reserved = 0;
-            KEPROCESSORINDEX idxCpu = g_pfnKeGetProcessorIndexFromNumber(&ProcNum);
-            Assert(idxCpu != INVALID_PROCESSOR_INDEX);
-            Assert(idxCpu >= idxCpuMin);
-            idxCpuMin = idxCpu;
-            pGip->aiFirstCpuSetIdxFromCpuGroup[iGroup] = (uint16_t)idxCpu;
-        }
-    }
-    else
-    {
-        Assert(!g_pfnKeQueryMaximumGroupCount);
-        Assert(!g_pfnKeGetProcessorIndexFromNumber);
-
-        pGip->cPossibleCpuGroups              = 1;
-        pGip->aiFirstCpuSetIdxFromCpuGroup[0] = 0;
-    }
-}
-
-
-uint16_t VBOXCALL supdrvOSGipGetGroupFromCpu(PSUPDRVDEVEXT pDevExt, RTCPUID idCpu, uint16_t *piCpuGroupMember)
-{
-    NOREF(pDevExt);
+    PROCESSOR_NUMBER ProcNum = { 0, pGipCpu->iCpuSet, 0 };
+    if (g_pfnKeGetProcessorNumberFromIndex)
+    {
+        NTSTATUS rcNt = g_pfnKeGetProcessorNumberFromIndex(pGipCpu->iCpuSet, &ProcNum);
+        if (NT_SUCCESS(rcNt))
+            Assert(ProcNum.Group < g_pfnKeQueryMaximumGroupCount());
+        else
+        {
+            AssertFailed();
+            ProcNum.Group  = 0;
+            ProcNum.Number = pGipCpu->iCpuSet;
+        }
+    }
+    pGipCpu->iCpuGroup       = ProcNum.Group;
+    pGipCpu->iCpuGroupMember = ProcNum.Number;
 
     /*
-     * This is just a wrapper around KeGetProcessorNumberFromIndex.
+     * Update the group info.  Just do this wholesale for now (doesn't scale well).
      */
-    if (g_pfnKeGetProcessorNumberFromIndex)
-    {
-        PROCESSOR_NUMBER ProcNum = { UINT16_MAX, UINT8_MAX, 0 };
-        NTSTATUS rcNt = g_pfnKeGetProcessorNumberFromIndex(idCpu, &ProcNum);
-        if (NT_SUCCESS(rcNt))
-        {
-            Assert(ProcNum.Group < g_pfnKeQueryMaximumGroupCount());
-            *piCpuGroupMember = ProcNum.Number;
-            return ProcNum.Group;
-        }
-
-        AssertMsgFailed(("rcNt=%#x for idCpu=%u\n", rcNt, idCpu));
-    }
-
-    *piCpuGroupMember = 0;
-    return idCpu;
+    for (uint32_t idxGroup = 0; idxGroup < pGip->cPossibleCpuGroups; idxGroup++)
+        if (pGip->aoffCpuGroup[idxGroup] != UINT16_MAX)
+        {
+            PSUPGIPCPUGROUP pGroup = (PSUPGIPCPUGROUP)((uintptr_t)pGip + pGip->aoffCpuGroup[idxGroup]);
+
+            uint32_t cActive  = 0;
+            uint32_t cMax     = RTMpGetCpuGroupCounts(idxGroup, &cActive);
+            AssertStmt(cMax == pGroup->cMaxMembers, cMax = pGroup->cMaxMembers);
+            AssertStmt(cActive <= cMax, cActive = cMax);
+            if (pGroup->cMembers != cActive)
+                pGroup->cMembers = cActive;
+
+            for (uint32_t idxMember = 0; idxMember < cMax; idxMember++)
+            {
+                int idxCpuSet = RTMpSetIndexFromCpuGroupMember(idxGroup, idxMember);
+                AssertMsg((unsigned)idxCpuSet < pGip->cPossibleCpus,
+                          ("%d vs %d for %u.%u\n", idxCpuSet, pGip->cPossibleCpus, idxGroup, idxMember));
+
+                if (pGroup->aiCpuSetIdxs[idxMember] != idxCpuSet)
+                    pGroup->aiCpuSetIdxs[idxMember] = idxCpuSet;
+            }
+        }
 }
 
Index: /trunk/src/VBox/Runtime/Makefile.kmk
===================================================================
--- /trunk/src/VBox/Runtime/Makefile.kmk	(revision 64280)
+++ /trunk/src/VBox/Runtime/Makefile.kmk	(revision 64281)
@@ -780,5 +780,5 @@
 	generic/RTSemMutexRequestDebug-generic.cpp \
 	generic/RTThreadSetAffinityToCpu-generic.cpp \
-	generic/mppresent-generic.cpp \
+	generic/mppresent-generic-online.cpp \
 	generic/semrw-$(if-expr defined(VBOX_WITH_LOCKLESS_SEMRW),lockless-,)generic.cpp \
 	generic/uuid-generic.cpp \
@@ -2247,5 +2247,5 @@
 	generic/RTLogWriteStdOut-stub-generic.cpp \
 	generic/RTTimerCreate-generic.cpp \
-	generic/mppresent-generic.cpp \
+	generic/mppresent-generic-online.cpp \
 	generic/RTMpGetCoreCount-generic.cpp \
 	nt/RTErrConvertFromNtStatus.cpp \
@@ -2263,5 +2263,4 @@
 	r0drv/nt/memuserkernel-r0drv-nt.cpp \
 	r0drv/nt/mp-r0drv-nt.cpp \
-	r0drv/nt/mpnotification-r0drv-nt.cpp \
 	r0drv/nt/process-r0drv-nt.cpp \
 	r0drv/nt/RTLogWriteDebugger-r0drv-nt.cpp \
Index: /trunk/src/VBox/Runtime/common/misc/assert.cpp
===================================================================
--- /trunk/src/VBox/Runtime/common/misc/assert.cpp	(revision 64280)
+++ /trunk/src/VBox/Runtime/common/misc/assert.cpp	(revision 64281)
@@ -72,5 +72,5 @@
 static bool volatile                g_fQuiet = false;
 /** Set if assertions may panic. */
-static bool volatile                g_fMayPanic = true;
+static bool volatile                g_fMayPanic = false;//true;
 
 
Index: /trunk/src/VBox/Runtime/common/time/timesupref.cpp
===================================================================
--- /trunk/src/VBox/Runtime/common/time/timesupref.cpp	(revision 64280)
+++ /trunk/src/VBox/Runtime/common/time/timesupref.cpp	(revision 64281)
@@ -37,4 +37,5 @@
 #include <iprt/asm-math.h>
 #include <iprt/asm-amd64-x86.h>
+#include <iprt/param.h>
 #include <VBox/sup.h>
 #ifdef IN_RC
Index: /trunk/src/VBox/Runtime/common/time/timesupref.h
===================================================================
--- /trunk/src/VBox/Runtime/common/time/timesupref.h	(revision 64280)
+++ /trunk/src/VBox/Runtime/common/time/timesupref.h	(revision 64281)
@@ -101,5 +101,13 @@
             uint16_t const  iCpuSet  = uAux & (RTCPUSET_MAX_CPUS - 1);
 #  else
-            uint16_t const  iCpuSet  = pGip->aiFirstCpuSetIdxFromCpuGroup[(uAux >> 8) & UINT8_MAX] + (uAux & UINT8_MAX);
+            uint16_t        iCpuSet = 0;
+            uint16_t        offGipCpuGroup = pGip->aoffCpuGroup[(uAux >> 8) & UINT8_MAX];
+            if (offGipCpuGroup < pGip->cPages * PAGE_SIZE)
+            {
+                PSUPGIPCPUGROUP pGipCpuGroup = (PSUPGIPCPUGROUP)((uintptr_t)pGip + offGipCpuGroup);
+                if (   (uAux & UINT8_MAX) < pGipCpuGroup->cMaxMembers
+                    && pGipCpuGroup->aiCpuSetIdxs[uAux & UINT8_MAX] != -1)
+                    iCpuSet = pGipCpuGroup->aiCpuSetIdxs[uAux & UINT8_MAX];
+            }
 #  endif
             uint16_t const  iGipCpu  = pGip->aiCpuFromCpuSetIdx[iCpuSet];
Index: /trunk/src/VBox/Runtime/generic/mppresent-generic-online.cpp
===================================================================
--- /trunk/src/VBox/Runtime/generic/mppresent-generic-online.cpp	(revision 64281)
+++ /trunk/src/VBox/Runtime/generic/mppresent-generic-online.cpp	(revision 64281)
@@ -0,0 +1,61 @@
+/* $Id$ */
+/** @file
+ * IPRT - Multiprocessor, Stubs for the RTMp*Present* API mapping to RTMp*Online.
+ */
+
+/*
+ * Copyright (C) 2008-2016 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+/*********************************************************************************************************************************
+*   Header Files                                                                                                                 *
+*********************************************************************************************************************************/
+#include <iprt/mp.h>
+#include "internal/iprt.h"
+
+
+RTDECL(PRTCPUSET) RTMpGetPresentSet(PRTCPUSET pSet)
+{
+    return RTMpGetOnlineSet(pSet);
+}
+RT_EXPORT_SYMBOL(RTMpGetPresentSet);
+
+
+RTDECL(RTCPUID) RTMpGetPresentCount(void)
+{
+    return RTMpGetOnlineCount();
+}
+RT_EXPORT_SYMBOL(RTMpGetPresentCount);
+
+
+RTDECL(RTCPUID) RTMpGetPresentCoreCount(void)
+{
+    return RTMpGetOnlineCoreCount();
+}
+RT_EXPORT_SYMBOL(RTMpGetPresentCoreCount);
+
+
+RTDECL(bool) RTMpIsCpuPresent(RTCPUID idCpu)
+{
+    return RTMpIsCpuOnline(idCpu);
+}
+RT_EXPORT_SYMBOL(RTMpIsCpuPresent);
+
Index: /trunk/src/VBox/Runtime/include/internal/mp.h
===================================================================
--- /trunk/src/VBox/Runtime/include/internal/mp.h	(revision 64281)
+++ /trunk/src/VBox/Runtime/include/internal/mp.h	(revision 64281)
@@ -0,0 +1,82 @@
+/* $Id$ */
+/** @file
+ * IPRT - Internal RTMp header
+ */
+
+/*
+ * Copyright (C) 2016 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef ___internal_mp_h
+#define ___internal_mp_h
+
+#include <iprt/assert.h>
+#include <iprt/mp.h>
+
+RT_C_DECLS_BEGIN
+
+
+#ifdef RT_OS_WINDOWS
+/** @todo Return the processor group + number instead.
+ * Unfortunately, DTrace and HM make this impossible for the time being as it
+ * seems to be making the stupid assumption that idCpu == iCpuSet. */
+#if 0
+# define IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+#endif
+
+# ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+
+/** @def RTMPCPUID_FROM_GROUP_AND_NUMBER
+ * Creates the RTCPUID value.
+ *
+ * @remarks We increment a_uGroup by 1 to make sure the ID is never the same as
+ *          the CPU set index.
+ *
+ * @remarks We put the group in the top to make it easy to construct the MAX ID.
+ *          For that reason we also just use 8 bits for the processor number, as
+ *          it keeps the range small.
+ */
+#  define RTMPCPUID_FROM_GROUP_AND_NUMBER(a_uGroup, a_uGroupMember)  \
+    ( (uint8_t)(a_uGroupMember) | (((uint32_t)(a_uGroup) + 1) << 8) )
+
+/** Extracts the group number from a RTCPUID value.  */
+DECLINLINE(uint16_t) rtMpCpuIdGetGroup(RTCPUID idCpu)
+{
+    Assert(idCpu != NIL_RTCPUID);
+    uint16_t idxGroup = idCpu >> 8;
+    Assert(idxGroup != 0);
+    return idxGroup - 1;
+}
+
+/** Extracts the group member number from a RTCPUID value.   */
+DECLINLINE(uint8_t) rtMpCpuIdGetGroupMember(RTCPUID idCpu)
+{
+    Assert(idCpu != NIL_RTCPUID);
+    return (uint8_t)idCpu;
+}
+
+# endif /* IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER */
+#endif /* RT_OS_WINDOWS */
+
+
+RT_C_DECLS_END
+
+#endif
+
Index: /trunk/src/VBox/Runtime/r0drv/nt/initterm-r0drv-nt.cpp
===================================================================
--- /trunk/src/VBox/Runtime/r0drv/nt/initterm-r0drv-nt.cpp	(revision 64280)
+++ /trunk/src/VBox/Runtime/r0drv/nt/initterm-r0drv-nt.cpp	(revision 64281)
@@ -31,12 +31,8 @@
 #include "the-nt-kernel.h"
 #include <iprt/asm-amd64-x86.h>
-#include <iprt/assert.h>
 #include <iprt/err.h>
-#include <iprt/mem.h>
-#include <iprt/mp.h>
 #include <iprt/string.h>
 #include "internal/initterm.h"
 #include "internal-r0drv-nt.h"
-#include "../mp-r0drv.h"
 #include "symdb.h"
 #include "symdbdata.h"
@@ -46,19 +42,4 @@
 *   Global Variables                                                                                                             *
 *********************************************************************************************************************************/
-/** The NT CPU set.
- * KeQueryActiveProcssors() cannot be called at all IRQLs and therefore we'll
- * have to cache it. Fortunately, Nt doesn't really support taking CPUs offline
- * or online. It's first with W2K8 that support for CPU hotplugging was added.
- * Once we start caring about this, we'll simply let the native MP event callback
- * and update this variable as CPUs comes online. (The code is done already.)
- */
-RTCPUSET                                g_rtMpNtCpuSet;
-/** Maximum number of processor groups. */
-uint32_t                                g_cRtMpNtMaxGroups;
-/** Maximum number of processors. */
-uint32_t                                g_cRtMpNtMaxCpus;
-/** The handle of the rtR0NtMpProcessorChangeCallback registration. */
-static PVOID                            g_pvMpCpuChangeCallback = NULL;
-
 /** ExSetTimerResolution, introduced in W2K. */
 PFNMYEXSETTIMERRESOLUTION               g_pfnrtNtExSetTimerResolution;
@@ -93,4 +74,8 @@
 /** KeQueryMaximumGroupCount - Introducted in Windows 7. */
 PFNKEQUERYMAXIMUMGROUPCOUNT             g_pfnrtKeQueryMaximumGroupCount;
+/** KeQueryActiveProcessorCount   - Introduced in Vista and obsoleted in W7. */
+PFNKEQUERYACTIVEPROCESSORCOUNT          g_pfnrtKeQueryActiveProcessorCount;
+/** KeQueryActiveProcessorCountEx - Introduced in Windows 7. */
+PFNKEQUERYACTIVEPROCESSORCOUNTEX        g_pfnrtKeQueryActiveProcessorCountEx;
 /** KeQueryLogicalProcessorRelationship - Introducted in Windows 7. */
 PFNKEQUERYLOGICALPROCESSORRELATIONSHIP  g_pfnrtKeQueryLogicalProcessorRelationship;
@@ -245,326 +230,4 @@
 
 
-/**
- * Implements the NT PROCESSOR_CALLBACK_FUNCTION callback function.
- *
- * This maintains the g_rtMpNtCpuSet and works MP notification callbacks.  When
- * registered, it's called for each active CPU in the system, avoiding racing
- * CPU hotplugging (as well as testing the callback).
- *
- * @param   pvUser              User context (not used).
- * @param   pChangeCtx          Change context (in).
- * @param   prcOperationStatus  Operation status (in/out).
- */
-static VOID __stdcall rtR0NtMpProcessorChangeCallback(void *pvUser, PKE_PROCESSOR_CHANGE_NOTIFY_CONTEXT pChangeCtx,
-                                                      PNTSTATUS prcOperationStatus)
-{
-    RT_NOREF(pvUser, prcOperationStatus);
-    switch (pChangeCtx->State)
-    {
-        case KeProcessorAddCompleteNotify:
-            if (pChangeCtx->NtNumber < RTCPUSET_MAX_CPUS)
-            {
-                RTCpuSetAddByIndex(&g_rtMpNtCpuSet, pChangeCtx->NtNumber);
-                rtMpNotificationDoCallbacks(RTMPEVENT_ONLINE, pChangeCtx->NtNumber);
-            }
-            else
-            {
-                DbgPrint("rtR0NtMpProcessorChangeCallback: NtNumber=%u (%#x) is higher than RTCPUSET_MAX_CPUS (%d)\n",
-                         pChangeCtx->NtNumber, pChangeCtx->NtNumber, RTCPUSET_MAX_CPUS);
-                AssertMsgFailed(("NtNumber=%u (%#x)\n", pChangeCtx->NtNumber, pChangeCtx->NtNumber));
-            }
-            break;
-
-        case KeProcessorAddStartNotify:
-        case KeProcessorAddFailureNotify:
-            /* ignore */
-            break;
-
-        default:
-            AssertMsgFailed(("State=%u\n", pChangeCtx->State));
-    }
-}
-
-
-/**
- * Wrapper around KeQueryLogicalProcessorRelationship.
- *
- * @returns IPRT status code.
- * @param   ppInfo  Where to return the info. Pass to RTMemFree when done.
- */
-static int rtR0NtInitQueryGroupRelations(SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX **ppInfo)
-{
-    ULONG    cbInfo = sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX)
-                    + g_cRtMpNtMaxGroups * sizeof(GROUP_RELATIONSHIP);
-    NTSTATUS rcNt;
-    do
-    {
-        SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *pInfo = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *)RTMemAlloc(cbInfo);
-        if (pInfo)
-        {
-            rcNt = g_pfnrtKeQueryLogicalProcessorRelationship(NULL /*pProcNumber*/, RelationGroup, pInfo, &cbInfo);
-            if (NT_SUCCESS(rcNt))
-            {
-                *ppInfo = pInfo;
-                return VINF_SUCCESS;
-            }
-
-            RTMemFree(pInfo);
-            pInfo = NULL;
-        }
-        else
-            rcNt = STATUS_NO_MEMORY;
-    } while (rcNt == STATUS_INFO_LENGTH_MISMATCH);
-    DbgPrint("IPRT: Fatal: KeQueryLogicalProcessorRelationship failed: %#x\n", rcNt);
-    AssertMsgFailed(("KeQueryLogicalProcessorRelationship failed: %#x\n", rcNt));
-    return RTErrConvertFromNtStatus(rcNt);
-}
-
-
-/**
- * Initalizes multiprocessor globals.
- *
- * @returns IPRT status code.
- */
-static int rtR0NtInitMp(RTNTSDBOSVER const *pOsVerInfo)
-{
-#define MY_CHECK_BREAK(a_Check, a_DbgPrintArgs) \
-        AssertMsgBreakStmt(a_Check, a_DbgPrintArgs, DbgPrint a_DbgPrintArgs; rc = VERR_INTERNAL_ERROR_4 )
-#define MY_CHECK_RETURN(a_Check, a_DbgPrintArgs, a_rcRet) \
-        AssertMsgReturnStmt(a_Check, a_DbgPrintArgs, DbgPrint a_DbgPrintArgs, a_rcRet)
-#define MY_CHECK(a_Check, a_DbgPrintArgs) \
-        AssertMsgStmt(a_Check, a_DbgPrintArgs, DbgPrint a_DbgPrintArgs; rc = VERR_INTERNAL_ERROR_4 )
-
-    /*
-     * API combination checks.
-     */
-    MY_CHECK_RETURN(!g_pfnrtKeSetTargetProcessorDpcEx || g_pfnrtKeGetProcessorNumberFromIndex,
-                    ("IPRT: Fatal: Missing KeSetTargetProcessorDpcEx without KeGetProcessorNumberFromIndex!\n"),
-                    VERR_SYMBOL_NOT_FOUND);
-
-    /*
-     * Get max number of processor groups.
-     */
-    if (g_pfnrtKeQueryMaximumGroupCount)
-    {
-        g_cRtMpNtMaxGroups = g_pfnrtKeQueryMaximumGroupCount();
-        MY_CHECK_RETURN(g_cRtMpNtMaxGroups <= RTCPUSET_MAX_CPUS && g_cRtMpNtMaxGroups > 0,
-                        ("IPRT: Fatal: g_cRtMpNtMaxGroups=%u, max %u\n", g_cRtMpNtMaxGroups, RTCPUSET_MAX_CPUS),
-                        VERR_MP_TOO_MANY_CPUS);
-    }
-    else
-        g_cRtMpNtMaxGroups = 1;
-
-    /*
-     * Get max number CPUs.
-     * This also defines the range of NT CPU indexes, RTCPUID and index into RTCPUSET.
-     */
-    if (g_pfnrtKeQueryMaximumProcessorCountEx)
-    {
-        g_cRtMpNtMaxCpus = g_pfnrtKeQueryMaximumProcessorCountEx(ALL_PROCESSOR_GROUPS);
-        MY_CHECK_RETURN(g_cRtMpNtMaxCpus <= RTCPUSET_MAX_CPUS && g_cRtMpNtMaxCpus > 0,
-                        ("IPRT: Fatal: g_cRtMpNtMaxGroups=%u, max %u [KeQueryMaximumProcessorCountEx]\n",
-                         g_cRtMpNtMaxGroups, RTCPUSET_MAX_CPUS),
-                        VERR_MP_TOO_MANY_CPUS);
-    }
-    else if (g_pfnrtKeQueryMaximumProcessorCount)
-    {
-        g_cRtMpNtMaxCpus = g_pfnrtKeQueryMaximumProcessorCount();
-        MY_CHECK_RETURN(g_cRtMpNtMaxCpus <= RTCPUSET_MAX_CPUS && g_cRtMpNtMaxCpus > 0,
-                        ("IPRT: Fatal: g_cRtMpNtMaxGroups=%u, max %u [KeQueryMaximumProcessorCount]\n",
-                         g_cRtMpNtMaxGroups, RTCPUSET_MAX_CPUS),
-                        VERR_MP_TOO_MANY_CPUS);
-    }
-    else if (g_pfnrtKeQueryActiveProcessors)
-    {
-        KAFFINITY fActiveProcessors = g_pfnrtKeQueryActiveProcessors();
-        MY_CHECK_RETURN(fActiveProcessors != 0,
-                        ("IPRT: Fatal: KeQueryActiveProcessors returned 0!\n"),
-                        VERR_INTERNAL_ERROR_2);
-        g_cRtMpNtMaxCpus = 0;
-        do
-        {
-            g_cRtMpNtMaxCpus++;
-            fActiveProcessors >>= 1;
-        } while (fActiveProcessors);
-    }
-    else
-        g_cRtMpNtMaxCpus = KeNumberProcessors;
-
-    /*
-     * Query the details for the groups to figure out which CPUs are online as
-     * well as the NT index limit.
-     */
-    if (g_pfnrtKeQueryLogicalProcessorRelationship)
-    {
-        MY_CHECK_RETURN(g_pfnrtKeGetProcessorIndexFromNumber,
-                        ("IPRT: Fatal: Found KeQueryLogicalProcessorRelationship but not KeGetProcessorIndexFromNumber!\n"),
-                        VERR_SYMBOL_NOT_FOUND);
-        MY_CHECK_RETURN(g_pfnrtKeGetProcessorNumberFromIndex,
-                        ("IPRT: Fatal: Found KeQueryLogicalProcessorRelationship but not KeGetProcessorIndexFromNumber!\n"),
-                        VERR_SYMBOL_NOT_FOUND);
-        MY_CHECK_RETURN(g_pfnrtKeSetTargetProcessorDpcEx,
-                        ("IPRT: Fatal: Found KeQueryLogicalProcessorRelationship but not KeSetTargetProcessorDpcEx!\n"),
-                        VERR_SYMBOL_NOT_FOUND);
-
-        SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *pInfo = NULL;
-        int rc = rtR0NtInitQueryGroupRelations(&pInfo);
-        if (RT_FAILURE(rc))
-            return rc;
-
-        AssertReturnStmt(pInfo->Group.MaximumGroupCount == g_cRtMpNtMaxGroups, RTMemFree(pInfo), VERR_INTERNAL_ERROR_3);
-
-        /*
-         * Calc online mask.
-         *
-         * Also check ASSUMPTIONS:
-         *      - Processor indexes going to KeQueryMaximumProcessorCountEx(ALL_PROCESSOR_GROUPS)
-         *      - Processor indexes being assigned to absent hotswappable CPUs, i.e.
-         *        KeGetProcessorIndexFromNumber and KeGetProcessorNumberFromIndex works
-         *        all possible indexes. [Not yet confirmed!]
-         *      - Processor indexes are assigned in group order.
-         *      - MaximumProcessorCount specifies the highest bit in the active mask.
-         *        This is for confirming process IDs assigned by IPRT in ring-3.
-         */
-        /** @todo Test the latter on a real/virtual system. */
-        RTCpuSetEmpty(&g_rtMpNtCpuSet);
-        uint32_t idxCpuExpect = 0;
-        for (uint32_t idxGroup = 0; RT_SUCCESS(rc) && idxGroup < pInfo->Group.ActiveGroupCount; idxGroup++)
-        {
-            const PROCESSOR_GROUP_INFO *pGrpInfo = &pInfo->Group.GroupInfo[idxGroup];
-            MY_CHECK_BREAK(pGrpInfo->MaximumProcessorCount <= MAXIMUM_PROC_PER_GROUP,
-                           ("IPRT: Fatal: MaximumProcessorCount=%u\n", pGrpInfo->MaximumProcessorCount));
-            MY_CHECK_BREAK(pGrpInfo->ActiveProcessorCount <= MAXIMUM_PROC_PER_GROUP,
-                           ("IPRT: Fatal: ActiveProcessorCount=%u\n", pGrpInfo->ActiveProcessorCount));
-            MY_CHECK_BREAK(pGrpInfo->ActiveProcessorCount <= pGrpInfo->MaximumProcessorCount,
-                           ("IPRT: Fatal: ActiveProcessorCount=%u > MaximumProcessorCount=%u\n",
-                            pGrpInfo->ActiveProcessorCount, pGrpInfo->MaximumProcessorCount));
-            for (uint32_t idxMember = 0; idxMember < pGrpInfo->MaximumProcessorCount; idxMember++, idxCpuExpect++)
-            {
-                PROCESSOR_NUMBER ProcNum;
-                ProcNum.Group    = (USHORT)idxGroup;
-                ProcNum.Number   = (UCHAR)idxMember;
-                ProcNum.Reserved = 0;
-                ULONG idxCpu = g_pfnrtKeGetProcessorIndexFromNumber(&ProcNum);
-                if (idxCpu != INVALID_PROCESSOR_INDEX)
-                {
-                    MY_CHECK_BREAK(idxCpu < g_cRtMpNtMaxCpus && idxCpu < RTCPUSET_MAX_CPUS,
-                                   ("IPRT: Fatal: idxCpu=%u >= g_cRtMpNtMaxCpu=%u (RTCPUSET_MAX_CPUS=%u)\n",
-                                    idxCpu, g_cRtMpNtMaxCpus, RTCPUSET_MAX_CPUS));
-                    MY_CHECK_BREAK(idxCpu == idxCpuExpect, ("IPRT: Fatal: idxCpu=%u != idxCpuExpect=%u\n", idxCpu, idxCpuExpect));
-
-                    ProcNum.Group    = UINT16_MAX;
-                    ProcNum.Number   = UINT8_MAX;
-                    ProcNum.Reserved = UINT8_MAX;
-                    NTSTATUS rcNt = g_pfnrtKeGetProcessorNumberFromIndex(idxCpu, &ProcNum);
-                    MY_CHECK_BREAK(NT_SUCCESS(rcNt), ("IPRT: Fatal: KeGetProcessorNumberFromIndex(%u,) -> %#x!\n", idxCpu, rcNt));
-                    MY_CHECK_BREAK(ProcNum.Group == idxGroup && ProcNum.Number == idxMember,
-                                   ("IPRT: Fatal: KeGetProcessorXxxxFromYyyy roundtrip error for %#x! Group: %u vs %u, Number: %u vs %u\n",
-                                    idxCpu, ProcNum.Group, idxGroup, ProcNum.Number, idxMember));
-
-                    if (pGrpInfo->ActiveProcessorMask & RT_BIT_64(idxMember))
-                        RTCpuSetAddByIndex(&g_rtMpNtCpuSet, idxCpu);
-                }
-                else
-                {
-                    /* W2K8 server gives me a max of 64 logical CPUs, even if the system only has 12,
-                       causing failures here.  Not yet sure how this would work with two CPU groups yet... */
-                    MY_CHECK_BREAK(   idxMember >= pGrpInfo->ActiveProcessorCount
-                                   && !(pGrpInfo->ActiveProcessorMask & RT_BIT_64(idxMember)),
-                                   ("IPRT: Fatal: KeGetProcessorIndexFromNumber(%u/%u) failed! cMax=%u cActive=%u\n",
-                                    idxGroup, idxMember, pGrpInfo->MaximumProcessorCount, pGrpInfo->ActiveProcessorCount));
-                }
-            }
-        }
-        RTMemFree(pInfo);
-        if (RT_FAILURE(rc)) /* MY_CHECK_BREAK sets rc. */
-            return rc;
-    }
-    else
-    {
-        /* Legacy: */
-        MY_CHECK_RETURN(g_cRtMpNtMaxGroups == 1, ("IPRT: Fatal: Missing KeQueryLogicalProcessorRelationship!\n"),
-                        VERR_SYMBOL_NOT_FOUND);
-
-        if (g_pfnrtKeQueryActiveProcessors)
-            RTCpuSetFromU64(&g_rtMpNtCpuSet, g_pfnrtKeQueryActiveProcessors());
-        else if (g_cRtMpNtMaxCpus < 64)
-            RTCpuSetFromU64(&g_rtMpNtCpuSet, (UINT64_C(1) << g_cRtMpNtMaxCpus) - 1);
-        else
-        {
-            MY_CHECK_RETURN(g_cRtMpNtMaxCpus == 64, ("IPRT: Fatal: g_cRtMpNtMaxCpus=%u, expect 64 or less\n", g_cRtMpNtMaxCpus),
-                            VERR_MP_TOO_MANY_CPUS);
-            RTCpuSetFromU64(&g_rtMpNtCpuSet, UINT64_MAX);
-        }
-    }
-
-    /*
-     * Register CPU hot plugging callback.
-     */
-    Assert(g_pvMpCpuChangeCallback == NULL);
-    if (g_pfnrtKeRegisterProcessorChangeCallback)
-    {
-        MY_CHECK_RETURN(g_pfnrtKeDeregisterProcessorChangeCallback,
-                        ("IPRT: Fatal: KeRegisterProcessorChangeCallback without KeDeregisterProcessorChangeCallback!\n"),
-                        VERR_SYMBOL_NOT_FOUND);
-
-        RTCPUSET ActiveSetCopy = g_rtMpNtCpuSet;
-        RTCpuSetEmpty(&g_rtMpNtCpuSet);
-        g_pvMpCpuChangeCallback = g_pfnrtKeRegisterProcessorChangeCallback(rtR0NtMpProcessorChangeCallback, NULL /*pvUser*/,
-                                                                           KE_PROCESSOR_CHANGE_ADD_EXISTING);
-        if (!g_pvMpCpuChangeCallback)  
-        {
-            AssertFailed();
-            g_rtMpNtCpuSet = ActiveSetCopy;
-        }
-    }
-
-    /*
-     * Special IPI fun for RTMpPokeCpu.
-     *
-     * On Vista and later the DPC method doesn't seem to reliably send IPIs,
-     * so we have to use alternative methods.
-     *
-     * On AMD64 We used to use the HalSendSoftwareInterrupt API (also x86 on
-     * W10+), it looks faster and more convenient to use, however we're either
-     * using it wrong or it doesn't reliably do what we want (see @bugref{8343}).
-     *
-     * The HalRequestIpip API is thus far the only alternative to KeInsertQueueDpc
-     * for doing targetted IPIs.  Trouble with this API is that it changed
-     * fundamentally in Window 7 when they added support for lots of processors.
-     *
-     * If we really think we cannot use KeInsertQueueDpc, we use the broadcast IPI
-     * API KeIpiGenericCall.
-     */
-    if (   pOsVerInfo->uMajorVer > 6
-        || (pOsVerInfo->uMajorVer == 6 && pOsVerInfo->uMinorVer > 0))
-        g_pfnrtHalRequestIpiPreW7 = NULL;
-    else
-        g_pfnrtHalRequestIpiW7Plus = NULL;
-
-    g_pfnrtMpPokeCpuWorker = rtMpPokeCpuUsingDpc;
-#ifndef IPRT_TARGET_NT4
-    if (   g_pfnrtHalRequestIpiW7Plus
-        && g_pfnrtKeInitializeAffinityEx
-        && g_pfnrtKeAddProcessorAffinityEx
-        && g_pfnrtKeGetProcessorIndexFromNumber)
-    {
-        DbgPrint("IPRT: RTMpPoke => rtMpPokeCpuUsingHalReqestIpiW7Plus\n");
-        g_pfnrtMpPokeCpuWorker = rtMpPokeCpuUsingHalReqestIpiW7Plus;
-    }
-    else if (pOsVerInfo->uMajorVer >= 6 && g_pfnrtKeIpiGenericCall)
-    {
-        DbgPrint("IPRT: RTMpPoke => rtMpPokeCpuUsingBroadcastIpi\n");
-        g_pfnrtMpPokeCpuWorker = rtMpPokeCpuUsingBroadcastIpi;
-    }
-    else
-        DbgPrint("IPRT: RTMpPoke => rtMpPokeCpuUsingDpc\n");
-    /* else: Windows XP should send always send an IPI -> VERIFY */
-#endif
-
-    return VINF_SUCCESS;
-}
-
-
 DECLHIDDEN(int) rtR0InitNative(void)
 {
@@ -599,4 +262,6 @@
     GET_SYSTEM_ROUTINE(KeQueryMaximumProcessorCountEx);
     GET_SYSTEM_ROUTINE(KeQueryMaximumGroupCount);
+    GET_SYSTEM_ROUTINE(KeQueryActiveProcessorCount);
+    GET_SYSTEM_ROUTINE(KeQueryActiveProcessorCountEx);
     GET_SYSTEM_ROUTINE(KeQueryLogicalProcessorRelationship);
     GET_SYSTEM_ROUTINE(KeRegisterProcessorChangeCallback);
@@ -743,5 +408,5 @@
     else
         DbgPrint("IPRT: _KPRCB:{.QuantumEnd=%x/%d, .DpcQueueDepth=%x/%d} Kernel %u.%u %u %s\n",
-                 g_offrtNtPbQuantumEnd, g_cbrtNtPbQuantumEnd, g_offrtNtPbDpcQueueDepth,
+                 g_offrtNtPbQuantumEnd, g_cbrtNtPbQuantumEnd, g_offrtNtPbDpcQueueDepth, g_offrtNtPbDpcQueueDepth,
                  OsVerInfo.uMajorVer, OsVerInfo.uMinorVer, OsVerInfo.uBuildNo, OsVerInfo.fChecked ? "checked" : "free");
 # endif
@@ -752,9 +417,9 @@
      * we call rtR0TermNative to do the deregistration on failure.
      */
-    int rc = rtR0NtInitMp(&OsVerInfo);
+    int rc = rtR0MpNtInit(&OsVerInfo);
     if (RT_FAILURE(rc))
     {
         rtR0TermNative();
-        DbgPrint("IPRT: Fatal: rtR0NtInitMp failed: %d\n", rc);
+        DbgPrint("IPRT: Fatal: rtR0MpNtInit failed: %d\n", rc);
         return rc;
     }
@@ -766,14 +431,5 @@
 DECLHIDDEN(void) rtR0TermNative(void)
 {
-    /*
-     * Deregister the processor change callback.
-     */
-    PVOID pvMpCpuChangeCallback = g_pvMpCpuChangeCallback;
-    g_pvMpCpuChangeCallback = NULL;
-    if (pvMpCpuChangeCallback)
-    {
-        AssertReturnVoid(g_pfnrtKeDeregisterProcessorChangeCallback);
-        g_pfnrtKeDeregisterProcessorChangeCallback(pvMpCpuChangeCallback);
-    }
+    rtR0MpNtTerm();
 }
 
Index: /trunk/src/VBox/Runtime/r0drv/nt/internal-r0drv-nt.h
===================================================================
--- /trunk/src/VBox/Runtime/r0drv/nt/internal-r0drv-nt.h	(revision 64280)
+++ /trunk/src/VBox/Runtime/r0drv/nt/internal-r0drv-nt.h	(revision 64281)
@@ -56,4 +56,5 @@
 extern uint32_t                                g_cRtMpNtMaxGroups;
 extern uint32_t                                g_cRtMpNtMaxCpus;
+extern RTCPUID                                 g_aidRtMpNtByCpuSetIdx[RTCPUSET_MAX_CPUS];
 
 extern PFNMYEXSETTIMERRESOLUTION               g_pfnrtNtExSetTimerResolution;
@@ -74,4 +75,6 @@
 extern PFNKEQUERYMAXIMUMPROCESSORCOUNTEX       g_pfnrtKeQueryMaximumProcessorCountEx;
 extern PFNKEQUERYMAXIMUMGROUPCOUNT             g_pfnrtKeQueryMaximumGroupCount;
+extern PFNKEQUERYACTIVEPROCESSORCOUNT          g_pfnrtKeQueryActiveProcessorCount;
+extern PFNKEQUERYACTIVEPROCESSORCOUNTEX        g_pfnrtKeQueryActiveProcessorCountEx;
 extern PFNKEQUERYLOGICALPROCESSORRELATIONSHIP  g_pfnrtKeQueryLogicalProcessorRelationship;
 extern PFNKEREGISTERPROCESSORCHANGECALLBACK    g_pfnrtKeRegisterProcessorChangeCallback;
@@ -95,4 +98,7 @@
 int __stdcall rtMpPokeCpuUsingHalReqestIpiPreW7(RTCPUID idCpu);
 
+struct RTNTSDBOSVER;
+DECLHIDDEN(int)  rtR0MpNtInit(struct RTNTSDBOSVER const *pOsVerInfo);
+DECLHIDDEN(void) rtR0MpNtTerm(void);
 DECLHIDDEN(int) rtMpNtSetTargetProcessorDpc(KDPC *pDpc, RTCPUID idCpu);
 
Index: /trunk/src/VBox/Runtime/r0drv/nt/mp-r0drv-nt.cpp
===================================================================
--- /trunk/src/VBox/Runtime/r0drv/nt/mp-r0drv-nt.cpp	(revision 64280)
+++ /trunk/src/VBox/Runtime/r0drv/nt/mp-r0drv-nt.cpp	(revision 64281)
@@ -36,7 +36,10 @@
 #include <iprt/asm.h>
 #include <iprt/log.h>
+#include <iprt/mem.h>
 #include <iprt/time.h>
 #include "r0drv/mp-r0drv.h"
+#include "symdb.h"
 #include "internal-r0drv-nt.h"
+#include "internal/mp.h"
 
 
@@ -75,8 +78,835 @@
 
 
+/*********************************************************************************************************************************
+*   Defined Constants And Macros                                                                                                 *
+*********************************************************************************************************************************/
+/** Inactive bit for g_aidRtMpNtByCpuSetIdx. */
+#define RTMPNT_ID_F_INACTIVE    RT_BIT_32(31)
+
+
+/*********************************************************************************************************************************
+*   Global Variables                                                                                                             *
+*********************************************************************************************************************************/
+/** Maximum number of processor groups. */
+uint32_t                                g_cRtMpNtMaxGroups;
+/** Maximum number of processors. */
+uint32_t                                g_cRtMpNtMaxCpus;
+/** Number of active processors. */
+uint32_t volatile                       g_cRtMpNtActiveCpus;
+/** The NT CPU set.
+ * KeQueryActiveProcessors() cannot be called at all IRQLs and therefore we'll
+ * have to cache it.  Fortunately, NT doesn't really support taking CPUs offline,
+ * and taking them online was introduced with W2K8 where it is intended for virtual
+ * machines and not real HW.  We update this, g_cRtMpNtActiveCpus and
+ * g_aidRtMpNtByCpuSetIdx from the rtR0NtMpProcessorChangeCallback.
+ */
+RTCPUSET                                g_rtMpNtCpuSet;
+
+/** Static per group info.
+ * @remarks  With RTCPUSET_MAX_CPUS as 256, this takes up 33KB. */
+static struct
+{
+    /** The max CPUs in the group. */
+    uint16_t    cMaxCpus;
+    /** The number of active CPUs at the time of initialization. */
+    uint16_t    cActiveCpus;
+    /** CPU set indexes for each CPU in the group. */
+    int16_t     aidxCpuSetMembers[64];
+}                                       g_aRtMpNtCpuGroups[RTCPUSET_MAX_CPUS];
+/** Maps CPU set indexes to RTCPUID.
+ * Inactive CPUs have bit 31 set (RTMPNT_ID_F_INACTIVE) so we can identify them
+ * and shuffle duplicates during CPU hotplugging.  We assign temporary IDs to
+ * the inactive CPUs starting at g_cRtMpNtMaxCpus - 1, ASSUMING that active
+ * CPUs have IDs from 0 to g_cRtMpNtActiveCpus. */
+RTCPUID                                 g_aidRtMpNtByCpuSetIdx[RTCPUSET_MAX_CPUS];
+/** The handle of the rtR0NtMpProcessorChangeCallback registration. */
+static PVOID                            g_pvMpCpuChangeCallback = NULL;
+
+
+/*********************************************************************************************************************************
+*   Internal Functions                                                                                                           *
+*********************************************************************************************************************************/
+static VOID __stdcall rtR0NtMpProcessorChangeCallback(void *pvUser, PKE_PROCESSOR_CHANGE_NOTIFY_CONTEXT pChangeCtx,
+                                                      PNTSTATUS prcOperationStatus);
+static int rtR0NtInitQueryGroupRelations(SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX **ppInfo);
+
+
+
+/**
+ * Initializes multiprocessor globals (called by rtR0InitNative).
+ *
+ * @returns IPRT status code.
+ * @param   pOsVerInfo          Version information.
+ */
+DECLHIDDEN(int) rtR0MpNtInit(RTNTSDBOSVER const *pOsVerInfo)
+{
+#define MY_CHECK_BREAK(a_Check, a_DbgPrintArgs) \
+        AssertMsgBreakStmt(a_Check, a_DbgPrintArgs, DbgPrint a_DbgPrintArgs; rc = VERR_INTERNAL_ERROR_4 )
+#define MY_CHECK_RETURN(a_Check, a_DbgPrintArgs, a_rcRet) \
+        AssertMsgReturnStmt(a_Check, a_DbgPrintArgs, DbgPrint a_DbgPrintArgs, a_rcRet)
+#define MY_CHECK(a_Check, a_DbgPrintArgs) \
+        AssertMsgStmt(a_Check, a_DbgPrintArgs, DbgPrint a_DbgPrintArgs; rc = VERR_INTERNAL_ERROR_4 )
+
+    /*
+     * API combination checks.
+     */
+    MY_CHECK_RETURN(!g_pfnrtKeSetTargetProcessorDpcEx || g_pfnrtKeGetProcessorNumberFromIndex,
+                    ("IPRT: Fatal: Missing KeSetTargetProcessorDpcEx without KeGetProcessorNumberFromIndex!\n"),
+                    VERR_SYMBOL_NOT_FOUND);
+
+    /*
+     * Get max number of processor groups.
+     *
+     * We may need to adjust this number upwards below, because windows likes to keep
+     * all options open when it comes to hotplugged CPU group assignments.  A
+     * server advertising up to 64 CPUs in the ACPI table will get a result of
+     * 64 from KeQueryMaximumGroupCount.  That makes sense.  However, when windows
+     * server 2012 does a two processor group setup for it, the sum of the
+     * GroupInfo[*].MaximumProcessorCount members below is 128.  This is probably
+     * because windows doesn't want to make decisions about the grouping of hotpluggable CPUs.
+     * So, we need to bump the maximum count to 128 below to deal with this as we
+     * want to have valid CPU set indexes for all potential CPUs - how could we
+     * otherwise use the RTMpGetSet() result and also RTCpuSetCount(RTMpGetSet())
+     * should equal RTMpGetCount().
+     */
+    if (g_pfnrtKeQueryMaximumGroupCount)
+    {
+        g_cRtMpNtMaxGroups = g_pfnrtKeQueryMaximumGroupCount();
+        MY_CHECK_RETURN(g_cRtMpNtMaxGroups <= RTCPUSET_MAX_CPUS && g_cRtMpNtMaxGroups > 0,
+                        ("IPRT: Fatal: g_cRtMpNtMaxGroups=%u, max %u\n", g_cRtMpNtMaxGroups, RTCPUSET_MAX_CPUS),
+                        VERR_MP_TOO_MANY_CPUS);
+    }
+    else
+        g_cRtMpNtMaxGroups = 1;
+
+    /*
+     * Get max number CPUs.
+     * This also defines the range of NT CPU indexes, RTCPUID and index into RTCPUSET.
+     */
+    if (g_pfnrtKeQueryMaximumProcessorCountEx)
+    {
+        g_cRtMpNtMaxCpus = g_pfnrtKeQueryMaximumProcessorCountEx(ALL_PROCESSOR_GROUPS);
+        MY_CHECK_RETURN(g_cRtMpNtMaxCpus <= RTCPUSET_MAX_CPUS && g_cRtMpNtMaxCpus > 0,
+                        ("IPRT: Fatal: g_cRtMpNtMaxCpus=%u, max %u [KeQueryMaximumProcessorCountEx]\n",
+                         g_cRtMpNtMaxGroups, RTCPUSET_MAX_CPUS),
+                        VERR_MP_TOO_MANY_CPUS);
+    }
+    else if (g_pfnrtKeQueryMaximumProcessorCount)
+    {
+        g_cRtMpNtMaxCpus = g_pfnrtKeQueryMaximumProcessorCount();
+        MY_CHECK_RETURN(g_cRtMpNtMaxCpus <= RTCPUSET_MAX_CPUS && g_cRtMpNtMaxCpus > 0,
+                        ("IPRT: Fatal: g_cRtMpNtMaxCpus=%u, max %u [KeQueryMaximumProcessorCount]\n",
+                         g_cRtMpNtMaxGroups, RTCPUSET_MAX_CPUS),
+                        VERR_MP_TOO_MANY_CPUS);
+    }
+    else if (g_pfnrtKeQueryActiveProcessors)
+    {
+        KAFFINITY fActiveProcessors = g_pfnrtKeQueryActiveProcessors();
+        MY_CHECK_RETURN(fActiveProcessors != 0,
+                        ("IPRT: Fatal: KeQueryActiveProcessors returned 0!\n"),
+                        VERR_INTERNAL_ERROR_2);
+        g_cRtMpNtMaxCpus = 0;
+        do
+        {
+            g_cRtMpNtMaxCpus++;
+            fActiveProcessors >>= 1;
+        } while (fActiveProcessors);
+    }
+    else
+        g_cRtMpNtMaxCpus = KeNumberProcessors;
+
+    /*
+     * Just because we're a bit paranoid about getting something wrong wrt the
+     * kernel interfaces, we try 16 times to get the KeQueryActiveProcessorCountEx
+     * and KeQueryLogicalProcessorRelationship information to match up.
+     */
+    for (unsigned cTries = 0;; cTries++)
+    {
+        /*
+         * Get number of active CPUs.
+         */
+        if (g_pfnrtKeQueryActiveProcessorCountEx)
+        {
+            g_cRtMpNtActiveCpus = g_pfnrtKeQueryActiveProcessorCountEx(ALL_PROCESSOR_GROUPS);
+            MY_CHECK_RETURN(g_cRtMpNtActiveCpus <= g_cRtMpNtMaxCpus && g_cRtMpNtActiveCpus > 0,
+                            ("IPRT: Fatal: g_cRtMpNtMaxGroups=%u, max %u [KeQueryActiveProcessorCountEx]\n",
+                             g_cRtMpNtMaxGroups, g_cRtMpNtMaxCpus),
+                            VERR_MP_TOO_MANY_CPUS);
+        }
+        else if (g_pfnrtKeQueryActiveProcessorCount)
+        {
+            g_cRtMpNtActiveCpus = g_pfnrtKeQueryActiveProcessorCount(NULL);
+            MY_CHECK_RETURN(g_cRtMpNtActiveCpus <= g_cRtMpNtMaxCpus && g_cRtMpNtActiveCpus > 0,
+                            ("IPRT: Fatal: g_cRtMpNtMaxGroups=%u, max %u [KeQueryActiveProcessorCount]\n",
+                             g_cRtMpNtMaxGroups, g_cRtMpNtMaxCpus),
+                            VERR_MP_TOO_MANY_CPUS);
+        }
+        else
+            g_cRtMpNtActiveCpus = g_cRtMpNtMaxCpus;
+
+        /*
+         * Query the details for the groups to figure out which CPUs are online as
+         * well as the NT index limit.
+         */
+        for (unsigned i = 0; i < RT_ELEMENTS(g_aidRtMpNtByCpuSetIdx); i++)
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+            g_aidRtMpNtByCpuSetIdx[i] = NIL_RTCPUID;
+#else
+            g_aidRtMpNtByCpuSetIdx[i] = i < g_cRtMpNtMaxCpus ? i : NIL_RTCPUID;
+#endif
+        for (unsigned idxGroup = 0; idxGroup < RT_ELEMENTS(g_aRtMpNtCpuGroups); idxGroup++)
+        {
+            g_aRtMpNtCpuGroups[idxGroup].cMaxCpus    = 0;
+            g_aRtMpNtCpuGroups[idxGroup].cActiveCpus = 0;
+            for (unsigned idxMember = 0; idxMember < RT_ELEMENTS(g_aRtMpNtCpuGroups[idxGroup].aidxCpuSetMembers); idxMember++)
+                g_aRtMpNtCpuGroups[idxGroup].aidxCpuSetMembers[idxMember] = -1;
+        }
+
+        if (g_pfnrtKeQueryLogicalProcessorRelationship)
+        {
+            MY_CHECK_RETURN(g_pfnrtKeGetProcessorIndexFromNumber,
+                            ("IPRT: Fatal: Found KeQueryLogicalProcessorRelationship but not KeGetProcessorIndexFromNumber!\n"),
+                            VERR_SYMBOL_NOT_FOUND);
+            MY_CHECK_RETURN(g_pfnrtKeGetProcessorNumberFromIndex,
+                            ("IPRT: Fatal: Found KeQueryLogicalProcessorRelationship but not KeGetProcessorIndexFromNumber!\n"),
+                            VERR_SYMBOL_NOT_FOUND);
+            MY_CHECK_RETURN(g_pfnrtKeSetTargetProcessorDpcEx,
+                            ("IPRT: Fatal: Found KeQueryLogicalProcessorRelationship but not KeSetTargetProcessorDpcEx!\n"),
+                            VERR_SYMBOL_NOT_FOUND);
+
+            SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *pInfo = NULL;
+            int rc = rtR0NtInitQueryGroupRelations(&pInfo);
+            if (RT_FAILURE(rc))
+                return rc;
+
+            MY_CHECK(pInfo->Group.MaximumGroupCount == g_cRtMpNtMaxGroups,
+                     ("IPRT: Fatal: MaximumGroupCount=%u != g_cRtMpNtMaxGroups=%u!\n",
+                      pInfo->Group.MaximumGroupCount, g_cRtMpNtMaxGroups));
+            MY_CHECK(pInfo->Group.ActiveGroupCount > 0 && pInfo->Group.ActiveGroupCount <= g_cRtMpNtMaxGroups,
+                     ("IPRT: Fatal: ActiveGroupCount=%u != g_cRtMpNtMaxGroups=%u!\n",
+                      pInfo->Group.ActiveGroupCount, g_cRtMpNtMaxGroups));
+
+            /*
+             * First we need to recalc g_cRtMpNtMaxCpus (see above).
+             */
+            uint32_t cMaxCpus = 0;
+            uint32_t idxGroup;
+            for (idxGroup = 0; RT_SUCCESS(rc) && idxGroup < pInfo->Group.ActiveGroupCount; idxGroup++)
+            {
+                const PROCESSOR_GROUP_INFO *pGrpInfo = &pInfo->Group.GroupInfo[idxGroup];
+                MY_CHECK_BREAK(pGrpInfo->MaximumProcessorCount <= MAXIMUM_PROC_PER_GROUP,
+                               ("IPRT: Fatal: MaximumProcessorCount=%u\n", pGrpInfo->MaximumProcessorCount));
+                MY_CHECK_BREAK(pGrpInfo->ActiveProcessorCount <= pGrpInfo->MaximumProcessorCount,
+                               ("IPRT: Fatal: ActiveProcessorCount=%u > MaximumProcessorCount=%u\n",
+                                pGrpInfo->ActiveProcessorCount, pGrpInfo->MaximumProcessorCount));
+                cMaxCpus += pGrpInfo->MaximumProcessorCount;
+            }
+            if (cMaxCpus > g_cRtMpNtMaxCpus && RT_SUCCESS(rc))
+            {
+                DbgPrint("IPRT: g_cRtMpNtMaxCpus=%u -> %u\n", g_cRtMpNtMaxCpus, cMaxCpus);
+#ifndef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+                uint32_t i = RT_MIN(cMaxCpus, RT_ELEMENTS(g_aidRtMpNtByCpuSetIdx));
+                while (i-- > g_cRtMpNtMaxCpus)
+                    g_aidRtMpNtByCpuSetIdx[i] = i;
+#endif
+                g_cRtMpNtMaxCpus = cMaxCpus;
+                if (g_cRtMpNtMaxGroups > RTCPUSET_MAX_CPUS)
+                {
+                    MY_CHECK(g_cRtMpNtMaxGroups <= RTCPUSET_MAX_CPUS && g_cRtMpNtMaxGroups > 0,
+                             ("IPRT: Fatal: g_cRtMpNtMaxGroups=%u, max %u\n", g_cRtMpNtMaxGroups, RTCPUSET_MAX_CPUS));
+                    rc = VERR_MP_TOO_MANY_CPUS;
+                }
+            }
+
+            /*
+             * Calc online mask, partition IDs and such.
+             *
+             * Also check ASSUMPTIONS:
+             *
+             *      1. Processor indexes going from 0 and up to
+             *         KeQueryMaximumProcessorCountEx(ALL_PROCESSOR_GROUPS) - 1.
+             *
+             *      2. Currently valid processor indexes, i.e. accepted by
+             *         KeGetProcessorIndexFromNumber & KeGetProcessorNumberFromIndex, goes
+             *         from 0 thru KeQueryActiveProcessorCountEx(ALL_PROCESSOR_GROUPS) - 1.
+             *
+             *      3. PROCESSOR_GROUP_INFO::MaximumProcessorCount gives the number of
+             *         relevant bits in the ActiveProcessorMask (from LSB).
+             *
+             *      4. Active processor count found in KeQueryLogicalProcessorRelationship
+             *         output matches what KeQueryActiveProcessorCountEx(ALL) returns.
+             *
+             *      5. Active + inactive processor counts in same does not exceed
+             *         KeQueryMaximumProcessorCountEx(ALL).
+             *
+             * Note! Processor indexes are assigned as CPUs come online and are not
+             *       preallocated according to group maximums.  Since CPUS are only taken
+             *       online and never offlined, this means that internal CPU bitmaps are
+             *       never sparse and no time is wasted scanning unused bits.
+             *
+             *       Unfortunately, it means that ring-3 cannot easily guess the index
+             *       assignments when hotswapping is used, and must use GIP when available.
+             */
+            RTCpuSetEmpty(&g_rtMpNtCpuSet);
+            uint32_t cInactive = 0;
+            uint32_t cActive   = 0;
+            uint32_t idxCpuMax = 0;
+            uint32_t idxCpuSetNextInactive = g_cRtMpNtMaxCpus - 1;
+            for (idxGroup = 0; RT_SUCCESS(rc) && idxGroup < pInfo->Group.ActiveGroupCount; idxGroup++)
+            {
+                const PROCESSOR_GROUP_INFO *pGrpInfo = &pInfo->Group.GroupInfo[idxGroup];
+                MY_CHECK_BREAK(pGrpInfo->MaximumProcessorCount <= MAXIMUM_PROC_PER_GROUP,
+                               ("IPRT: Fatal: MaximumProcessorCount=%u\n", pGrpInfo->MaximumProcessorCount));
+                MY_CHECK_BREAK(pGrpInfo->ActiveProcessorCount <= pGrpInfo->MaximumProcessorCount,
+                               ("IPRT: Fatal: ActiveProcessorCount=%u > MaximumProcessorCount=%u\n",
+                                pGrpInfo->ActiveProcessorCount, pGrpInfo->MaximumProcessorCount));
+
+                g_aRtMpNtCpuGroups[idxGroup].cMaxCpus    = pGrpInfo->MaximumProcessorCount;
+                g_aRtMpNtCpuGroups[idxGroup].cActiveCpus = pGrpInfo->ActiveProcessorCount;
+
+                for (uint32_t idxMember = 0; idxMember < pGrpInfo->MaximumProcessorCount; idxMember++)
+                {
+                    PROCESSOR_NUMBER ProcNum;
+                    ProcNum.Group    = (USHORT)idxGroup;
+                    ProcNum.Number   = (UCHAR)idxMember;
+                    ProcNum.Reserved = 0;
+                    ULONG idxCpu = g_pfnrtKeGetProcessorIndexFromNumber(&ProcNum);
+                    if (idxCpu != INVALID_PROCESSOR_INDEX)
+                    {
+                        MY_CHECK_BREAK(idxCpu < g_cRtMpNtMaxCpus && idxCpu < RTCPUSET_MAX_CPUS, /* ASSUMPTION #1 */
+                                       ("IPRT: Fatal: idxCpu=%u >= g_cRtMpNtMaxCpus=%u (RTCPUSET_MAX_CPUS=%u)\n",
+                                        idxCpu, g_cRtMpNtMaxCpus, RTCPUSET_MAX_CPUS));
+                        if (idxCpu > idxCpuMax)
+                            idxCpuMax = idxCpu;
+                        g_aRtMpNtCpuGroups[idxGroup].aidxCpuSetMembers[idxMember] = idxCpu;
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+                        g_aidRtMpNtByCpuSetIdx[idxCpu] = RTMPCPUID_FROM_GROUP_AND_NUMBER(idxGroup, idxMember);
+#endif
+
+                        ProcNum.Group    = UINT16_MAX;
+                        ProcNum.Number   = UINT8_MAX;
+                        ProcNum.Reserved = UINT8_MAX;
+                        NTSTATUS rcNt = g_pfnrtKeGetProcessorNumberFromIndex(idxCpu, &ProcNum);
+                        MY_CHECK_BREAK(NT_SUCCESS(rcNt),
+                                       ("IPRT: Fatal: KeGetProcessorNumberFromIndex(%u,) -> %#x!\n", idxCpu, rcNt));
+                        MY_CHECK_BREAK(ProcNum.Group == idxGroup && ProcNum.Number == idxMember,
+                                       ("IPRT: Fatal: KeGetProcessorXxxxFromYyyy roundtrip error for %#x! Group: %u vs %u, Number: %u vs %u\n",
+                                        idxCpu, ProcNum.Group, idxGroup, ProcNum.Number, idxMember));
+
+                        if (pGrpInfo->ActiveProcessorMask & RT_BIT_64(idxMember))
+                        {
+                            RTCpuSetAddByIndex(&g_rtMpNtCpuSet, idxCpu);
+                            cActive++;
+                        }
+                        else
+                            cInactive++; /* (This is a little unexpected, but not important as long as things add up below.) */
+                    }
+                    else
+                    {
+                        /* Must be not present / inactive when KeGetProcessorIndexFromNumber fails. */
+                        MY_CHECK_BREAK(!(pGrpInfo->ActiveProcessorMask & RT_BIT_64(idxMember)),
+                                       ("IPRT: Fatal: KeGetProcessorIndexFromNumber(%u/%u) failed but CPU is active! cMax=%u cActive=%u fActive=%p\n",
+                                        idxGroup, idxMember, pGrpInfo->MaximumProcessorCount, pGrpInfo->ActiveProcessorCount,
+                                        pGrpInfo->ActiveProcessorMask));
+                        cInactive++;
+                        if (idxCpuSetNextInactive >= g_cRtMpNtActiveCpus)
+                        {
+                            g_aRtMpNtCpuGroups[idxGroup].aidxCpuSetMembers[idxMember] = idxCpuSetNextInactive;
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+                            g_aidRtMpNtByCpuSetIdx[idxCpuSetNextInactive] = RTMPCPUID_FROM_GROUP_AND_NUMBER(idxGroup, idxMember)
+                                                                          | RTMPNT_ID_F_INACTIVE;
+#endif
+                            idxCpuSetNextInactive--;
+                        }
+                    }
+                }
+            }
+
+            MY_CHECK(cInactive + cActive <= g_cRtMpNtMaxCpus, /* ASSUMPTION #5 (not '==' because of inactive groups) */
+                     ("IPRT: Fatal: cInactive=%u + cActive=%u > g_cRtMpNtMaxCpus=%u\n", cInactive, cActive, g_cRtMpNtMaxCpus));
+
+            /* Deal with inactive groups using KeQueryMaximumProcessorCountEx or,
+               as best we can, by stipulating maximum member counts from the
+               previous group. */
+            if (   RT_SUCCESS(rc)
+                && idxGroup < pInfo->Group.MaximumGroupCount)
+            {
+                uint16_t cInactiveLeft = g_cRtMpNtMaxCpus - (cInactive + cActive);
+                while (idxGroup < pInfo->Group.MaximumGroupCount)
+                {
+                    uint32_t cMaxMembers = 0;
+                    if (g_pfnrtKeQueryMaximumProcessorCountEx)
+                        cMaxMembers = g_pfnrtKeQueryMaximumProcessorCountEx(idxGroup);
+                    if (cMaxMembers != 0 || cInactiveLeft == 0)
+                        AssertStmt(cMaxMembers <= cInactiveLeft, cMaxMembers = cInactiveLeft);
+                    else
+                    {
+                        uint16_t cGroupsLeft = pInfo->Group.MaximumGroupCount - idxGroup;
+                        cMaxMembers = pInfo->Group.GroupInfo[idxGroup - 1].MaximumProcessorCount;
+                        while (cMaxMembers * cGroupsLeft < cInactiveLeft)
+                            cMaxMembers++;
+                        if (cMaxMembers > cInactiveLeft)
+                            cMaxMembers = cInactiveLeft;
+                    }
+
+                    g_aRtMpNtCpuGroups[idxGroup].cMaxCpus    = (uint16_t)cMaxMembers;
+                    g_aRtMpNtCpuGroups[idxGroup].cActiveCpus = 0;
+                    for (uint16_t idxMember = 0; idxMember < cMaxMembers; idxMember++)
+                        if (idxCpuSetNextInactive >= g_cRtMpNtActiveCpus)
+                        {
+                            g_aRtMpNtCpuGroups[idxGroup].aidxCpuSetMembers[idxMember] = idxCpuSetNextInactive;
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+                            g_aidRtMpNtByCpuSetIdx[idxCpuSetNextInactive] = RTMPCPUID_FROM_GROUP_AND_NUMBER(idxGroup, idxMember)
+                                                                          | RTMPNT_ID_F_INACTIVE;
+#endif
+                            idxCpuSetNextInactive--;
+                        }
+                    cInactiveLeft -= cMaxMembers;
+                    idxGroup++;
+                }
+            }
+
+            /* We're done with pInfo now, free it so we can start returning when assertions fail. */
+            RTMemFree(pInfo);
+            if (RT_FAILURE(rc)) /* MY_CHECK_BREAK sets rc. */
+                return rc;
+            MY_CHECK_RETURN(cActive >= g_cRtMpNtActiveCpus,
+                            ("IPRT: Fatal: cActive=%u < g_cRtMpNtActiveCpus=%u - CPUs removed?\n", cActive, g_cRtMpNtActiveCpus),
+                            VERR_INTERNAL_ERROR_3);
+            MY_CHECK_RETURN(idxCpuMax < cActive, /* ASSUMPTION #2 */
+                            ("IPRT: Fatal: idCpuMax=%u >= cActive=%u! Unexpected CPU index allocation. CPUs removed?\n",
+                             idxCpuMax, cActive),
+                            VERR_INTERNAL_ERROR_4);
+
+            /* Retry if CPUs were added. */
+            if (   cActive != g_cRtMpNtActiveCpus
+                && cTries < 16)
+                continue;
+            MY_CHECK_RETURN(cActive == g_cRtMpNtActiveCpus, /* ASSUMPTION #4 */
+                            ("IPRT: Fatal: cActive=%u != g_cRtMpNtActiveCpus=%u\n", cActive, g_cRtMpNtActiveCpus),
+                            VERR_INTERNAL_ERROR_5);
+        }
+        else
+        {
+            /* Legacy: */
+            MY_CHECK_RETURN(g_cRtMpNtMaxGroups == 1, ("IPRT: Fatal: Missing KeQueryLogicalProcessorRelationship!\n"),
+                            VERR_SYMBOL_NOT_FOUND);
+
+            /** @todo Is it possible that the affinity mask returned by
+             *        KeQueryActiveProcessors is sparse? */
+            if (g_pfnrtKeQueryActiveProcessors)
+                RTCpuSetFromU64(&g_rtMpNtCpuSet, g_pfnrtKeQueryActiveProcessors());
+            else if (g_cRtMpNtMaxCpus < 64)
+                RTCpuSetFromU64(&g_rtMpNtCpuSet, (UINT64_C(1) << g_cRtMpNtMaxCpus) - 1);
+            else
+            {
+                MY_CHECK_RETURN(g_cRtMpNtMaxCpus == 64, ("IPRT: Fatal: g_cRtMpNtMaxCpus=%u, expect 64 or less\n", g_cRtMpNtMaxCpus),
+                                VERR_MP_TOO_MANY_CPUS);
+                RTCpuSetFromU64(&g_rtMpNtCpuSet, UINT64_MAX);
+            }
+
+            g_aRtMpNtCpuGroups[0].cMaxCpus    = g_cRtMpNtMaxCpus;
+            g_aRtMpNtCpuGroups[0].cActiveCpus = g_cRtMpNtMaxCpus;
+            for (unsigned i = 0; i < g_cRtMpNtMaxCpus; i++)
+            {
+                g_aRtMpNtCpuGroups[0].aidxCpuSetMembers[i] = i;
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+                g_aidRtMpNtByCpuSetIdx[i] = RTMPCPUID_FROM_GROUP_AND_NUMBER(0, i);
+#endif
+            }
+        }
+
+        /*
+         * Register CPU hot plugging callback (it also counts active CPUs).
+         */
+        Assert(g_pvMpCpuChangeCallback == NULL);
+        if (g_pfnrtKeRegisterProcessorChangeCallback)
+        {
+            MY_CHECK_RETURN(g_pfnrtKeDeregisterProcessorChangeCallback,
+                            ("IPRT: Fatal: KeRegisterProcessorChangeCallback without KeDeregisterProcessorChangeCallback!\n"),
+                            VERR_SYMBOL_NOT_FOUND);
+
+            RTCPUSET const ActiveSetCopy = g_rtMpNtCpuSet;
+            RTCpuSetEmpty(&g_rtMpNtCpuSet);
+            uint32_t const cActiveCpus   = g_cRtMpNtActiveCpus;
+            g_cRtMpNtActiveCpus = 0;
+
+            g_pvMpCpuChangeCallback = g_pfnrtKeRegisterProcessorChangeCallback(rtR0NtMpProcessorChangeCallback, NULL /*pvUser*/,
+                                                                               KE_PROCESSOR_CHANGE_ADD_EXISTING);
+            if (g_pvMpCpuChangeCallback)
+            {
+                if (cActiveCpus == g_cRtMpNtActiveCpus)
+                { /* likely */ }
+                else
+                {
+                    g_pfnrtKeDeregisterProcessorChangeCallback(g_pvMpCpuChangeCallback);
+                    if (cTries < 16)
+                    {
+                        /* Retry if CPUs were added. */
+                        MY_CHECK_RETURN(g_cRtMpNtActiveCpus >= cActiveCpus,
+                                        ("IPRT: Fatal: g_cRtMpNtActiveCpus=%u < cActiveCpus=%u! CPUs removed?\n",
+                                         g_cRtMpNtActiveCpus, cActiveCpus),
+                                        VERR_INTERNAL_ERROR_2);
+                        MY_CHECK_RETURN(g_cRtMpNtActiveCpus <= g_cRtMpNtMaxCpus,
+                                        ("IPRT: Fatal: g_cRtMpNtActiveCpus=%u > g_cRtMpNtMaxCpus=%u!\n",
+                                         g_cRtMpNtActiveCpus, g_cRtMpNtMaxCpus),
+                                        VERR_INTERNAL_ERROR_2);
+                        continue;
+                    }
+                    MY_CHECK_RETURN(0, ("IPRT: Fatal: g_cRtMpNtActiveCpus=%u cActiveCpus=%u\n", g_cRtMpNtActiveCpus, cActiveCpus),
+                                    VERR_INTERNAL_ERROR_3);
+                }
+            }
+            else
+            {
+                AssertFailed();
+                g_rtMpNtCpuSet      = ActiveSetCopy;
+                g_cRtMpNtActiveCpus = cActiveCpus;
+            }
+        }
+        break;
+    } /* Retry loop for stable active CPU count. */
+
+#undef MY_CHECK_RETURN
+
+    /*
+     * Special IPI fun for RTMpPokeCpu.
+     *
+     * On Vista and later the DPC method doesn't seem to reliably send IPIs,
+     * so we have to use alternative methods.
+     *
+     * On AMD64 We used to use the HalSendSoftwareInterrupt API (also x86 on
+     * W10+), it looks faster and more convenient to use, however we're either
+     * using it wrong or it doesn't reliably do what we want (see @bugref{8343}).
+     *
+     * The HalRequestIpi API is thus far the only alternative to KeInsertQueueDpc
+     * for doing targeted IPIs.  Trouble with this API is that it changed
+     * fundamentally in Windows 7 when they added support for lots of processors.
+     *
+     * If we really think we cannot use KeInsertQueueDpc, we use the broadcast IPI
+     * API KeIpiGenericCall.
+     */
+    if (   pOsVerInfo->uMajorVer > 6
+        || (pOsVerInfo->uMajorVer == 6 && pOsVerInfo->uMinorVer > 0))
+        g_pfnrtHalRequestIpiPreW7 = NULL;
+    else
+        g_pfnrtHalRequestIpiW7Plus = NULL;
+
+    g_pfnrtMpPokeCpuWorker = rtMpPokeCpuUsingDpc;
+#ifndef IPRT_TARGET_NT4
+    if (   g_pfnrtHalRequestIpiW7Plus
+        && g_pfnrtKeInitializeAffinityEx
+        && g_pfnrtKeAddProcessorAffinityEx
+        && g_pfnrtKeGetProcessorIndexFromNumber)
+    {
+        DbgPrint("IPRT: RTMpPoke => rtMpPokeCpuUsingHalReqestIpiW7Plus\n");
+        g_pfnrtMpPokeCpuWorker = rtMpPokeCpuUsingHalReqestIpiW7Plus;
+    }
+    else if (pOsVerInfo->uMajorVer >= 6 && g_pfnrtKeIpiGenericCall)
+    {
+        DbgPrint("IPRT: RTMpPoke => rtMpPokeCpuUsingBroadcastIpi\n");
+        g_pfnrtMpPokeCpuWorker = rtMpPokeCpuUsingBroadcastIpi;
+    }
+    else
+        DbgPrint("IPRT: RTMpPoke => rtMpPokeCpuUsingDpc\n");
+    /* else: Windows XP should always send an IPI -> VERIFY */
+#endif
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Called by rtR0TermNative.
+ */
+DECLHIDDEN(void) rtR0MpNtTerm(void)
+{
+    /*
+     * Deregister the processor change callback.
+     */
+    PVOID pvMpCpuChangeCallback = g_pvMpCpuChangeCallback;
+    g_pvMpCpuChangeCallback = NULL;
+    if (pvMpCpuChangeCallback)
+    {
+        AssertReturnVoid(g_pfnrtKeDeregisterProcessorChangeCallback);
+        g_pfnrtKeDeregisterProcessorChangeCallback(pvMpCpuChangeCallback);
+    }
+}
+
+
+DECLHIDDEN(int) rtR0MpNotificationNativeInit(void)
+{
+    return VINF_SUCCESS;
+}
+
+
+DECLHIDDEN(void) rtR0MpNotificationNativeTerm(void)
+{
+}
+
+
+/**
+ * Implements the NT PROCESSOR_CALLBACK_FUNCTION callback function.
+ *
+ * This maintains the g_rtMpNtCpuSet and drives the MP notification callbacks.  When
+ * registered, it's called for each active CPU in the system, avoiding racing
+ * CPU hotplugging (as well as testing the callback).
+ *
+ * @param   pvUser              User context (not used).
+ * @param   pChangeCtx          Change context (in).
+ * @param   prcOperationStatus  Operation status (in/out).
+ *
+ * @remarks ASSUMES no concurrent execution of KeProcessorAddCompleteNotify
+ *          notification callbacks.  At least during callback registration
+ *          callout, we're owning KiDynamicProcessorLock.
+ *
+ * @remarks When registering the handler, we first get KeProcessorAddStartNotify
+ *          callbacks for all active CPUs, and after they all succeed we get the
+ *          KeProcessorAddCompleteNotify callbacks.
+ */
+static VOID __stdcall rtR0NtMpProcessorChangeCallback(void *pvUser, PKE_PROCESSOR_CHANGE_NOTIFY_CONTEXT pChangeCtx,
+                                                      PNTSTATUS prcOperationStatus)
+{
+    RT_NOREF(pvUser, prcOperationStatus);
+    switch (pChangeCtx->State)
+    {
+        /*
+         * Check whether we can deal with the CPU, failing the start operation if we
+         * can't.  The checks we are doing here are to avoid complicated/impossible
+         * cases in KeProcessorAddCompleteNotify.  They are really just verifying specs.
+         */
+        case KeProcessorAddStartNotify:
+        {
+            NTSTATUS rcNt = STATUS_SUCCESS;
+            if (pChangeCtx->NtNumber < RTCPUSET_MAX_CPUS)
+            {
+                if (pChangeCtx->NtNumber >= g_cRtMpNtMaxCpus)
+                {
+                    DbgPrint("IPRT: KeProcessorAddStartNotify failure: NtNumber=%u is higher than the max CPU count (%u)!\n",
+                             pChangeCtx->NtNumber, g_cRtMpNtMaxCpus);
+                    rcNt = STATUS_INTERNAL_ERROR;
+                }
+
+                /* The ProcessNumber field was introduced in Windows 7. */
+                PROCESSOR_NUMBER ProcNum;
+                if (g_pfnrtKeGetProcessorIndexFromNumber)
+                {
+                    ProcNum = pChangeCtx->ProcNumber;
+                    KEPROCESSORINDEX idxCpu = g_pfnrtKeGetProcessorIndexFromNumber(&ProcNum);
+                    if (idxCpu != pChangeCtx->NtNumber)
+                    {
+                        DbgPrint("IPRT: KeProcessorAddStartNotify failure: g_pfnrtKeGetProcessorIndexFromNumber(%u.%u) -> %u, expected %u!\n",
+                                 ProcNum.Group, ProcNum.Number, idxCpu, pChangeCtx->NtNumber);
+                        rcNt = STATUS_INTERNAL_ERROR;
+                    }
+                }
+                else
+                {
+                    ProcNum.Group  = 0;
+                    ProcNum.Number = pChangeCtx->NtNumber;
+                }
+
+                if (   ProcNum.Group  < RT_ELEMENTS(g_aRtMpNtCpuGroups)
+                    && ProcNum.Number < RT_ELEMENTS(g_aRtMpNtCpuGroups[0].aidxCpuSetMembers))
+                {
+                    if (ProcNum.Group >= g_cRtMpNtMaxGroups)
+                    {
+                        DbgPrint("IPRT: KeProcessorAddStartNotify failure: %u.%u is out of range - max groups: %u!\n",
+                                 ProcNum.Group, ProcNum.Number, g_cRtMpNtMaxGroups);
+                        rcNt = STATUS_INTERNAL_ERROR;
+                    }
+
+                    if (ProcNum.Number < g_aRtMpNtCpuGroups[ProcNum.Group].cMaxCpus)
+                    {
+                        Assert(g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers[ProcNum.Number] != -1);
+                        if (g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers[ProcNum.Number] == -1)
+                        {
+                            DbgPrint("IPRT: KeProcessorAddStartNotify failure: Internal error! %u.%u was assigned -1 as set index!\n",
+                                     ProcNum.Group, ProcNum.Number);
+                            rcNt = STATUS_INTERNAL_ERROR;
+                        }
+
+                        Assert(g_aidRtMpNtByCpuSetIdx[pChangeCtx->NtNumber] != NIL_RTCPUID);
+                        if (g_aidRtMpNtByCpuSetIdx[pChangeCtx->NtNumber] == NIL_RTCPUID)
+                        {
+                            DbgPrint("IPRT: KeProcessorAddStartNotify failure: Internal error! %u (%u.%u) translates to NIL_RTCPUID!\n",
+                                     pChangeCtx->NtNumber, ProcNum.Group, ProcNum.Number);
+                            rcNt = STATUS_INTERNAL_ERROR;
+                        }
+                    }
+                    else
+                    {
+                        DbgPrint("IPRT: KeProcessorAddStartNotify failure: max processors in group %u is %u, cannot add %u.%u to it!\n",
+                                 ProcNum.Group, g_aRtMpNtCpuGroups[ProcNum.Group].cMaxCpus, ProcNum.Group, ProcNum.Number);
+                        rcNt = STATUS_INTERNAL_ERROR;
+                    }
+                }
+                else
+                {
+                    DbgPrint("IPRT: KeProcessorAddStartNotify failure: %u.%u is out of range (max %u.%u)!\n",
+                             ProcNum.Group, ProcNum.Number, RT_ELEMENTS(g_aRtMpNtCpuGroups), RT_ELEMENTS(g_aRtMpNtCpuGroups[0].aidxCpuSetMembers));
+                    rcNt = STATUS_INTERNAL_ERROR;
+                }
+            }
+            else
+            {
+                DbgPrint("IPRT: KeProcessorAddStartNotify failure: NtNumber=%u is outside RTCPUSET_MAX_CPUS (%u)!\n",
+                         pChangeCtx->NtNumber, RTCPUSET_MAX_CPUS);
+                rcNt = STATUS_INTERNAL_ERROR;
+            }
+            if (!NT_SUCCESS(rcNt))
+                *prcOperationStatus = rcNt;
+            break;
+        }
+
+        /*
+         * Update the globals.  Since we've checked the range limits and other
+         * limitations already we just AssertBreak here.
+         */
+        case KeProcessorAddCompleteNotify:
+        {
+            /*
+             * Calc the processor number and assert conditions checked in KeProcessorAddStartNotify.
+             */
+            AssertBreak(pChangeCtx->NtNumber < RTCPUSET_MAX_CPUS);
+            AssertBreak(pChangeCtx->NtNumber < g_cRtMpNtMaxCpus);
+            Assert(pChangeCtx->NtNumber == g_cRtMpNtActiveCpus); /* light assumption */
+            PROCESSOR_NUMBER ProcNum;
+            if (g_pfnrtKeGetProcessorIndexFromNumber)
+            {
+                ProcNum = pChangeCtx->ProcNumber;
+                AssertBreak(g_pfnrtKeGetProcessorIndexFromNumber(&ProcNum) == pChangeCtx->NtNumber);
+                AssertBreak(ProcNum.Group < RT_ELEMENTS(g_aRtMpNtCpuGroups));
+                AssertBreak(ProcNum.Group < g_cRtMpNtMaxGroups);
+            }
+            else
+            {
+                ProcNum.Group  = 0;
+                ProcNum.Number = pChangeCtx->NtNumber;
+            }
+            AssertBreak(ProcNum.Number < RT_ELEMENTS(g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers));
+            AssertBreak(ProcNum.Number < g_aRtMpNtCpuGroups[ProcNum.Group].cMaxCpus);
+            AssertBreak(g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers[ProcNum.Number] != -1);
+            AssertBreak(g_aidRtMpNtByCpuSetIdx[pChangeCtx->NtNumber] != NIL_RTCPUID);
+
+            /*
+             * Add ourselves to the online CPU set and update the active CPU count.
+             */
+            RTCpuSetAddByIndex(&g_rtMpNtCpuSet, pChangeCtx->NtNumber);
+            ASMAtomicIncU32(&g_cRtMpNtActiveCpus);
+
+            /*
+             * Update the group info.
+             *
+             * If the index prediction failed (real hotplugging callbacks only) we
+             * have to switch it around.  This is particularly annoying when we
+             * use the index as the ID.
+             */
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+            RTCPUID idCpu = RTMPCPUID_FROM_GROUP_AND_NUMBER(ProcNum.Group, ProcNum.Number);
+            RTCPUID idOld = g_aidRtMpNtByCpuSetIdx[pChangeCtx->NtNumber];
+            if ((idOld & ~RTMPNT_ID_F_INACTIVE) != idCpu)
+            {
+                Assert(idOld & RTMPNT_ID_F_INACTIVE);
+                int idxDest = g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers[ProcNum.Number];
+                g_aRtMpNtCpuGroups[rtMpCpuIdGetGroup(idOld)].aidxCpuSetMembers[rtMpCpuIdGetGroupMember(idOld)] = idxDest;
+                g_aidRtMpNtByCpuSetIdx[idxDest] = idOld;
+            }
+            g_aidRtMpNtByCpuSetIdx[pChangeCtx->NtNumber] = idCpu;
+#else
+            Assert(g_aidRtMpNtByCpuSetIdx[pChangeCtx->NtNumber] == pChangeCtx->NtNumber);
+            int idxDest = g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers[ProcNum.Number];
+            if ((ULONG)idxDest != pChangeCtx->NtNumber)
+            {
+                bool     fFound = false;
+                uint32_t idxOldGroup = g_cRtMpNtMaxGroups;
+                while (idxOldGroup-- > 0 && !fFound)
+                {
+                    uint32_t idxMember = g_aRtMpNtCpuGroups[idxOldGroup].cMaxCpus;
+                    while (idxMember-- > 0)
+                        if (g_aRtMpNtCpuGroups[idxOldGroup].aidxCpuSetMembers[idxMember] == (int)pChangeCtx->NtNumber)
+                        {
+                            g_aRtMpNtCpuGroups[idxOldGroup].aidxCpuSetMembers[idxMember] = idxDest;
+                            fFound = true;
+                            break;
+                        }
+                }
+                Assert(fFound);
+            }
+#endif
+            g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers[ProcNum.Number] = pChangeCtx->NtNumber;
+
+            /*
+             * Do MP notification callbacks.
+             */
+            rtMpNotificationDoCallbacks(RTMPEVENT_ONLINE, pChangeCtx->NtNumber);
+            break;
+        }
+
+        case KeProcessorAddFailureNotify:
+            /* ignore */
+            break;
+
+        default:
+            AssertMsgFailed(("State=%u\n", pChangeCtx->State));
+    }
+}
+
+
+/**
+ * Wrapper around KeQueryLogicalProcessorRelationship.
+ *
+ * @returns IPRT status code.
+ * @param   ppInfo  Where to return the info. Pass to RTMemFree when done.
+ */
+static int rtR0NtInitQueryGroupRelations(SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX **ppInfo)
+{
+    ULONG    cbInfo = sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX)
+                    + g_cRtMpNtMaxGroups * sizeof(GROUP_RELATIONSHIP);
+    NTSTATUS rcNt;
+    do
+    {
+        SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *pInfo = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *)RTMemAlloc(cbInfo);
+        if (pInfo)
+        {
+            rcNt = g_pfnrtKeQueryLogicalProcessorRelationship(NULL /*pProcNumber*/, RelationGroup, pInfo, &cbInfo);
+            if (NT_SUCCESS(rcNt))
+            {
+                *ppInfo = pInfo;
+                return VINF_SUCCESS;
+            }
+
+            RTMemFree(pInfo);
+            pInfo = NULL;
+        }
+        else
+            rcNt = STATUS_NO_MEMORY;
+    } while (rcNt == STATUS_INFO_LENGTH_MISMATCH);
+    DbgPrint("IPRT: Fatal: KeQueryLogicalProcessorRelationship failed: %#x\n", rcNt);
+    AssertMsgFailed(("KeQueryLogicalProcessorRelationship failed: %#x\n", rcNt));
+    return RTErrConvertFromNtStatus(rcNt);
+}
+
+
+
+
 
 RTDECL(RTCPUID) RTMpCpuId(void)
 {
     Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
+
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+    PROCESSOR_NUMBER ProcNum;
+    ProcNum.Group = 0;
+    if (g_pfnrtKeGetCurrentProcessorNumberEx)
+    {
+        ProcNum.Number = 0;
+        g_pfnrtKeGetCurrentProcessorNumberEx(&ProcNum);
+    }
+    else
+        ProcNum.Number = KeGetCurrentProcessorNumber(); /* Number is 8-bit, so we're not subject to BYTE -> WORD upgrade in WDK.  */
+    return RTMPCPUID_FROM_GROUP_AND_NUMBER(ProcNum.Group, ProcNum.Number);
+
+#else
 
     if (g_pfnrtKeGetCurrentProcessorNumberEx)
@@ -87,6 +917,6 @@
     }
 
-    /* WDK upgrade warning: PCR->Number changed from BYTE to WORD. */
-    return KeGetCurrentProcessorNumber();
+    return (uint8_t)KeGetCurrentProcessorNumber(); /* PCR->Number was changed from BYTE to WORD in the WDK, thus the cast. */
+#endif
 }
 
@@ -94,5 +924,17 @@
 RTDECL(int) RTMpCurSetIndex(void)
 {
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+    Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
+
+    if (g_pfnrtKeGetCurrentProcessorNumberEx)
+    {
+        KEPROCESSORINDEX idxCpu = g_pfnrtKeGetCurrentProcessorNumberEx(NULL);
+        Assert(idxCpu < RTCPUSET_MAX_CPUS);
+        return idxCpu;
+    }
+    return (uint8_t)KeGetCurrentProcessorNumber(); /* PCR->Number was changed from BYTE to WORD in the WDK, thus the cast. */
+#else
     return (int)RTMpCpuId();
+#endif
 }
 
@@ -100,5 +942,15 @@
 RTDECL(int) RTMpCurSetIndexAndId(PRTCPUID pidCpu)
 {
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+    Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
+
+    PROCESSOR_NUMBER ProcNum = { 0 , 0,  0 };
+    KEPROCESSORINDEX idxCpu = g_pfnrtKeGetCurrentProcessorNumberEx(&ProcNum);
+    Assert(idxCpu < RTCPUSET_MAX_CPUS);
+    *pidCpu = RTMPCPUID_FROM_GROUP_AND_NUMBER(ProcNum.Group, ProcNum.Number);
+    return idxCpu;
+#else
     return *pidCpu = RTMpCpuId();
+#endif
 }
 
@@ -106,6 +958,37 @@
 RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu)
 {
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+    Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
+
+    if (idCpu != NIL_RTCPUID)
+    {
+        if (g_pfnrtKeGetProcessorIndexFromNumber)
+        {
+            PROCESSOR_NUMBER ProcNum;
+            ProcNum.Group    = rtMpCpuIdGetGroup(idCpu);
+            ProcNum.Number   = rtMpCpuIdGetGroupMember(idCpu);
+            ProcNum.Reserved = 0;
+            KEPROCESSORINDEX idxCpu = g_pfnrtKeGetProcessorIndexFromNumber(&ProcNum);
+            if (idxCpu != INVALID_PROCESSOR_INDEX)
+            {
+                Assert(idxCpu < g_cRtMpNtMaxCpus);
+                Assert((ULONG)g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers[ProcNum.Number] == idxCpu);
+                return idxCpu;
+            }
+
+            /* Since NT assigns indexes as the CPUs come online, we cannot produce a consistent
+               ID <-> index mapping for not-yet-onlined CPUs.  We just have to do our best... */
+            if (   ProcNum.Group < g_cRtMpNtMaxGroups
+                && ProcNum.Number < g_aRtMpNtCpuGroups[ProcNum.Group].cMaxCpus)
+                return g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers[ProcNum.Number];
+        }
+        else if (rtMpCpuIdGetGroup(idCpu) == 0)
+            return rtMpCpuIdGetGroupMember(idCpu);
+    }
+    return -1;
+#else
     /* 1:1 mapping, just do range checks. */
     return idCpu < RTCPUSET_MAX_CPUS ? (int)idCpu : -1;
+#endif
 }
 
@@ -113,6 +996,59 @@
 RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu)
 {
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+    Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
+
+    if ((unsigned)iCpu < g_cRtMpNtMaxCpus)
+    {
+        if (g_pfnrtKeGetProcessorIndexFromNumber)
+        {
+            PROCESSOR_NUMBER ProcNum = { 0, 0, 0 };
+            NTSTATUS rcNt = g_pfnrtKeGetProcessorNumberFromIndex(iCpu, &ProcNum);
+            if (NT_SUCCESS(rcNt))
+            {
+                Assert(ProcNum.Group <= g_cRtMpNtMaxGroups);
+                Assert(   (g_aidRtMpNtByCpuSetIdx[iCpu] & ~RTMPNT_ID_F_INACTIVE)
+                       == RTMPCPUID_FROM_GROUP_AND_NUMBER(ProcNum.Group, ProcNum.Number));
+                return RTMPCPUID_FROM_GROUP_AND_NUMBER(ProcNum.Group, ProcNum.Number);
+            }
+        }
+        return g_aidRtMpNtByCpuSetIdx[iCpu];
+    }
+    return NIL_RTCPUID;
+#else
     /* 1:1 mapping, just do range checks. */
     return (unsigned)iCpu < RTCPUSET_MAX_CPUS ? iCpu : NIL_RTCPUID;
+#endif
+}
+
+
+RTDECL(int) RTMpSetIndexFromCpuGroupMember(uint32_t idxGroup, uint32_t idxMember)
+{
+    Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
+
+    if (idxGroup < g_cRtMpNtMaxGroups)
+        if (idxMember < g_aRtMpNtCpuGroups[idxGroup].cMaxCpus)
+            return g_aRtMpNtCpuGroups[idxGroup].aidxCpuSetMembers[idxMember];
+    return -1;
+}
+
+
+RTDECL(uint32_t) RTMpGetCpuGroupCounts(uint32_t idxGroup, uint32_t *pcActive)
+{
+    if (idxGroup < g_cRtMpNtMaxGroups)
+    {
+        if (pcActive)
+            *pcActive = g_aRtMpNtCpuGroups[idxGroup].cActiveCpus;
+        return g_aRtMpNtCpuGroups[idxGroup].cMaxCpus;
+    }
+    if (pcActive)
+        *pcActive = 0;
+    return 0;
+}
+
+
+RTDECL(uint32_t) RTMpGetMaxCpuGroupCount(void)
+{
+    return g_cRtMpNtMaxGroups;
 }
 
@@ -122,7 +1058,11 @@
     Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
 
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+    return RTMPCPUID_FROM_GROUP_AND_NUMBER(g_cRtMpNtMaxGroups - 1, g_aRtMpNtCpuGroups[g_cRtMpNtMaxGroups - 1].cMaxCpus - 1);
+#else
     /* According to MSDN the processor indexes goes from 0 to the maximum
        number of CPUs in the system.  We've check this in initterm-r0drv-nt.cpp. */
     return g_cRtMpNtMaxCpus - 1;
+#endif
 }
 
@@ -131,6 +1071,5 @@
 {
     Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
-    return idCpu < RTCPUSET_MAX_CPUS
-        && RTCpuSetIsMember(&g_rtMpNtCpuSet, idCpu);
+    return RTCpuSetIsMember(&g_rtMpNtCpuSet, idCpu);
 }
 
@@ -140,7 +1079,18 @@
     Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
 
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+    if (idCpu != NIL_RTCPUID)
+    {
+        unsigned idxGroup = rtMpCpuIdGetGroup(idCpu);
+        if (idxGroup < g_cRtMpNtMaxGroups)
+            return rtMpCpuIdGetGroupMember(idCpu) < g_aRtMpNtCpuGroups[idxGroup].cMaxCpus;
+    }
+    return false;
+
+#else
     /* A possible CPU ID is one with a value lower than g_cRtMpNtMaxCpus (see
        comment in RTMpGetMaxCpuId). */
     return idCpu < g_cRtMpNtMaxCpus;
+#endif
 }
 
@@ -183,4 +1133,12 @@
     return RTCpuSetCount(&Set);
 }
+
+
+RTDECL(RTCPUID) RTMpGetOnlineCoreCount(void)
+{
+    /** @todo fix me */
+    return RTMpGetOnlineCount();
+}
+
 
 
@@ -372,5 +1330,5 @@
            the reverse conversion internally). */
         PROCESSOR_NUMBER ProcNum;
-        NTSTATUS rcNt = g_pfnrtKeGetProcessorNumberFromIndex(idCpu, &ProcNum);
+        NTSTATUS rcNt = g_pfnrtKeGetProcessorNumberFromIndex(RTMpCpuIdToSetIndex(idCpu), &ProcNum);
         AssertMsgReturn(NT_SUCCESS(rcNt),
                         ("KeGetProcessorNumberFromIndex(%u) -> %#x\n", idCpu, rcNt),
@@ -383,5 +1341,5 @@
     }
     else
-        KeSetTargetProcessorDpc(pDpc, (int)idCpu);
+        KeSetTargetProcessorDpc(pDpc, RTMpCpuIdToSetIndex(idCpu));
     return VINF_SUCCESS;
 }
@@ -410,7 +1368,4 @@
 
 #else  /* !IPRT_TARGET_NT4 */
-    PRTMPARGS pArgs;
-    KDPC     *paExecCpuDpcs;
-
 # if 0
     /* KeFlushQueuedDpcs must be run at IRQL PASSIVE_LEVEL according to MSDN, but the
@@ -419,12 +1374,36 @@
     AssertMsg(KeGetCurrentIrql() == PASSIVE_LEVEL, ("%d != %d (PASSIVE_LEVEL)\n", KeGetCurrentIrql(), PASSIVE_LEVEL));
 # endif
-
-    KAFFINITY Mask = KeQueryActiveProcessors();
-
     /* KeFlushQueuedDpcs is not present in Windows 2000; import it dynamically so we can just fail this call. */
     if (!g_pfnrtNtKeFlushQueuedDpcs)
         return VERR_NOT_SUPPORTED;
 
-    pArgs = (PRTMPARGS)ExAllocatePoolWithTag(NonPagedPool, g_cRtMpNtMaxCpus * sizeof(KDPC) + sizeof(RTMPARGS), (ULONG)'RTMp');
+    /*
+     * Make a copy of the active CPU set and figure out how many KDPCs we really need.
+     * We must not try setup DPCs for CPUs which aren't there, because that may fail.
+     */
+    RTCPUSET  OnlineSet = g_rtMpNtCpuSet;
+    uint32_t  cDpcsNeeded;
+    switch (enmCpuid)
+    {
+        case RT_NT_CPUID_SPECIFIC:
+            cDpcsNeeded = 1;
+            break;
+        case RT_NT_CPUID_PAIR:
+            cDpcsNeeded = 2;
+            break;
+        default:
+            do
+            {
+                cDpcsNeeded = g_cRtMpNtActiveCpus;
+                OnlineSet   = g_rtMpNtCpuSet;
+            } while (cDpcsNeeded != g_cRtMpNtActiveCpus);
+            break;
+    }
+
+    /*
+     * Allocate an RTMPARGS structure followed by cDpcsNeeded KDPCs
+     * and initialize them.
+     */
+    PRTMPARGS pArgs = (PRTMPARGS)ExAllocatePoolWithTag(NonPagedPool, sizeof(RTMPARGS) + cDpcsNeeded * sizeof(KDPC), (ULONG)'RTMp');
     if (!pArgs)
         return VERR_NO_MEMORY;
@@ -438,7 +1417,6 @@
     pArgs->cRefs     = 1;
 
-    paExecCpuDpcs = (KDPC *)(pArgs + 1);
-
     int rc;
+    KDPC *paExecCpuDpcs = (KDPC *)(pArgs + 1);
     if (enmCpuid == RT_NT_CPUID_SPECIFIC)
     {
@@ -464,10 +1442,11 @@
     {
         rc = VINF_SUCCESS;
-        for (unsigned i = 0; i < g_cRtMpNtMaxCpus && RT_SUCCESS(rc); i++)
-        {
-            KeInitializeDpc(&paExecCpuDpcs[i], rtmpNtDPCWrapper, pArgs);
-            KeSetImportanceDpc(&paExecCpuDpcs[i], HighImportance);
-            rc = rtMpNtSetTargetProcessorDpc(&paExecCpuDpcs[i], i);
-        }
+        for (uint32_t i = 0; i < cDpcsNeeded && RT_SUCCESS(rc); i++)
+            if (RTCpuSetIsMemberByIndex(&OnlineSet, i))
+            {
+                KeInitializeDpc(&paExecCpuDpcs[i], rtmpNtDPCWrapper, pArgs);
+                KeSetImportanceDpc(&paExecCpuDpcs[i], HighImportance);
+                rc = rtMpNtSetTargetProcessorDpc(&paExecCpuDpcs[i], RTMpCpuIdFromSetIndex(i));
+            }
     }
     if (RT_FAILURE(rc))
@@ -477,5 +1456,6 @@
     }
 
-    /* Raise the IRQL to DISPATCH_LEVEL so we can't be rescheduled to another cpu.
+    /*
+     * Raise the IRQL to DISPATCH_LEVEL so we can't be rescheduled to another cpu.
      * KeInsertQueueDpc must also be executed at IRQL >= DISPATCH_LEVEL.
      */
@@ -507,10 +1487,9 @@
     else
     {
-        unsigned iSelf = RTMpCpuId();
-
-        for (unsigned i = 0; i < g_cRtMpNtMaxCpus; i++)
-        {
-            if (    (i != iSelf)
-                &&  (Mask & RT_BIT_64(i)))
+        uint32_t iSelf = RTMpCurSetIndex();
+        for (uint32_t i = 0; i < cDpcsNeeded; i++)
+        {
+            if (   (i != iSelf)
+                && RTCpuSetIsMemberByIndex(&OnlineSet, i))
             {
                 ASMAtomicIncS32(&pArgs->cRefs);
@@ -525,5 +1504,7 @@
     KeLowerIrql(oldIrql);
 
-    /* Flush all DPCs and wait for completion. (can take long!) */
+    /*
+     * Flush all DPCs and wait for completion. (can take long!)
+     */
     /** @todo Consider changing this to an active wait using some atomic inc/dec
      *  stuff (and check for the current cpu above in the specific case). */
Index: unk/src/VBox/Runtime/r0drv/nt/mpnotification-r0drv-nt.cpp
===================================================================
--- /trunk/src/VBox/Runtime/r0drv/nt/mpnotification-r0drv-nt.cpp	(revision 64280)
+++ 	(revision )
@@ -1,189 +1,0 @@
-/* $Id$ */
-/** @file
- * IPRT - Multiprocessor Event Notifications, Ring-0 Driver, NT.
- */
-
-/*
- * Copyright (C) 2008-2016 Oracle Corporation
- *
- * This file is part of VirtualBox Open Source Edition (OSE), as
- * available from http://www.virtualbox.org. This file is free software;
- * you can redistribute it and/or modify it under the terms of the GNU
- * General Public License (GPL) as published by the Free Software
- * Foundation, in version 2 as it comes in the "COPYING" file of the
- * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
- * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
- *
- * The contents of this file may alternatively be used under the terms
- * of the Common Development and Distribution License Version 1.0
- * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
- * VirtualBox OSE distribution, in which case the provisions of the
- * CDDL are applicable instead of those of the GPL.
- *
- * You may elect to license modified versions of this file under the
- * terms and conditions of either the GPL or the CDDL or both.
- */
-
-
-/*********************************************************************************************************************************
-*   Header Files                                                                                                                 *
-*********************************************************************************************************************************/
-#include "the-nt-kernel.h"
-
-#include <iprt/mp.h>
-#include <iprt/err.h>
-#include <iprt/cpuset.h>
-#include "r0drv/mp-r0drv.h"
-#include "internal-r0drv-nt.h"
-
-
-#if 0 /* The following is 100% untested code . */
-
-#ifndef KE_PROCESSOR_CHANGE_ADD_EXISTING
-/* Some bits that are missing from our DDK headers. */
-
-typedef enum
-{
-    KeProcessorAddStartNotify = 0,
-    KeProcessorAddCompleteNotify,
-    KeProcessorAddFailureNotify
-} KE_PROCESSOR_CHANGE_NOTIFY_STATE;
-
-typedef struct _KE_PROCESSOR_CHANGE_NOTIFY_CONTEXT
-{
-    KE_PROCESSOR_CHANGE_NOTIFY_STATE State;
-    ULONG NtNumber;
-    NTSTATUS Status;
-} KE_PROCESSOR_CHANGE_NOTIFY_CONTEXT;
-typedef KE_PROCESSOR_CHANGE_NOTIFY_CONTEXT *PKE_PROCESSOR_CHANGE_NOTIFY_CONTEXT;
-
-typedef VOID (__stdcall *PPROCESSOR_CALLBACK_FUNCTION)(PVOID, PKE_PROCESSOR_CHANGE_NOTIFY_CONTEXT, PNTSTATUS);
-
-# define KE_PROCESSOR_CHANGE_ADD_EXISTING 1
-#endif /* !KE_PROCESSOR_CHANGE_ADD_EXISTING */
-
-
-
-/*********************************************************************************************************************************
-*   Structures and Typedefs                                                                                                      *
-*********************************************************************************************************************************/
-/** Typedef of KeRegisterProcessorChangeCallback. */
-typedef PVOID (__stdcall *PFNMYKEREGISTERPROCESSORCHANGECALLBACK)(PPROCESSOR_CALLBACK_FUNCTION, PVOID, ULONG);
-/** Typedef of KeDeregisterProcessorChangeCallback. */
-typedef VOID (__stdcall *PFNMYKEDEREGISTERPROCESSORCHANGECALLBACK)(PVOID);
-
-
-/*********************************************************************************************************************************
-*   Global Variables                                                                                                             *
-*********************************************************************************************************************************/
-/** The pointer to KeRegisterProcessorChangeCallback if found. */
-static PFNMYKEREGISTERPROCESSORCHANGECALLBACK   g_pfnKeRegisterProcessorChangeCallback = NULL;
-/** The pointer to KeDeregisterProcessorChangeCallback if found. */
-static PFNMYKEDEREGISTERPROCESSORCHANGECALLBACK g_pfnKeDeregisterProcessorChangeCallback = NULL;
-/** The callback handle. */
-static PVOID g_hCallback = NULL;
-
-
-/**
- * The native callback.
- *
- * @param   pNotifierBlock  Pointer to g_NotifierBlock.
- * @param   ulNativeEvent   The native event.
- * @param   pvCpu           The cpu id cast into a pointer value.
- */
-static VOID __stdcall rtMpNotificationNtCallback(PVOID pvUser,
-                                                 PKE_PROCESSOR_CHANGE_NOTIFY_CONTEXT pChangeContext,
-                                                 PNTSTATUS pOperationStatus)
-{
-    NOREF(pvUser);
-    AssertPtr(pChangeContext);
-    AssertPtrNull(pOperationStatus);
-
-    RTCPUID idCpu = pChangeContext->NtNumber;
-    switch (pChangeContext->State)
-    {
-        case KeProcessorAddStartNotify:
-        case KeProcessorAddFailureNotify:
-            break;
-
-        case KeProcessorAddCompleteNotify:
-            /* Update the active CPU set before doing callback round. */
-            RTCpuSetAdd(&g_rtMpNtCpuSet, idCpu);
-            rtMpNotificationDoCallbacks(RTMPEVENT_ONLINE, idCpu);
-            break;
-
-        //case KeProcessorDelCompleteNotify:
-        //    rtMpNotificationDoCallbacks(RTMPEVENT_OFFLINE, idCpu);
-        //    break;
-
-        default:
-           AssertMsgFailed(("Unexpected state=%d idCpu=%d\n", pChangeContext->State, (int)idCpu));
-           break;
-    }
-
-    *pOperationStatus = STATUS_SUCCESS;
-}
-
-
-DECLHIDDEN(int) rtR0MpNotificationNativeInit(void)
-{
-    /*
-     * Try resolve the symbols.
-     */
-    UNICODE_STRING RoutineName;
-    RtlInitUnicodeString(&RoutineName, L"KeRegisterProcessorChangeCallback");
-    g_pfnKeRegisterProcessorChangeCallback = (PFNMYKEREGISTERPROCESSORCHANGECALLBACK)MmGetSystemRoutineAddress(&RoutineName);
-    if (g_pfnKeRegisterProcessorChangeCallback)
-    {
-        RtlInitUnicodeString(&RoutineName, L"KeDeregisterProcessorChangeCallback");
-        g_pfnKeDeregisterProcessorChangeCallback = (PFNMYKEDEREGISTERPROCESSORCHANGECALLBACK)MmGetSystemRoutineAddress(&RoutineName);
-        if (g_pfnKeDeregisterProcessorChangeCallback)
-        {
-            /*
-             * Try call it.
-             */
-            NTSTATUS ntRc = 0;
-            g_hCallback = g_pfnKeRegisterProcessorChangeCallback(rtMpNotificationNtCallback, &ntRc, KE_PROCESSOR_CHANGE_ADD_EXISTING);
-            if (g_hCallback != NULL)
-                return VINF_SUCCESS;
-
-            /* Genuine failure. */
-            int rc = RTErrConvertFromNtStatus(ntRc);
-            AssertMsgFailed(("ntRc=%#x rc=%d\n", ntRc, rc));
-            return rc;
-        }
-
-        /* this shouldn't happen. */
-        AssertFailed();
-    }
-
-    /* Not supported - success. */
-    g_pfnKeRegisterProcessorChangeCallback = NULL;
-    g_pfnKeDeregisterProcessorChangeCallback = NULL;
-    return VINF_SUCCESS;
-}
-
-
-DECLHIDDEN(void) rtR0MpNotificationNativeTerm(void)
-{
-    if (    g_pfnKeDeregisterProcessorChangeCallback
-        &&  g_hCallback)
-    {
-        g_pfnKeDeregisterProcessorChangeCallback(g_hCallback);
-        g_hCallback = NULL;
-    }
-}
-
-#else   /* Not supported */
-
-DECLHIDDEN(int) rtR0MpNotificationNativeInit(void)
-{
-    return VINF_SUCCESS;
-}
-
-DECLHIDDEN(void) rtR0MpNotificationNativeTerm(void)
-{
-}
-
-#endif  /* Not supported */
-
Index: /trunk/src/VBox/Runtime/r3/win/mp-win.cpp
===================================================================
--- /trunk/src/VBox/Runtime/r3/win/mp-win.cpp	(revision 64280)
+++ /trunk/src/VBox/Runtime/r3/win/mp-win.cpp	(revision 64281)
@@ -40,10 +40,16 @@
 #include <iprt/mem.h>
 #include <iprt/once.h>
+#include <iprt/param.h>
 #if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
 # include <iprt/asm-amd64-x86.h>
 #endif
+#if defined(VBOX) && !defined(IN_GUEST)
+# include <VBox/sup.h>
+# define IPRT_WITH_GIP_MP_INFO
+#else
+# undef  IPRT_WITH_GIP_MP_INFO
+#endif
 
 #include "internal-r3-win.h"
-
 
 
@@ -51,7 +57,49 @@
 *   Defined Constants And Macros                                                                                                 *
 *********************************************************************************************************************************/
+/** @def RTMPWIN_UPDATE_GIP_GLOBAL
+ * Does lazy (re-)initialization using information provided by GIP. */
+#ifdef IPRT_WITH_GIP_MP_INFO
+# define RTMPWIN_UPDATE_GIP_GLOBAL() \
+    do { RTMPWIN_UPDATE_GIP_GLOBALS_AND_GET_PGIP(); } while (0)
+#else
+# define RTMPWIN_UPDATE_GIP_GLOBAL() do { } while (0)
+#endif
+
+/** @def RTMPWIN_UPDATE_GIP_GLOBALS_AND_GET_PGIP
+ * Does lazy (re-)initialization using information provided by GIP and
+ * declares and initializes a pGip local variable. */
+#ifdef IPRT_WITH_GIP_MP_INFO
+#define RTMPWIN_UPDATE_GIP_GLOBALS_AND_GET_PGIP() \
+    PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage; \
+    if (pGip) \
+    { \
+        if (   pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC \
+            && RTOnce(&g_MpInitOnceGip, rtMpWinInitOnceGip, NULL) == VINF_SUCCESS) \
+        { \
+            if (g_cRtMpWinActiveCpus >= pGip->cOnlineCpus) \
+            { /* likely */ } \
+            else \
+                rtMpWinRefreshGip(); \
+        } \
+        else \
+            pGip = NULL; \
+    } else do { } while (0)
+#else
+# define RTMPWIN_UPDATE_GIP_GLOBALS_AND_GET_PGIP() do { } while (0)
+#endif
+
+
+/*********************************************************************************************************************************
+*   Global Variables                                                                                                             *
+*********************************************************************************************************************************/
 /** Initialize once. */
 static RTONCE                                       g_MpInitOnce = RTONCE_INITIALIZER;
-//static decltype(GetMaximumProcessorCount)          *g_pfnGetMaximumProcessorCount;
+#ifdef IPRT_WITH_GIP_MP_INFO
+/** Initialize once using GIP. */
+static RTONCE                                       g_MpInitOnceGip = RTONCE_INITIALIZER;
+#endif
+
+static decltype(GetMaximumProcessorCount)          *g_pfnGetMaximumProcessorCount;
+//static decltype(GetActiveProcessorCount)           *g_pfnGetActiveProcessorCount;
 static decltype(GetCurrentProcessorNumber)         *g_pfnGetCurrentProcessorNumber;
 static decltype(GetCurrentProcessorNumberEx)       *g_pfnGetCurrentProcessorNumberEx;
@@ -60,7 +108,4 @@
 
 
-/*********************************************************************************************************************************
-*   Global Variables                                                                                                             *
-*********************************************************************************************************************************/
 /** The required buffer size for getting group relations. */
 static uint32_t     g_cbRtMpWinGrpRelBuf;
@@ -71,16 +116,26 @@
 /** The max number of groups. */
 static uint32_t     g_cRtMpWinMaxCpuGroups;
-/** Static per group info. */
+/** The number of active CPUs the last time we checked. */
+static uint32_t volatile g_cRtMpWinActiveCpus;
+/** Static per group info.
+ * @remarks  With RTCPUSET_MAX_CPUS as 256, this takes up 33KB.
+ * @sa g_aRtMpNtCpuGroups */
 static struct
 {
-    /** The CPU ID (and CPU set index) of the first CPU in the group. */
-    uint16_t    idFirstCpu;
     /** The max CPUs in the group. */
     uint16_t    cMaxCpus;
-} g_aRtMpWinCpuGroups[RTCPUSET_MAX_CPUS];
+    /** The number of active CPUs at the time of initialization. */
+    uint16_t    cActiveCpus;
+    /** CPU set indexes for each CPU in the group. */
+    int16_t     aidxCpuSetMembers[64];
+}                   g_aRtMpWinCpuGroups[RTCPUSET_MAX_CPUS];
+/** Maps CPU set indexes to RTCPUID.
+ * @sa g_aidRtMpNtByCpuSetIdx  */
+RTCPUID             g_aidRtMpWinByCpuSetIdx[RTCPUSET_MAX_CPUS];
 
 
 /**
- * @callback_method_impl{FNRTONCE, Resolves dynamic imports.}
+ * @callback_method_impl{FNRTONCE,
+ *      Resolves dynamic imports and initializes globals.}
  */
 static DECLCALLBACK(int32_t) rtMpWinInitOnce(void *pvUser)
@@ -98,5 +153,6 @@
             RT_CONCAT(g_pfn,a_FnName) = (decltype(a_FnName) *)GetProcAddress(g_hModKernel32, #a_FnName); \
         } while (0)
-    //RESOLVE_API("kernel32.dll", GetMaximumProcessorCount); /* Calls GetLogicalProcessorInformationEx/RelationGroup in W10. */
+    RESOLVE_API("kernel32.dll", GetMaximumProcessorCount);
+    //RESOLVE_API("kernel32.dll", GetActiveProcessorCount); - slow :/
     RESOLVE_API("kernel32.dll", GetCurrentProcessorNumber);
     RESOLVE_API("kernel32.dll", GetCurrentProcessorNumberEx);
@@ -105,18 +161,30 @@
 
     /*
-     * Query group information, partitioning CPU IDs and CPU set
-     * indexes (they are the same).
+     * Reset globals.
+     */
+    for (unsigned i = 0; i < RT_ELEMENTS(g_aidRtMpWinByCpuSetIdx); i++)
+        g_aidRtMpWinByCpuSetIdx[i] = NIL_RTCPUID;
+    for (unsigned idxGroup = 0; idxGroup < RT_ELEMENTS(g_aRtMpWinCpuGroups); idxGroup++)
+    {
+        g_aRtMpWinCpuGroups[idxGroup].cMaxCpus    = 0;
+        g_aRtMpWinCpuGroups[idxGroup].cActiveCpus = 0;
+        for (unsigned idxMember = 0; idxMember < RT_ELEMENTS(g_aRtMpWinCpuGroups[idxGroup].aidxCpuSetMembers); idxMember++)
+            g_aRtMpWinCpuGroups[idxGroup].aidxCpuSetMembers[idxMember] = -1;
+    }
+
+    /*
+     * Query group information, partitioning CPU IDs and CPU set indexes.
      *
      * We ASSUME the the GroupInfo index is the same as the group number.
      *
-     * We ASSUME there are no inactive groups, because otherwise it will
-     * be difficult to tell how many possible CPUs we can have and do a
-     * reasonable CPU ID/index partitioning. [probably bad assumption]
+     * We CANNOT ASSUME that the kernel CPU indexes are assigned in any given
+     * way, though they usually are in group order by active processor.  So,
+     * we do that to avoid trouble.  We must use information provided through
+     * the GIP if we want the kernel CPU set indexes.  Even there, the inactive
+     * CPUs won't have sensible indexes.  Sigh.
      *
-     * We ASSUME that the kernel processor indexes are assigned in group order,
-     * which we match here with our own ID+index assignments.  This claim is
-     * verified by initterm-r0drv-nt.cpp.
+     * We try to assign IDs to inactive CPUs in the same manner as mp-r0drv-nt.cpp
      *
-     * Note! We will die if there are too many processors!
+     * Note! We will die (AssertFatal) if there are too many processors!
      */
     union
@@ -146,25 +214,67 @@
         AssertFatal(uBuf.Info.Group.MaximumGroupCount >= uBuf.Info.Group.ActiveGroupCount);
 
+        g_cRtMpWinMaxCpuGroups = uBuf.Info.Group.MaximumGroupCount;
+
+        /* Count max cpus (see mp-r0drv-nt.cpp for why we don't use GetMaximumProcessorCount(ALL)). */
+        uint32_t idxGroup;
+        g_cRtMpWinMaxCpus = 0;
+        for (idxGroup = 0; idxGroup < uBuf.Info.Group.ActiveGroupCount; idxGroup++)
+            g_cRtMpWinMaxCpus += uBuf.Info.Group.GroupInfo[idxGroup].MaximumProcessorCount;
 
         /* Process the active groups. */
-        g_cRtMpWinMaxCpuGroups = uBuf.Info.Group.MaximumGroupCount;
-        uint16_t idxCpu        = 0;
-        uint32_t idxGroup      = 0;
-        for (; idxGroup < uBuf.Info.Group.ActiveGroupCount; idxGroup++)
-        {
-            g_aRtMpWinCpuGroups[idxGroup].idFirstCpu = idxCpu;
-            g_aRtMpWinCpuGroups[idxGroup].cMaxCpus   = uBuf.Info.Group.GroupInfo[idxGroup].MaximumProcessorCount;
-            idxCpu += uBuf.Info.Group.GroupInfo[idxGroup].MaximumProcessorCount;
-        }
+        uint32_t cActive   = 0;
+        uint32_t cInactive = 0;
+        uint32_t idxCpu    = 0;
+        uint32_t idxCpuSetNextInactive = g_cRtMpWinMaxCpus - 1;
+        for (idxGroup = 0; idxGroup < uBuf.Info.Group.ActiveGroupCount; idxGroup++)
+        {
+            PROCESSOR_GROUP_INFO const *pGroupInfo = &uBuf.Info.Group.GroupInfo[idxGroup];
+            g_aRtMpWinCpuGroups[idxGroup].cMaxCpus    = pGroupInfo->MaximumProcessorCount;
+            g_aRtMpWinCpuGroups[idxGroup].cActiveCpus = pGroupInfo->ActiveProcessorCount;
+            for (uint32_t idxMember = 0; idxMember < pGroupInfo->MaximumProcessorCount; idxMember++)
+            {
+                if (pGroupInfo->ActiveProcessorMask & RT_BIT_64(idxMember))
+                {
+                    g_aRtMpWinCpuGroups[idxGroup].aidxCpuSetMembers[idxMember] = idxCpu;
+                    g_aidRtMpWinByCpuSetIdx[idxCpu] = idxCpu;
+                    idxCpu++;
+                    cActive++;
+                }
+                else
+                {
+                    if (idxCpuSetNextInactive >= idxCpu)
+                    {
+                        g_aRtMpWinCpuGroups[idxGroup].aidxCpuSetMembers[idxMember] = idxCpuSetNextInactive;
+                        g_aidRtMpWinByCpuSetIdx[idxCpuSetNextInactive] = idxCpuSetNextInactive;
+                        idxCpuSetNextInactive--;
+                    }
+                    cInactive++;
+                }
+            }
+        }
+        g_cRtMpWinActiveCpus = cActive;
+        Assert(cActive + cInactive <= g_cRtMpWinMaxCpus);
+        Assert(idxCpu <= idxCpuSetNextInactive + 1);
+        Assert(idxCpu <= g_cRtMpWinMaxCpus);
 
         /* Just in case the 2nd assumption doesn't hold true and there are inactive groups. */
         for (; idxGroup < uBuf.Info.Group.MaximumGroupCount; idxGroup++)
         {
-            g_aRtMpWinCpuGroups[idxGroup].idFirstCpu = idxCpu;
-            g_aRtMpWinCpuGroups[idxGroup].cMaxCpus   = RT_MAX(MAXIMUM_PROC_PER_GROUP, 64);
-            idxCpu += RT_MAX(MAXIMUM_PROC_PER_GROUP, 64);
-        }
-
-        g_cRtMpWinMaxCpus = idxCpu;
+            DWORD cMaxMembers = g_pfnGetMaximumProcessorCount(idxGroup);
+            g_aRtMpWinCpuGroups[idxGroup].cMaxCpus    = cMaxMembers;
+            g_aRtMpWinCpuGroups[idxGroup].cActiveCpus = 0;
+            for (uint32_t idxMember = 0; idxMember < cMaxMembers; idxMember++)
+            {
+                if (idxCpuSetNextInactive >= idxCpu)
+                {
+                    g_aRtMpWinCpuGroups[idxGroup].aidxCpuSetMembers[idxMember] = idxCpuSetNextInactive;
+                    g_aidRtMpWinByCpuSetIdx[idxCpuSetNextInactive] = idxCpuSetNextInactive;
+                    idxCpuSetNextInactive--;
+                }
+                cInactive++;
+            }
+        }
+        Assert(cActive + cInactive <= g_cRtMpWinMaxCpus);
+        Assert(idxCpu <= idxCpuSetNextInactive + 1);
     }
     else
@@ -172,8 +282,14 @@
         /* Legacy: */
         GetSystemInfo(&uBuf.SysInfo);
+        g_cRtMpWinMaxCpuGroups              = 1;
         g_cRtMpWinMaxCpus                   = uBuf.SysInfo.dwNumberOfProcessors;
-        g_cRtMpWinMaxCpuGroups              = 1;
-        g_aRtMpWinCpuGroups[0].idFirstCpu   = 0;
         g_aRtMpWinCpuGroups[0].cMaxCpus     = uBuf.SysInfo.dwNumberOfProcessors;
+        g_aRtMpWinCpuGroups[0].cActiveCpus  = uBuf.SysInfo.dwNumberOfProcessors;
+
+        for (uint32_t idxMember = 0; idxMember < uBuf.SysInfo.dwNumberOfProcessors; idxMember++)
+        {
+            g_aRtMpWinCpuGroups[0].aidxCpuSetMembers[idxMember] = idxMember;
+            g_aidRtMpWinByCpuSetIdx[idxMember] = idxMember;
+        }
     }
 
@@ -268,70 +384,277 @@
 
 
+#ifdef IPRT_WITH_GIP_MP_INFO
+/**
+ * @callback_method_impl{FNRTONCE, Updates globals with information from GIP.}
+ */
+static DECLCALLBACK(int32_t) rtMpWinInitOnceGip(void *pvUser)
+{
+    RT_NOREF(pvUser);
+    RTOnce(&g_MpInitOnce, rtMpWinInitOnce, NULL);
+
+    PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
+    if (   pGip
+        && pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC)
+    {
+        /*
+         * Update globals.
+         */
+        if (g_cRtMpWinMaxCpus != pGip->cPossibleCpus)
+            g_cRtMpWinMaxCpus = pGip->cPossibleCpus;
+        if (g_cRtMpWinActiveCpus != pGip->cOnlineCpus)
+            g_cRtMpWinActiveCpus = pGip->cOnlineCpus;
+        Assert(g_cRtMpWinMaxCpuGroups == pGip->cPossibleCpuGroups);
+        if (g_cRtMpWinMaxCpuGroups != pGip->cPossibleCpuGroups)
+        {
+            g_cRtMpWinMaxCpuGroups = pGip->cPossibleCpuGroups;
+            g_cbRtMpWinGrpRelBuf   = sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX)
+                                   + (g_cRtMpWinMaxCpuGroups + 2) * sizeof(PROCESSOR_GROUP_INFO);
+        }
+
+        /*
+         * Update CPU set IDs.
+         */
+        for (unsigned i = g_cRtMpWinMaxCpus; i < RT_ELEMENTS(g_aidRtMpWinByCpuSetIdx); i++)
+            g_aidRtMpWinByCpuSetIdx[i] = NIL_RTCPUID;
+
+        unsigned const cbGip = pGip->cPages * PAGE_SIZE;
+        for (uint32_t idxGroup = 0; idxGroup < g_cRtMpWinMaxCpuGroups; idxGroup++)
+        {
+            uint32_t idxMember;
+            unsigned offCpuGroup = pGip->aoffCpuGroup[idxGroup];
+            if (offCpuGroup < cbGip)
+            {
+                PSUPGIPCPUGROUP pGipCpuGrp  = (PSUPGIPCPUGROUP)((uintptr_t)pGip + offCpuGroup);
+                uint32_t        cMaxMembers = pGipCpuGrp->cMaxMembers;
+                AssertStmt(cMaxMembers <= RT_ELEMENTS(g_aRtMpWinCpuGroups[0].aidxCpuSetMembers),
+                           cMaxMembers = RT_ELEMENTS(g_aRtMpWinCpuGroups[0].aidxCpuSetMembers));
+                g_aRtMpWinCpuGroups[idxGroup].cMaxCpus     = cMaxMembers;
+                g_aRtMpWinCpuGroups[idxGroup].cActiveCpus  = RT_MIN(pGipCpuGrp->cMembers, cMaxMembers);
+
+                for (idxMember = 0; idxMember < cMaxMembers; idxMember++)
+                {
+                    int16_t idxSet = pGipCpuGrp->aiCpuSetIdxs[idxMember];
+                    g_aRtMpWinCpuGroups[idxGroup].aidxCpuSetMembers[idxMember] = idxSet;
+                    if ((unsigned)idxSet < RT_ELEMENTS(g_aidRtMpWinByCpuSetIdx))
+# ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+                        g_aidRtMpWinByCpuSetIdx[idxSet] = RTMPCPUID_FROM_GROUP_AND_NUMBER(idxGroup, idxMember);
+# else
+                        g_aidRtMpWinByCpuSetIdx[idxSet] = idxSet;
+# endif
+                }
+            }
+            else
+                idxMember = 0;
+            for (; idxMember < RT_ELEMENTS(g_aRtMpWinCpuGroups[0].aidxCpuSetMembers); idxMember++)
+                g_aRtMpWinCpuGroups[idxGroup].aidxCpuSetMembers[idxMember] = -1;
+        }
+    }
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Refreshes globals from GIP after one or more CPUs were added.
+ *
+ * There are potential races here.  We might race other threads and we may race
+ * more CPUs being added.
+ */
+static void rtMpWinRefreshGip(void)
+{
+    PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
+    if (   pGip
+        && pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC)
+    {
+        /*
+         * Since CPUs cannot be removed, we only have to update the IDs and
+         * indexes of CPUs that we think are inactive and the group member counts.
+         */
+        for (;;)
+        {
+            unsigned const cbGip          = pGip->cPages * PAGE_SIZE;
+            uint32_t const cGipActiveCpus = pGip->cOnlineCpus;
+            uint32_t const cMyActiveCpus  = ASMAtomicReadU32(&g_cRtMpWinActiveCpus);
+            ASMCompilerBarrier();
+
+            for (uint32_t idxGroup = 0; idxGroup < g_cRtMpWinMaxCpuGroups; idxGroup++)
+            {
+                unsigned offCpuGroup = pGip->aoffCpuGroup[idxGroup];
+                if (offCpuGroup < cbGip)
+                {
+                    PSUPGIPCPUGROUP pGipCpuGrp  = (PSUPGIPCPUGROUP)((uintptr_t)pGip + offCpuGroup);
+                    uint32_t        cMaxMembers = pGipCpuGrp->cMaxMembers;
+                    AssertStmt(cMaxMembers <= RT_ELEMENTS(g_aRtMpWinCpuGroups[0].aidxCpuSetMembers),
+                               cMaxMembers = RT_ELEMENTS(g_aRtMpWinCpuGroups[0].aidxCpuSetMembers));
+                    for (uint32_t idxMember = g_aRtMpWinCpuGroups[idxGroup].cActiveCpus; idxMember < cMaxMembers; idxMember++)
+                    {
+                        int16_t idxSet = pGipCpuGrp->aiCpuSetIdxs[idxMember];
+                        g_aRtMpWinCpuGroups[idxGroup].aidxCpuSetMembers[idxMember] = idxSet;
+                        if ((unsigned)idxSet < RT_ELEMENTS(g_aidRtMpWinByCpuSetIdx))
+# ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+                            g_aidRtMpWinByCpuSetIdx[idxSet] = RTMPCPUID_FROM_GROUP_AND_NUMBER(idxGroup, idxMember);
+# else
+                            g_aidRtMpWinByCpuSetIdx[idxSet] = idxSet;
+# endif
+                    }
+                    g_aRtMpWinCpuGroups[idxGroup].cMaxCpus    = cMaxMembers;
+                    g_aRtMpWinCpuGroups[idxGroup].cActiveCpus = RT_MIN(pGipCpuGrp->cMembers, cMaxMembers);
+                }
+                else
+                    Assert(g_aRtMpWinCpuGroups[idxGroup].cActiveCpus == 0);
+            }
+
+            ASMCompilerBarrier();
+            if (cGipActiveCpus == pGip->cOnlineCpus)
+                if (ASMAtomicCmpXchgU32(&g_cRtMpWinActiveCpus, cGipActiveCpus, cMyActiveCpus))
+                    break;
+        }
+    }
+}
+
+#endif /* IPRT_WITH_GIP_MP_INFO */
+
+
+/*
+ * Conversion between CPU ID and set index.
+ */
+
+RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu)
+{
+    RTOnce(&g_MpInitOnce, rtMpWinInitOnce, NULL);
+    RTMPWIN_UPDATE_GIP_GLOBAL();
+
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+    if (idCpu != NIL_RTCPUID)
+        return RTMpSetIndexFromCpuGroupMember(rtMpCpuIdGetGroup(idCpu), rtMpCpuIdGetGroupMember(idCpu));
+    return -1;
+
+#else
+    /* 1:1 mapping, just do range checking. */
+    return idCpu < g_cRtMpWinMaxCpus ? idCpu : -1;
+#endif
+}
+
+
+RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu)
+{
+    RTOnce(&g_MpInitOnce, rtMpWinInitOnce, NULL);
+    RTMPWIN_UPDATE_GIP_GLOBAL();
+
+    if ((unsigned)iCpu < RT_ELEMENTS(g_aidRtMpWinByCpuSetIdx))
+    {
+        RTCPUID idCpu = g_aidRtMpWinByCpuSetIdx[iCpu];
+
+#if defined(IPRT_WITH_GIP_MP_INFO) && defined(IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER) && defined(RT_STRICT)
+        /* Check the correctness of the mapping table. */
+        PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage; RTCPUID idCpuGip = NIL_RTCPUID;
+        if (   pGip
+            && (unsigned)iCpu < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx))
+        {
+            unsigned idxSupCpu = pGip->aiCpuFromCpuSetIdx[iCpu];
+            if (idxSupCpu < pGip->cCpus)
+                if (pGip->aCPUs[idxSupCpu].enmState != SUPGIPCPUSTATE_INVALID)
+                    idCpuGip = pGip->aCPUs[idxSupCpu].idCpu;
+        }
+        AssertMsg(idCpu == idCpuGip, ("table:%#x  gip:%#x\n", idCpu, idCpuGip));
+#endif
+
+        return idCpu;
+    }
+    return NIL_RTCPUID;
+}
+
+
+RTDECL(int) RTMpSetIndexFromCpuGroupMember(uint32_t idxGroup, uint32_t idxMember)
+{
+    if (idxGroup < g_cRtMpWinMaxCpuGroups)
+        if (idxMember < g_aRtMpWinCpuGroups[idxGroup].cMaxCpus)
+            return g_aRtMpWinCpuGroups[idxGroup].aidxCpuSetMembers[idxMember];
+    return -1;
+}
+
+
+RTDECL(uint32_t) RTMpGetCpuGroupCounts(uint32_t idxGroup, uint32_t *pcActive)
+{
+    if (idxGroup < g_cRtMpWinMaxCpuGroups)
+    {
+        if (pcActive)
+            *pcActive = g_aRtMpWinCpuGroups[idxGroup].cActiveCpus;
+        return g_aRtMpWinCpuGroups[idxGroup].cMaxCpus;
+    }
+    if (pcActive)
+        *pcActive = 0;
+    return 0;
+}
+
+
+RTDECL(uint32_t) RTMpGetMaxCpuGroupCount(void)
+{
+    return g_cRtMpWinMaxCpuGroups;
+}
+
+
+
+/*
+ * Get current CPU.
+ */
+
 RTDECL(RTCPUID) RTMpCpuId(void)
 {
     RTOnce(&g_MpInitOnce, rtMpWinInitOnce, NULL);
-
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+    RTMPWIN_UPDATE_GIP_GLOBAL();
+#endif
+
+    PROCESSOR_NUMBER ProcNum;
+    ProcNum.Group = 0;
+    ProcNum.Number = 0xff;
     if (g_pfnGetCurrentProcessorNumberEx)
-    {
-        PROCESSOR_NUMBER ProcNum;
         g_pfnGetCurrentProcessorNumberEx(&ProcNum);
-        Assert(ProcNum.Group < g_cRtMpWinMaxCpuGroups);
-        Assert(ProcNum.Number < g_aRtMpWinCpuGroups[ProcNum.Group].cMaxCpus);
-        return g_aRtMpWinCpuGroups[ProcNum.Group].idFirstCpu + ProcNum.Number;
-    }
-
-    if (g_pfnGetCurrentProcessorNumber)
-    {
-        /* Should be safe wrt processor numbering, I hope... Only affects W2k3 and Vista. */
-        Assert(g_cRtMpWinMaxCpuGroups == 1);
-        return g_pfnGetCurrentProcessorNumber();
-    }
-
-    /* The API was introduced with W2K3 according to MSDN. */
+    else if (g_pfnGetCurrentProcessorNumber)
+    {
+        DWORD iCpu = g_pfnGetCurrentProcessorNumber();
+        Assert(iCpu < g_cRtMpWinMaxCpus);
+        ProcNum.Number = iCpu;
+    }
+    else
+    {
 #if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
-    return ASMGetApicId();
+        ProcNum.Number = ASMGetApicId();
 #else
 # error "Not ported to this architecture."
-    return NIL_RTAPICID;
-#endif
-}
-
-
-RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu)
+        return NIL_RTCPUID;
+#endif
+    }
+
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+    return RTMPCPUID_FROM_GROUP_AND_NUMBER(ProcNum.Group, ProcNum.Number);
+#else
+    return RTMpSetIndexFromCpuGroupMember(ProcNum.Group, ProcNum.Number);
+#endif
+}
+
+
+/*
+ * Possible CPUs and cores.
+ */
+
+RTDECL(RTCPUID) RTMpGetMaxCpuId(void)
 {
     RTOnce(&g_MpInitOnce, rtMpWinInitOnce, NULL);
-
-    /* 1:1 mapping, just do range checking. */
-    return idCpu < g_cRtMpWinMaxCpus ? idCpu : -1;
-}
-
-
-RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu)
+#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
+    return RTMPCPUID_FROM_GROUP_AND_NUMBER(g_cRtMpWinMaxCpuGroups - 1,
+                                           g_aRtMpWinCpuGroups[g_cRtMpWinMaxCpuGroups - 1].cMaxCpus - 1);
+#else
+    return g_cRtMpWinMaxCpus - 1;
+#endif
+}
+
+
+RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
 {
     RTOnce(&g_MpInitOnce, rtMpWinInitOnce, NULL);
-
-    /* 1:1 mapping, just do range checking. */
-    return (unsigned)iCpu < g_cRtMpWinMaxCpus ? iCpu : NIL_RTCPUID;
-}
-
-
-RTDECL(RTCPUID) RTMpGetMaxCpuId(void)
-{
-    RTOnce(&g_MpInitOnce, rtMpWinInitOnce, NULL);
-    return g_cRtMpWinMaxCpus - 1;
-}
-
-
-RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu)
-{
-    RTOnce(&g_MpInitOnce, rtMpWinInitOnce, NULL);
-    RTCPUSET Set;
-    return RTCpuSetIsMember(RTMpGetOnlineSet(&Set), idCpu);
-}
-
-
-RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
-{
-    RTOnce(&g_MpInitOnce, rtMpWinInitOnce, NULL);
+    RTMPWIN_UPDATE_GIP_GLOBAL();
+
     /* Any CPU between 0 and g_cRtMpWinMaxCpus are possible. */
     return idCpu < g_cRtMpWinMaxCpus;
@@ -341,8 +664,8 @@
 RTDECL(PRTCPUSET) RTMpGetSet(PRTCPUSET pSet)
 {
-    RTCPUID idCpu = RTMpGetCount();
+    RTCPUID iCpu = RTMpGetCount();
     RTCpuSetEmpty(pSet);
-    while (idCpu-- > 0)
-        RTCpuSetAdd(pSet, idCpu);
+    while (iCpu-- > 0)
+        RTCpuSetAddByIndex(pSet, iCpu);
     return pSet;
 }
@@ -352,4 +675,5 @@
 {
     RTOnce(&g_MpInitOnce, rtMpWinInitOnce, NULL);
+
     return g_cRtMpWinMaxCpus;
 }
@@ -359,11 +683,25 @@
 {
     RTOnce(&g_MpInitOnce, rtMpWinInitOnce, NULL);
+
     return g_cRtMpWinMaxCpuCores;
 }
 
 
+/*
+ * Online CPUs and cores.
+ */
+
 RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
 {
     RTOnce(&g_MpInitOnce, rtMpWinInitOnce, NULL);
+
+#ifdef IPRT_WITH_GIP_MP_INFO
+    RTMPWIN_UPDATE_GIP_GLOBALS_AND_GET_PGIP();
+    if (pGip)
+    {
+        *pSet = pGip->OnlineCpuSet;
+        return pSet;
+    }
+#endif
 
     if (g_pfnGetLogicalProcessorInformationEx)
@@ -376,4 +714,5 @@
          * active processor mask width.
          */
+        /** @todo this is not correct for WOW64   */
         DWORD                                    cbInfo = g_cbRtMpWinGrpRelBuf;
         SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *pInfo = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *)alloca(cbInfo);
@@ -397,5 +736,4 @@
                 uint32_t    cMembersLeft = pInfo->Group.GroupInfo[idxGroup].ActiveProcessorCount;
 #endif
-                int const   idxFirst  = g_aRtMpWinCpuGroups[idxGroup].idFirstCpu;
                 int const   cMembers  = g_aRtMpWinCpuGroups[idxGroup].cMaxCpus;
                 for (int idxMember = 0; idxMember < cMembers; idxMember++)
@@ -406,5 +744,5 @@
                         cMembersLeft--;
 #endif
-                        RTCpuSetAddByIndex(pSet, idxFirst + idxMember);
+                        RTCpuSetAddByIndex(pSet, g_aRtMpWinCpuGroups[idxGroup].aidxCpuSetMembers[idxMember]);
                         fActive >>= 1;
                         if (!fActive)
@@ -434,6 +772,19 @@
 
 
+RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu)
+{
+    RTCPUSET Set;
+    return RTCpuSetIsMember(RTMpGetOnlineSet(&Set), idCpu);
+}
+
+
 RTDECL(RTCPUID) RTMpGetOnlineCount(void)
 {
+#ifdef IPRT_WITH_GIP_MP_INFO
+    RTMPWIN_UPDATE_GIP_GLOBALS_AND_GET_PGIP();
+    if (pGip)
+        return pGip->cOnlineCpus;
+#endif
+
     RTCPUSET Set;
     RTMpGetOnlineSet(&Set);
Index: /trunk/src/VBox/Runtime/testcase/tstRTMp-1.cpp
===================================================================
--- /trunk/src/VBox/Runtime/testcase/tstRTMp-1.cpp	(revision 64280)
+++ /trunk/src/VBox/Runtime/testcase/tstRTMp-1.cpp	(revision 64281)
@@ -34,8 +34,11 @@
 #include <iprt/string.h>
 #include <iprt/test.h>
-
-
-
-int main()
+#ifdef VBOX
+# include <VBox/sup.h>
+#endif
+
+
+
+int main(int argc, char **argv)
 {
     RTTEST hTest;
@@ -44,4 +47,10 @@
         return rcExit;
     RTTestBanner(hTest);
+
+    NOREF(argc); NOREF(argv);
+#ifdef VBOX
+    if (argc > 1)
+        SUPR3Init(NULL);
+#endif
 
     /*
