Index: /trunk/include/VBox/vmm/em.h
===================================================================
--- /trunk/include/VBox/vmm/em.h	(revision 78430)
+++ /trunk/include/VBox/vmm/em.h	(revision 78431)
@@ -352,5 +352,9 @@
 /** @defgroup grp_em_r0     The EM Host Context Ring-0 API
  * @{ */
+# ifdef VBOX_BUGREF_9217
+VMMR0_INT_DECL(int)             EMR0InitVM(PGVM pGVM);
+# else
 VMMR0_INT_DECL(int)             EMR0InitVM(PGVM pGVM, PVM pVM);
+# endif
 /** @} */
 #endif
Index: /trunk/include/VBox/vmm/gvm.h
===================================================================
--- /trunk/include/VBox/vmm/gvm.h	(revision 78430)
+++ /trunk/include/VBox/vmm/gvm.h	(revision 78431)
@@ -32,4 +32,7 @@
 
 #include <VBox/types.h>
+#ifdef VBOX_BUGREF_9217
+# include <VBox/vmm/vm.h>
+#endif
 #include <iprt/thread.h>
 #include <iprt/assertcompile.h>
@@ -41,8 +44,20 @@
  */
 
+#if defined(VBOX_BUGREF_9217) && defined(__cplusplus)
+typedef struct GVMCPU : public VMCPU
+#else
 typedef struct GVMCPU
+#endif
 {
+#if defined(VBOX_BUGREF_9217) && !defined(__cplusplus)
+    VMCPU           s;
+#endif
+
     /** VCPU id (0 - (pVM->cCpus - 1). */
+#ifdef VBOX_BUGREF_9217
+    VMCPUID         idCpuSafe;
+#else
     VMCPUID         idCpu;
+#endif
     /** Padding. */
     uint32_t        uPadding;
@@ -53,11 +68,17 @@
     /** Pointer to the global (ring-0) VM structure this CPU belongs to. */
     PGVM            pGVM;
+#ifndef VBOX_BUGREF_9217
     /** Pointer to the corresponding cross context CPU structure. */
     PVMCPU          pVCpu;
     /** Pointer to the corresponding cross context VM structure. */
     PVM             pVM;
+#endif
 
     /** Padding so gvmm starts on a 64 byte boundrary. */
+#ifdef VBOX_BUGREF_9217
+    uint8_t         abPadding[HC_ARCH_BITS == 32 ? 48 : 40];
+#else
     uint8_t         abPadding[HC_ARCH_BITS == 32 ? 4*4 + 24 : 24];
+#endif
 
     /** The GVMM per vcpu data. */
@@ -80,11 +101,29 @@
     } nem;
 #endif
+
+#ifdef VBOX_BUGREF_9217
+    /** Padding the structure size to page boundrary. */
+# ifdef VBOX_WITH_NEM_R0
+    uint8_t                 abPadding2[3904];
+# else
+    uint8_t                 abPadding2[3840];
+# endif
+#endif
+
 } GVMCPU;
-AssertCompileMemberOffset(GVMCPU, gvmm,   64);
-#ifdef VBOX_WITH_NEM_R0
-AssertCompileMemberOffset(GVMCPU, nem,    64 + 64);
-AssertCompileSize(        GVMCPU,         64 + 64 + 64);
-#else
-AssertCompileSize(        GVMCPU,         64 + 64);
+#ifdef VBOX_BUGREF_9217
+AssertCompileMemberAlignment(GVMCPU, gvmm,   64);
+# ifdef VBOX_WITH_NEM_R0
+AssertCompileMemberAlignment(GVMCPU, nem,    64);
+# endif
+AssertCompileSizeAlignment(GVMCPU,           4096);
+#else
+AssertCompileMemberOffset(GVMCPU, gvmm,      64);
+# ifdef VBOX_WITH_NEM_R0
+AssertCompileMemberOffset(GVMCPU, nem,       64 + 64);
+AssertCompileSize(        GVMCPU,            64 + 64 + 64);
+# else
+AssertCompileSize(        GVMCPU,            64 + 64);
+# endif
 #endif
 
@@ -105,21 +144,43 @@
  * paddings are checked by compile time assertions.
  */
+#ifdef VBOX_BUGREF_9217
+typedef struct GVM : public VM
+#else
 typedef struct GVM
+#endif
 {
     /** Magic / eye-catcher (GVM_MAGIC). */
     uint32_t        u32Magic;
     /** The global VM handle for this VM. */
+#ifdef VBOX_BUGREF_9217
+    uint32_t        hSelfSafe;
+#else
     uint32_t        hSelf;
+#endif
+#ifndef VBOX_BUGREF_9217
     /** The ring-0 mapping of the VM structure. */
     PVM             pVM;
+#endif
     /** The ring-3 mapping of the VM structure. */
     PVMR3           pVMR3;
     /** The support driver session the VM is associated with. */
+#ifdef VBOX_BUGREF_9217
+    PSUPDRVSESSION  pSessionSafe;
+#else
     PSUPDRVSESSION  pSession;
+#endif
     /** Number of Virtual CPUs, i.e. how many entries there are in aCpus.
      * Same same as VM::cCpus. */
+#ifdef VBOX_BUGREF_9217
+    uint32_t        cCpusSafe;
+#else
     uint32_t        cCpus;
+#endif
     /** Padding so gvmm starts on a 64 byte boundrary.   */
+#ifdef VBOX_BUGREF_9217
+    uint8_t         abPadding[HC_ARCH_BITS == 32 ? 12 + 28 + 4 : 28 + 8];
+#else
     uint8_t         abPadding[HC_ARCH_BITS == 32 ? 12 + 28 : 28];
+#endif
 
     /** The GVMM per vm data. */
@@ -160,17 +221,37 @@
         uint8_t             padding[64];
     } rawpci;
+
+#ifdef VBOX_BUGREF_9217
+    /** Padding so aCpus starts on a page boundrary.  */
+# ifdef VBOX_WITH_NEM_R0
+    uint8_t         abPadding2[4096 - 64 - 256 - 512 - 256 - 64];
+# else
+    uint8_t         abPadding2[4096 - 64 - 256 - 512 - 64];
+# endif
+#endif
 
     /** GVMCPU array for the configured number of virtual CPUs. */
     GVMCPU          aCpus[1];
 } GVM;
-AssertCompileMemberOffset(GVM, gvmm,   64);
-AssertCompileMemberOffset(GVM, gmm,    64 + 256);
-#ifdef VBOX_WITH_NEM_R0
-AssertCompileMemberOffset(GVM, nem,    64 + 256 + 512);
-AssertCompileMemberOffset(GVM, rawpci, 64 + 256 + 512 + 256);
-AssertCompileMemberOffset(GVM, aCpus,  64 + 256 + 512 + 256 + 64);
-#else
-AssertCompileMemberOffset(GVM, rawpci, 64 + 256 + 512);
-AssertCompileMemberOffset(GVM, aCpus,  64 + 256 + 512 + 64);
+#ifdef VBOX_BUGREF_9217
+AssertCompileMemberAlignment(GVM, gvmm,     64);
+AssertCompileMemberAlignment(GVM, gmm,      64);
+# ifdef VBOX_WITH_NEM_R0
+AssertCompileMemberAlignment(GVM, nem,      64);
+# endif
+AssertCompileMemberAlignment(GVM, rawpci,   64);
+AssertCompileMemberAlignment(GVM, aCpus,    4096);
+AssertCompileSizeAlignment(GVM,             4096);
+#else
+AssertCompileMemberOffset(GVM, gvmm,        64);
+AssertCompileMemberOffset(GVM, gmm,         64 + 256);
+# ifdef VBOX_WITH_NEM_R0
+AssertCompileMemberOffset(GVM, nem,         64 + 256 + 512);
+AssertCompileMemberOffset(GVM, rawpci,      64 + 256 + 512 + 256);
+AssertCompileMemberOffset(GVM, aCpus,       64 + 256 + 512 + 256 + 64);
+# else
+AssertCompileMemberOffset(GVM, rawpci,      64 + 256 + 512);
+AssertCompileMemberOffset(GVM, aCpus,       64 + 256 + 512 + 64);
+# endif
 #endif
 
Index: /trunk/include/VBox/vmm/vm.h
===================================================================
--- /trunk/include/VBox/vmm/vm.h	(revision 78430)
+++ /trunk/include/VBox/vmm/vm.h	(revision 78431)
@@ -32,4 +32,5 @@
 #ifndef VBOX_FOR_DTRACE_LIB
 # include <iprt/param.h>
+# include <VBox/param.h>
 # include <VBox/types.h>
 # include <VBox/vmm/cpum.h>
@@ -1198,9 +1199,23 @@
     uint32_t                    uCpuExecutionCap;
 
+#ifdef VBOX_BUGREF_9217
+    /** Structure version number (TBD). */
+    uint32_t                    uStructVersion;
+    /** Size of the VM structure. */
+    uint32_t                    cbSelf;
+    /** Size of the VMCPU structure. */
+    uint32_t                    cbVCpu;
+#else
     /** Size of the VM structure including the VMCPU array. */
     uint32_t                    cbSelf;
-
-    /** Offset to the VMCPU array starting from beginning of this structure. */
+#endif
+
+#ifdef VBOX_WITH_RAW_MODE
+    /** Offset to the VMCPU array starting from beginning of this structure,
+     * for raw-mode assembly code. */
     uint32_t                    offVMCPU;
+#else
+    uint32_t                    u32Unused;
+#endif
 
     /**
@@ -1209,5 +1224,4 @@
      * Depending on how the host handles the rc status given in @a eax, this may
      * return and let the caller resume whatever it was doing prior to the call.
-     *
      *
      * @param   eax         The return code, register.
@@ -1325,5 +1339,9 @@
     /** Padding - the unions must be aligned on a 64 bytes boundary and the unions
      *  must start at the same offset on both 64-bit and 32-bit hosts. */
+#ifdef VBOX_BUGREF_9217
+    uint8_t                     abAlignment3[(HC_ARCH_BITS == 32 ? 24 : 0) + 32];
+#else
     uint8_t                     abAlignment3[(HC_ARCH_BITS == 32 ? 24 : 0) + 40];
+#endif
 
     /** CPUM part. */
@@ -1568,21 +1586,39 @@
     } cfgm;
 
+#ifdef VBOX_BUGREF_9217
+    /** Padding for aligning the structure size on a page boundrary. */
+# if defined(VBOX_WITH_REM) && defined(VBOX_WITH_RAW_MODE)
+    uint8_t         abAlignment2[3670 - sizeof(PVMCPUR3) * VMM_MAX_CPU_COUNT];
+# elif defined(VBOX_WITH_REM) && !defined(VBOX_WITH_RAW_MODE)
+    uint8_t         abAlignment2[1430 - sizeof(PVMCPUR3) * VMM_MAX_CPU_COUNT];
+# elif !defined(VBOX_WITH_REM) && defined(VBOX_WITH_RAW_MODE)
+    uint8_t         abAlignment2[3926 - sizeof(PVMCPUR3) * VMM_MAX_CPU_COUNT];
+# else
+    uint8_t         abAlignment2[1686 - sizeof(PVMCPUR3) * VMM_MAX_CPU_COUNT];
+# endif
+#else
     /** Padding for aligning the cpu array on a page boundary. */
-#if defined(VBOX_WITH_REM) && defined(VBOX_WITH_RAW_MODE)
+# if defined(VBOX_WITH_REM) && defined(VBOX_WITH_RAW_MODE)
     uint8_t         abAlignment2[3670];
-#elif defined(VBOX_WITH_REM) && !defined(VBOX_WITH_RAW_MODE)
+# elif defined(VBOX_WITH_REM) && !defined(VBOX_WITH_RAW_MODE)
     uint8_t         abAlignment2[1430];
-#elif !defined(VBOX_WITH_REM) && defined(VBOX_WITH_RAW_MODE)
+# elif !defined(VBOX_WITH_REM) && defined(VBOX_WITH_RAW_MODE)
     uint8_t         abAlignment2[3926];
-#else
+# else
     uint8_t         abAlignment2[1686];
+# endif
 #endif
 
     /* ---- end small stuff ---- */
 
+#ifdef VBOX_BUGREF_9217
+    /** Array of VMCPU pointers. */
+    PVMCPUR3        apCpus[VMM_MAX_CPU_COUNT];
+#else
     /** VMCPU array for the configured number of virtual CPUs.
      * Must be aligned on a page boundary for TLB hit reasons as well as
      * alignment of VMCPU members. */
     VMCPU           aCpus[1];
+#endif
 } VM;
 
Index: /trunk/src/VBox/HostDrivers/Support/testcase/tstInt.cpp
===================================================================
--- /trunk/src/VBox/HostDrivers/Support/testcase/tstInt.cpp	(revision 78430)
+++ /trunk/src/VBox/HostDrivers/Support/testcase/tstInt.cpp	(revision 78431)
@@ -98,5 +98,7 @@
                 AssertRelease(pVM->pSession == pSession);
                 AssertRelease(pVM->cCpus == 1);
+#ifdef VBOX_WITH_RAW_MODE
                 AssertRelease(pVM->offVMCPU == RT_UOFFSETOF(VM, aCpus));
+#endif
                 pVM->enmVMState = VMSTATE_CREATED;
                 PVMR0 const pVMR0 = pVM->pVMR0;
Index: /trunk/src/VBox/VMM/Config.kmk
===================================================================
--- /trunk/src/VBox/VMM/Config.kmk	(revision 78430)
+++ /trunk/src/VBox/VMM/Config.kmk	(revision 78431)
@@ -82,4 +82,7 @@
  VMM_COMMON_DEFS += VBOX_WITH_MORE_RING0_MEM_MAPPINGS
 endif
+ifdef VBOX_BUGREF_9217
+ VMM_COMMON_DEFS += VBOX_BUGREF_9217
+endif
 
 # VMM_COMMON_DEFS += VBOX_WITH_NS_ACCOUNTING_STATS
Index: /trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp	(revision 78430)
+++ /trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp	(revision 78431)
@@ -24,4 +24,5 @@
 #include "CPUMInternal.h"
 #include <VBox/vmm/vm.h>
+#include <VBox/vmm/gvm.h>
 #include <VBox/err.h>
 #include <VBox/log.h>
@@ -334,6 +335,12 @@
     if (u32DR7 & X86_DR7_ENABLED_MASK)
     {
+#ifdef VBOX_BUGREF_9217
+        PGVM pGVM = (PGVM)pVM;
+        for (VMCPUID i = 0; i < pGVM->cCpusSafe; i++)
+            pGVM->aCpus[i].cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HOST;
+#else
         for (VMCPUID i = 0; i < pVM->cCpus; i++)
             pVM->aCpus[i].cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HOST;
+#endif
         Log(("CPUMR0Init: host uses debug registers (dr7=%x)\n", u32DR7));
     }
Index: /trunk/src/VBox/VMM/VMMR0/EMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/EMR0.cpp	(revision 78430)
+++ /trunk/src/VBox/VMM/VMMR0/EMR0.cpp	(revision 78431)
@@ -39,5 +39,10 @@
  * @param   pVM             The cross context VM structure.
  */
+#ifdef VBOX_BUGREF_9217
+VMMR0_INT_DECL(int) EMR0InitVM(PGVM pGVM)
+# define pVM pGVM /* temp hack */
+#else
 VMMR0_INT_DECL(int) EMR0InitVM(PGVM pGVM, PVM pVM)
+#endif
 {
     /*
@@ -50,5 +55,9 @@
                                   && pVM->aCpus[0].em.s.fExitOptimizationEnabledR0PreemptDisabled
                                   && RTThreadPreemptIsPendingTrusty();
+#ifdef VBOX_BUGREF_9217
+    for (VMCPUID i = 0; i < pGVM->cCpusSafe; i++)
+#else
     for (VMCPUID i = 0; i < pGVM->cCpus; i++)
+#endif
     {
         pVM->aCpus[i].em.s.fExitOptimizationEnabledR0                = fEnabledR0;
Index: /trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp	(revision 78430)
+++ /trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp	(revision 78431)
@@ -360,5 +360,9 @@
 *   Internal Functions                                                                                                           *
 *********************************************************************************************************************************/
+#ifdef VBOX_BUGREF_9217
+static void gvmmR0InitPerVMData(PGVM pGVM, int16_t hSelf, VMCPUID cCpus, PSUPDRVSESSION pSession);
+#else
 static void gvmmR0InitPerVMData(PGVM pGVM);
+#endif
 static DECLCALLBACK(void) gvmmR0HandleObjDestructor(void *pvObj, void *pvGVMM, void *pvHandle);
 static int gvmmR0ByGVMandVM(PGVM pGVM, PVM pVM, PGVMM *ppGVMM, bool fTakeUsedLock);
@@ -888,4 +892,144 @@
                 if (RT_SUCCESS(rc))
                 {
+#ifdef VBOX_BUGREF_9217
+                    /*
+                     * Allocate memory for the VM structure (combined VM + GVM).
+                     */
+                    const uint32_t  cbVM      = RT_UOFFSETOF_DYN(GVM, aCpus[cCpus]);
+                    const uint32_t  cPages    = RT_ALIGN_32(cbVM, PAGE_SIZE) >> PAGE_SHIFT;
+                    RTR0MEMOBJ      hVMMemObj = NIL_RTR0MEMOBJ;
+# if defined(VBOX_WITH_RAW_MODE) || HC_ARCH_BITS == 32
+                    rc = RTR0MemObjAllocLow(&hVMMemObj, cPages << PAGE_SHIFT, false /* fExecutable */);
+# else
+                    rc = RTR0MemObjAllocPage(&hVMMemObj, cPages << PAGE_SHIFT, false /* fExecutable */);
+# endif
+                    if (RT_SUCCESS(rc))
+                    {
+                        PGVM pGVM = (PGVM)RTR0MemObjAddress(hVMMemObj);
+                        AssertPtr(pGVM);
+
+                        /*
+                         * Initialise the structure.
+                         */
+                        RT_BZERO(pGVM, cPages << PAGE_SHIFT);
+                        gvmmR0InitPerVMData(pGVM, iHandle, cCpus, pSession);
+                        GMMR0InitPerVMData(pGVM);
+                        pGVM->gvmm.s.VMMemObj  = hVMMemObj;
+
+                        /*
+                         * Allocate page array.
+                         * This currently has to be made available to ring-3, but this should change eventually.
+                         */
+                        rc = RTR0MemObjAllocPage(&pGVM->gvmm.s.VMPagesMemObj, cPages * sizeof(SUPPAGE), false /* fExecutable */);
+                        if (RT_SUCCESS(rc))
+                        {
+                            PSUPPAGE paPages = (PSUPPAGE)RTR0MemObjAddress(pGVM->gvmm.s.VMPagesMemObj); AssertPtr(paPages);
+                            for (uint32_t iPage = 0; iPage < cPages; iPage++)
+                            {
+                                paPages[iPage].uReserved = 0;
+                                paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pGVM->gvmm.s.VMMemObj, iPage);
+                                Assert(paPages[iPage].Phys != NIL_RTHCPHYS);
+                            }
+
+                            /*
+                             * Map the page array, VM and VMCPU structures into ring-3.
+                             */
+                            AssertCompileSizeAlignment(VM, PAGE_SIZE);
+                            rc = RTR0MemObjMapUserEx(&pGVM->gvmm.s.VMMapObj, pGVM->gvmm.s.VMMemObj, (RTR3PTR)-1, 0,
+                                                     RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS,
+                                                     0 /*offSub*/, sizeof(VM));
+                            for (VMCPUID i = 0; i < cCpus && RT_SUCCESS(rc); i++)
+                            {
+                                AssertCompileSizeAlignment(VMCPU, PAGE_SIZE);
+                                rc = RTR0MemObjMapUserEx(&pGVM->aCpus[i].gvmm.s.VMCpuMapObj, pGVM->gvmm.s.VMMemObj,
+                                                         (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS,
+                                                         RT_UOFFSETOF_DYN(GVM, aCpus[i]), sizeof(VMCPU));
+                            }
+                            if (RT_SUCCESS(rc))
+                                rc = RTR0MemObjMapUser(&pGVM->gvmm.s.VMPagesMapObj, pGVM->gvmm.s.VMPagesMemObj, (RTR3PTR)-1,
+                                                       0 /* uAlignment */, RTMEM_PROT_READ | RTMEM_PROT_WRITE,
+                                                       NIL_RTR0PROCESS);
+                            if (RT_SUCCESS(rc))
+                            {
+                                /*
+                                 * Initialize all the VM pointers.
+                                 */
+                                PVMR3 pVMR3 = RTR0MemObjAddressR3(pGVM->gvmm.s.VMMapObj);
+                                AssertPtr((void *)pVMR3);
+
+                                for (VMCPUID i = 0; i < cCpus; i++)
+                                {
+                                    pGVM->aCpus[i].pVMR0 = pGVM;
+                                    pGVM->aCpus[i].pVMR3 = pVMR3;
+                                    pGVM->apCpus[i] = RTR0MemObjAddressR3(pGVM->aCpus[i].gvmm.s.VMCpuMapObj);
+                                    AssertPtr((void *)pGVM->apCpus[i]);
+                                }
+
+                                pGVM->paVMPagesR3 = RTR0MemObjAddressR3(pGVM->gvmm.s.VMPagesMapObj);
+                                AssertPtr((void *)pGVM->paVMPagesR3);
+
+                                /*
+                                 * Complete the handle - take the UsedLock sem just to be careful.
+                                 */
+                                rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM);
+                                AssertRC(rc);
+
+                                pHandle->pVM                    = pGVM;
+                                pHandle->pGVM                   = pGVM;
+                                pHandle->hEMT0                  = hEMT0;
+                                pHandle->ProcId                 = ProcId;
+                                pGVM->pVMR3                     = pVMR3;
+                                pGVM->aCpus[0].hEMT             = hEMT0;
+                                pGVM->aCpus[0].hNativeThreadR0  = hEMT0;
+                                pGVMM->cEMTs += cCpus;
+
+                                /* Associate it with the session and create the context hook for EMT0. */
+                                rc = SUPR0SetSessionVM(pSession, pGVM, pGVM);
+                                if (RT_SUCCESS(rc))
+                                {
+                                    rc = VMMR0ThreadCtxHookCreateForEmt(&pGVM->aCpus[0]);
+                                    if (RT_SUCCESS(rc))
+                                    {
+                                        /*
+                                         * Done!
+                                         */
+                                        VBOXVMM_R0_GVMM_VM_CREATED(pGVM, pGVM, ProcId, (void *)hEMT0, cCpus);
+
+                                        GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
+                                        gvmmR0CreateDestroyUnlock(pGVMM);
+
+                                        CPUMR0RegisterVCpuThread(&pGVM->aCpus[0]);
+
+                                        *ppVM = pGVM;
+                                        Log(("GVMMR0CreateVM: pVMR3=%p pGVM=%p hGVM=%d\n", pVMR3, pGVM, iHandle));
+                                        return VINF_SUCCESS;
+                                    }
+
+                                    SUPR0SetSessionVM(pSession, NULL, NULL);
+                                }
+                                GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
+                            }
+
+                            /* Cleanup mappings. */
+                            if (pGVM->gvmm.s.VMMapObj != NIL_RTR0MEMOBJ)
+                            {
+                                RTR0MemObjFree(pGVM->gvmm.s.VMMapObj, false /* fFreeMappings */);
+                                pGVM->gvmm.s.VMMapObj = NIL_RTR0MEMOBJ;
+                            }
+                            for (VMCPUID i = 0; i < cCpus; i++)
+                                if (pGVM->aCpus[i].gvmm.s.VMCpuMapObj != NIL_RTR0MEMOBJ)
+                                {
+                                    RTR0MemObjFree(pGVM->aCpus[i].gvmm.s.VMCpuMapObj, false /* fFreeMappings */);
+                                    pGVM->aCpus[i].gvmm.s.VMCpuMapObj = NIL_RTR0MEMOBJ;
+                                }
+                            if (pGVM->gvmm.s.VMPagesMapObj != NIL_RTR0MEMOBJ)
+                            {
+                                RTR0MemObjFree(pGVM->gvmm.s.VMPagesMapObj, false /* fFreeMappings */);
+                                pGVM->gvmm.s.VMPagesMapObj = NIL_RTR0MEMOBJ;
+                            }
+                        }
+                    }
+
+#else
                     /*
                      * Allocate the global VM structure (GVM) and initialize it.
@@ -920,5 +1064,7 @@
                             pVM->cCpus            = cCpus;
                             pVM->uCpuExecutionCap = 100; /* default is no cap. */
+# ifdef VBOX_WITH_RAW_MODE
                             pVM->offVMCPU         = RT_UOFFSETOF_DYN(VM, aCpus);
+# endif
                             AssertCompileMemberAlignment(VM, cpum, 64);
                             AssertCompileMemberAlignment(VM, tm, 64);
@@ -1022,4 +1168,5 @@
                         }
                     }
+#endif
                 }
                 /* else: The user wasn't permitted to create this VM. */
@@ -1035,5 +1182,5 @@
                 SUPR0ObjRelease(pvObj, pSession);
 
-                SUPR0Printf("GVMMR0CreateVM: failed, rc=%d\n", rc);
+                SUPR0Printf("GVMMR0CreateVM: failed, rc=%Rrc\n", rc);
                 return rc;
             }
@@ -1052,4 +1199,5 @@
 
 
+#ifdef VBOX_BUGREF_9217
 /**
  * Initializes the per VM data belonging to GVMM.
@@ -1057,23 +1205,75 @@
  * @param   pGVM        Pointer to the global VM structure.
  */
+static void gvmmR0InitPerVMData(PGVM pGVM, int16_t hSelf, VMCPUID cCpus, PSUPDRVSESSION pSession)
+#else
+/**
+ * Initializes the per VM data belonging to GVMM.
+ *
+ * @param   pGVM        Pointer to the global VM structure.
+ */
 static void gvmmR0InitPerVMData(PGVM pGVM)
+#endif
 {
     AssertCompile(RT_SIZEOFMEMB(GVM,gvmm.s) <= RT_SIZEOFMEMB(GVM,gvmm.padding));
     AssertCompile(RT_SIZEOFMEMB(GVMCPU,gvmm.s) <= RT_SIZEOFMEMB(GVMCPU,gvmm.padding));
-    pGVM->gvmm.s.VMMemObj = NIL_RTR0MEMOBJ;
-    pGVM->gvmm.s.VMMapObj = NIL_RTR0MEMOBJ;
-    pGVM->gvmm.s.VMPagesMemObj = NIL_RTR0MEMOBJ;
-    pGVM->gvmm.s.VMPagesMapObj = NIL_RTR0MEMOBJ;
+#ifdef VBOX_BUGREF_9217
+    AssertCompileMemberAlignment(VM, cpum, 64);
+    AssertCompileMemberAlignment(VM, tm, 64);
+
+    /* GVM: */
+    pGVM->u32Magic         = GVM_MAGIC;
+    pGVM->hSelfSafe        = hSelf;
+    pGVM->cCpusSafe        = cCpus;
+    pGVM->pSessionSafe     = pSession;
+
+    /* VM: */
+    pGVM->enmVMState       = VMSTATE_CREATING;
+    pGVM->pVMR0            = pGVM;
+    pGVM->pSession         = pSession;
+    pGVM->hSelf            = hSelf;
+    pGVM->cCpus            = cCpus;
+    pGVM->uCpuExecutionCap = 100; /* default is no cap. */
+    pGVM->uStructVersion   = 1;
+    pGVM->cbSelf           = sizeof(VM);
+    pGVM->cbVCpu           = sizeof(VMCPU);
+# ifdef VBOX_WITH_RAW_MODE
+    pGVM->offVMCPU         = RT_UOFFSETOF_DYN(GVM, aCpus); /** @todo set this when mapping the VM structure into raw-mode context */
+# endif
+#endif
+
+    /* GVMM: */
+    pGVM->gvmm.s.VMMemObj       = NIL_RTR0MEMOBJ;
+    pGVM->gvmm.s.VMMapObj       = NIL_RTR0MEMOBJ;
+    pGVM->gvmm.s.VMPagesMemObj  = NIL_RTR0MEMOBJ;
+    pGVM->gvmm.s.VMPagesMapObj  = NIL_RTR0MEMOBJ;
     pGVM->gvmm.s.fDoneVMMR0Init = false;
     pGVM->gvmm.s.fDoneVMMR0Term = false;
 
+    /*
+     * Per virtual CPU.
+     */
     for (VMCPUID i = 0; i < pGVM->cCpus; i++)
     {
         pGVM->aCpus[i].idCpu                 = i;
+#ifdef VBOX_BUGREF_9217
+        pGVM->aCpus[i].idCpuSafe             = i;
+#endif
         pGVM->aCpus[i].gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI;
+#ifdef VBOX_BUGREF_9217
+        pGVM->aCpus[i].gvmm.s.VMCpuMapObj    = NIL_RTR0MEMOBJ;
+#endif
         pGVM->aCpus[i].hEMT                  = NIL_RTNATIVETHREAD;
         pGVM->aCpus[i].pGVM                  = pGVM;
+#ifndef VBOX_BUGREF_9217
         pGVM->aCpus[i].pVCpu                 = NULL;
         pGVM->aCpus[i].pVM                   = NULL;
+#endif
+#ifdef VBOX_BUGREF_9217
+        pGVM->aCpus[i].idHostCpu             = NIL_RTCPUID;
+        pGVM->aCpus[i].iHostCpuSet           = UINT32_MAX;
+        pGVM->aCpus[i].hNativeThread         = NIL_RTNATIVETHREAD;
+        pGVM->aCpus[i].hNativeThreadR0       = NIL_RTNATIVETHREAD;
+        pGVM->aCpus[i].enmState              = VMCPUSTATE_STOPPED;
+#endif
     }
 }
@@ -1172,5 +1372,9 @@
     AssertPtrReturn(pVM, VERR_INVALID_POINTER);
     AssertReturn(!((uintptr_t)pVM & PAGE_OFFSET_MASK), VERR_INVALID_POINTER);
+#ifdef VBOX_BUGREF_9217
+    AssertReturn(pGVM == pVM, VERR_INVALID_POINTER);
+#else
     AssertReturn(pGVM->pVM == pVM, VERR_INVALID_POINTER);
+#endif
     AssertMsgReturn(pVM->enmVMState >= VMSTATE_CREATING && pVM->enmVMState <= VMSTATE_TERMINATED, ("%d\n", pVM->enmVMState),
                     VERR_WRONG_ORDER);
@@ -1250,11 +1454,24 @@
     {
         if (    pGVM->gvmm.s.VMMemObj != NIL_RTR0MEMOBJ
-            &&  RTR0MemObjAddress(pGVM->gvmm.s.VMMemObj) == pGVM->pVM)
+#ifdef VBOX_BUGREF_9217
+            &&  RTR0MemObjAddress(pGVM->gvmm.s.VMMemObj) == pGVM
+#else
+            &&  RTR0MemObjAddress(pGVM->gvmm.s.VMMemObj) == pGVM->pVM
+#endif
+           )
         {
             LogFlow(("gvmmR0CleanupVM: Calling VMMR0TermVM\n"));
+#ifdef VBOX_BUGREF_9217
+            VMMR0TermVM(pGVM, pGVM, NIL_VMCPUID);
+#else
             VMMR0TermVM(pGVM, pGVM->pVM, NIL_VMCPUID);
+#endif
         }
         else
+#ifdef VBOX_BUGREF_9217
+            AssertMsgFailed(("gvmmR0CleanupVM: VMMemObj=%p pGVM=%p\n", pGVM->gvmm.s.VMMemObj, pGVM));
+#else
             AssertMsgFailed(("gvmmR0CleanupVM: VMMemObj=%p pVM=%p\n", pGVM->gvmm.s.VMMemObj, pGVM->pVM));
+#endif
     }
 
@@ -1265,5 +1482,9 @@
 
     AssertCompile(NIL_RTTHREADCTXHOOK == (RTTHREADCTXHOOK)0); /* Depends on zero initialized memory working for NIL at the moment. */
+#ifdef VBOX_BUGREF_9217
+    for (VMCPUID idCpu = 0; idCpu < pGVM->cCpusSafe; idCpu++)
+#else
     for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
+#endif
     {
         /** @todo Can we busy wait here for all thread-context hooks to be
@@ -1271,5 +1492,9 @@
          *        solution for not deregistering hooks everytime we're leaving HMR0
          *        context. */
+#ifdef VBOX_BUGREF_9217
+        VMMR0ThreadCtxHookDestroyForEmt(&pGVM->aCpus[idCpu]);
+#else
         VMMR0ThreadCtxHookDestroyForEmt(&pGVM->pVM->aCpus[idCpu]);
+#endif
     }
 }
@@ -1397,4 +1622,5 @@
         }
 
+#ifndef VBOX_BUGREF_9217
         if (pGVM->gvmm.s.VMMemObj != NIL_RTR0MEMOBJ)
         {
@@ -1402,4 +1628,5 @@
             pGVM->gvmm.s.VMMemObj = NIL_RTR0MEMOBJ;
         }
+#endif
 
         for (VMCPUID i = 0; i < pGVM->cCpus; i++)
@@ -1410,9 +1637,22 @@
                 pGVM->aCpus[i].gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI;
             }
+#ifdef VBOX_BUGREF_9217
+            if (pGVM->aCpus[i].gvmm.s.VMCpuMapObj != NIL_RTR0MEMOBJ)
+            {
+                rc = RTR0MemObjFree(pGVM->aCpus[i].gvmm.s.VMCpuMapObj, false /* fFreeMappings */); AssertRC(rc);
+                pGVM->aCpus[i].gvmm.s.VMCpuMapObj = NIL_RTR0MEMOBJ;
+            }
+#endif
         }
 
         /* the GVM structure itself. */
         pGVM->u32Magic |= UINT32_C(0x80000000);
+#ifdef VBOX_BUGREF_9217
+        Assert(pGVM->gvmm.s.VMMemObj != NIL_RTR0MEMOBJ);
+        rc = RTR0MemObjFree(pGVM->gvmm.s.VMMemObj, true /*fFreeMappings*/); AssertRC(rc);
+#else
         RTMemFree(pGVM);
+#endif
+        pGVM = NULL;
 
         /* Re-acquire the UsedLock before freeing the handle since we're updating handle fields. */
@@ -1466,5 +1706,9 @@
             if (pGVM->aCpus[idCpu].hEMT == NIL_RTNATIVETHREAD)
             {
+#ifdef VBOX_BUGREF_9217
+                Assert(pGVM->aCpus[idCpu].hNativeThreadR0 == NIL_RTNATIVETHREAD);
+#else
                 Assert(pVM->aCpus[idCpu].hNativeThreadR0 == NIL_RTNATIVETHREAD);
+#endif
 
                 /* A thread may only be one EMT. */
@@ -1477,4 +1721,13 @@
                      * Do the assignment, then try setup the hook. Undo if that fails.
                      */
+#ifdef VBOX_BUGREF_9217
+                    pGVM->aCpus[idCpu].hNativeThreadR0 = pGVM->aCpus[idCpu].hEMT = RTThreadNativeSelf();
+
+                    rc = VMMR0ThreadCtxHookCreateForEmt(&pGVM->aCpus[idCpu]);
+                    if (RT_SUCCESS(rc))
+                        CPUMR0RegisterVCpuThread(&pGVM->aCpus[idCpu]);
+                    else
+                        pGVM->aCpus[idCpu].hNativeThreadR0 = pGVM->aCpus[idCpu].hEMT = NIL_RTNATIVETHREAD;
+#else
                     pVM->aCpus[idCpu].hNativeThreadR0 = pGVM->aCpus[idCpu].hEMT = RTThreadNativeSelf();
 
@@ -1484,4 +1737,5 @@
                     else
                         pVM->aCpus[idCpu].hNativeThreadR0 = pGVM->aCpus[idCpu].hEMT = NIL_RTNATIVETHREAD;
+#endif
                 }
             }
@@ -1531,5 +1785,9 @@
              * Do per-EMT cleanups.
              */
+#ifdef VBOX_BUGREF_9217
+            VMMR0ThreadCtxHookDestroyForEmt(&pGVM->aCpus[idCpu]);
+#else
             VMMR0ThreadCtxHookDestroyForEmt(&pVM->aCpus[idCpu]);
+#endif
 
             /*
@@ -1539,5 +1797,9 @@
             AssertCompile(~(RTNATIVETHREAD)1 != NIL_RTNATIVETHREAD);
             pGVM->aCpus[idCpu].hEMT           = ~(RTNATIVETHREAD)1;
+#ifdef VBOX_BUGREF_9217
+            pGVM->aCpus[idCpu].hNativeThreadR0 = NIL_RTNATIVETHREAD;
+#else
             pVM->aCpus[idCpu].hNativeThreadR0 = NIL_RTNATIVETHREAD;
+#endif
         }
 
@@ -1573,5 +1835,9 @@
     PGVM pGVM = pHandle->pGVM;
     AssertPtrReturn(pGVM, NULL);
+#ifdef VBOX_BUGREF_9217
+    AssertReturn(pGVM == pHandle->pVM, NULL);
+#else
     AssertReturn(pGVM->pVM == pHandle->pVM, NULL);
+#endif
 
     return pHandle->pGVM;
@@ -1630,4 +1896,11 @@
 
         pGVM = pHandle->pGVM;
+#ifdef VBOX_BUGREF_9217
+        if (RT_UNLIKELY(    pHandle->pVM != pVM
+                        ||  pHandle->ProcId != ProcId
+                        ||  !VALID_PTR(pHandle->pvObj)
+                        ||  !VALID_PTR(pGVM)
+                        ||  pGVM != pVM))
+#else
         if (RT_UNLIKELY(    pHandle->pVM != pVM
                         ||  pHandle->ProcId != ProcId
@@ -1635,4 +1908,5 @@
                         ||  !VALID_PTR(pGVM)
                         ||  pGVM->pVM != pVM))
+#endif
         {
             GVMMR0_USED_SHARED_UNLOCK(pGVMM);
@@ -1652,5 +1926,9 @@
         if (RT_UNLIKELY(!VALID_PTR(pGVM)))
             return VERR_INVALID_HANDLE;
+#ifdef VBOX_BUGREF_9217
+        if (RT_UNLIKELY(pGVM != pVM))
+#else
         if (RT_UNLIKELY(pGVM->pVM != pVM))
+#endif
             return VERR_INVALID_HANDLE;
     }
@@ -1697,5 +1975,9 @@
     PGVM pGVM = pHandle->pGVM;
     AssertPtrReturn(pGVM, NULL);
+#ifdef VBOX_BUGREF_9217
+    AssertReturn(pGVM == pVM, NULL);
+#else
     AssertReturn(pGVM->pVM == pVM, NULL);
+#endif
 
     return pGVM;
@@ -1732,5 +2014,9 @@
                       && ((uintptr_t)pVM & PAGE_OFFSET_MASK) == 0))
         {
+#ifdef VBOX_BUGREF_9217
+            if (RT_LIKELY(pGVM == pVM))
+#else
             if (RT_LIKELY(pGVM->pVM == pVM))
+#endif
             {
                 /*
@@ -1810,5 +2096,9 @@
     AssertPtrReturn(pVM,  VERR_INVALID_POINTER);
     AssertReturn(((uintptr_t)pVM & PAGE_OFFSET_MASK) == 0, VERR_INVALID_POINTER);
+#ifdef VBOX_BUGREF_9217
+    AssertReturn(pGVM == pVM, VERR_INVALID_VM_HANDLE);
+#else
     AssertReturn(pGVM->pVM == pVM, VERR_INVALID_VM_HANDLE);
+#endif
 
 
@@ -2489,5 +2779,9 @@
     {
         if (idCpu < pGVM->cCpus)
+#ifdef VBOX_BUGREF_9217
+            rc = gvmmR0SchedPokeOne(pGVM, &pGVM->aCpus[idCpu]);
+#else
             rc = gvmmR0SchedPokeOne(pGVM, &pVM->aCpus[idCpu]);
+#endif
         else
             rc = VERR_INVALID_CPU_ID;
@@ -2543,5 +2837,9 @@
     {
         if (idCpu < pGVM->cCpus)
+#ifdef VBOX_BUGREF_9217
+            rc = gvmmR0SchedPokeOne(pGVM, &pGVM->aCpus[idCpu]);
+#else
             rc = gvmmR0SchedPokeOne(pGVM, &pVM->aCpus[idCpu]);
+#endif
         else
             rc = VERR_INVALID_CPU_ID;
@@ -2593,5 +2891,9 @@
             else if (VMCPUSET_IS_PRESENT(pPokeSet, idCpu))
             {
+#ifdef VBOX_BUGREF_9217
+                gvmmR0SchedPokeOne(pGVM, &pGVM->aCpus[idCpu]);
+#else
                 gvmmR0SchedPokeOne(pGVM, &pVM->aCpus[idCpu]);
+#endif
                 GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
             }
Index: /trunk/src/VBox/VMM/VMMR0/GVMMR0Internal.h
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/GVMMR0Internal.h	(revision 78430)
+++ /trunk/src/VBox/VMM/VMMR0/GVMMR0Internal.h	(revision 78431)
@@ -34,4 +34,8 @@
     /** The event semaphore the EMT thread is blocking on. */
     RTSEMEVENTMULTI     HaltEventMulti;
+#ifdef VBOX_BUGREF_9217
+    /** The ring-3 mapping of the VMCPU structure. */
+    RTR0MEMOBJ          VMCpuMapObj;
+#endif
     /** The APIC ID of the CPU that EMT was scheduled on the last time we checked. */
     uint8_t             iCpuEmt;
Index: /trunk/src/VBox/VMM/VMMR0/PDMR0Device.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/PDMR0Device.cpp	(revision 78430)
+++ /trunk/src/VBox/VMM/VMMR0/PDMR0Device.cpp	(revision 78431)
@@ -27,4 +27,5 @@
 #include <VBox/vmm/mm.h>
 #include <VBox/vmm/vm.h>
+#include <VBox/vmm/gvm.h>
 #include <VBox/vmm/vmm.h>
 #include <VBox/vmm/patm.h>
@@ -458,6 +459,11 @@
 {
     PDMDEV_ASSERT_DEVINS(pDevIns);
+#ifdef VBOX_BUGREF_9217
+    PGVM   pGVM  = (PGVM)pDevIns->Internal.s.pVMR0;
+    PVMCPU pVCpu = &pGVM->aCpus[0];     /* for PIC we always deliver to CPU 0, MP use APIC */
+#else
     PVM    pVM   = pDevIns->Internal.s.pVMR0;
     PVMCPU pVCpu = &pVM->aCpus[0];      /* for PIC we always deliver to CPU 0, MP use APIC */
+#endif
     /** @todo r=ramshankar: Propagating rcRZ and make all callers handle it? */
     APICLocalInterrupt(pVCpu, 0 /* u8Pin */, 1 /* u8Level */, VINF_SUCCESS /* rcRZ */);
@@ -469,6 +475,11 @@
 {
     PDMDEV_ASSERT_DEVINS(pDevIns);
+#ifdef VBOX_BUGREF_9217
+    PGVM   pGVM  = (PGVM)pDevIns->Internal.s.pVMR0;
+    PVMCPU pVCpu = &pGVM->aCpus[0];     /* for PIC we always deliver to CPU 0, MP use APIC */
+#else
     PVM    pVM   = pDevIns->Internal.s.pVMR0;
     PVMCPU pVCpu = &pVM->aCpus[0];      /* for PIC we always deliver to CPU 0, MP use APIC */
+#endif
     /** @todo r=ramshankar: Propagating rcRZ and make all callers handle it? */
     APICLocalInterrupt(pVCpu, 0 /* u8Pin */, 0 /* u8Level */, VINF_SUCCESS /* rcRZ */);
Index: /trunk/src/VBox/VMM/VMMR0/PGMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/PGMR0.cpp	(revision 78430)
+++ /trunk/src/VBox/VMM/VMMR0/PGMR0.cpp	(revision 78431)
@@ -24,7 +24,7 @@
 #include <VBox/vmm/pgm.h>
 #include <VBox/vmm/gmm.h>
-#include <VBox/vmm/gvm.h>
 #include "PGMInternal.h"
 #include <VBox/vmm/vm.h>
+#include <VBox/vmm/gvm.h>
 #include "PGMInline.h"
 #include <VBox/log.h>
@@ -78,5 +78,9 @@
     AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID); /* caller already checked this, but just to be sure. */
     AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_NOT_OWNER);
+#ifdef VBOX_BUGREF_9217
+    PGM_LOCK_ASSERT_OWNER_EX(pVM, &pGVM->aCpus[idCpu]);
+#else
     PGM_LOCK_ASSERT_OWNER_EX(pVM, &pVM->aCpus[idCpu]);
+#endif
 
     /*
@@ -201,5 +205,9 @@
     AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID); /* caller already checked this, but just to be sure. */
     AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_NOT_OWNER);
+#ifdef VBOX_BUGREF_9217
+    PGM_LOCK_ASSERT_OWNER_EX(pVM, &pGVM->aCpus[idCpu]);
+#else
     PGM_LOCK_ASSERT_OWNER_EX(pVM, &pVM->aCpus[idCpu]);
+#endif
 
     /*
@@ -241,5 +249,9 @@
     AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID); /* caller already checked this, but just to be sure. */
     AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_NOT_OWNER);
+#ifdef VBOX_BUGREF_9217
+    PGM_LOCK_ASSERT_OWNER_EX(pVM, &pGVM->aCpus[idCpu]);
+#else
     PGM_LOCK_ASSERT_OWNER_EX(pVM, &pVM->aCpus[idCpu]);
+#endif
     Assert(!pVM->pgm.s.cLargeHandyPages);
 
Index: /trunk/src/VBox/VMM/VMMR0/PGMR0SharedPage.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/PGMR0SharedPage.cpp	(revision 78430)
+++ /trunk/src/VBox/VMM/VMMR0/PGMR0SharedPage.cpp	(revision 78431)
@@ -25,4 +25,5 @@
 #include "PGMInternal.h"
 #include <VBox/vmm/vm.h>
+#include <VBox/vmm/gvm.h>
 #include "PGMInline.h"
 #include <VBox/log.h>
@@ -50,5 +51,9 @@
 VMMR0DECL(int) PGMR0SharedModuleCheck(PVM pVM, PGVM pGVM, VMCPUID idCpu, PGMMSHAREDMODULE pModule, PCRTGCPTR64 paRegionsGCPtrs)
 {
+#ifdef VBOX_BUGREF_9217
+    PVMCPU              pVCpu         = &pGVM->aCpus[idCpu];
+#else
     PVMCPU              pVCpu         = &pVM->aCpus[idCpu];
+#endif
     int                 rc            = VINF_SUCCESS;
     bool                fFlushTLBs    = false;
@@ -162,6 +167,11 @@
 
     if (fFlushRemTLBs)
+#ifdef VBOX_BUGREF_9217
+        for (VMCPUID idCurCpu = 0; idCurCpu < pGVM->cCpus; idCurCpu++)
+            CPUMSetChangedFlags(&pGVM->aCpus[idCurCpu], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
+#else
         for (VMCPUID idCurCpu = 0; idCurCpu < pVM->cCpus; idCurCpu++)
             CPUMSetChangedFlags(&pVM->aCpus[idCurCpu], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
+#endif
 
     return rc;
Index: /trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/VMMR0.cpp	(revision 78430)
+++ /trunk/src/VBox/VMM/VMMR0/VMMR0.cpp	(revision 78431)
@@ -395,5 +395,9 @@
      * Register the EMT R0 logger instance for VCPU 0.
      */
+#ifdef VBOX_BUGREF_9217
+    PVMCPU pVCpu = &pGVM->aCpus[0];
+#else
     PVMCPU pVCpu = &pVM->aCpus[0];
+#endif
 
     PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
@@ -471,5 +475,9 @@
                 {
                     VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+#ifdef VBOX_BUGREF_9217
+                    rc = EMR0InitVM(pGVM);
+#else
                     rc = EMR0InitVM(pGVM, pVM);
+#endif
                     if (RT_SUCCESS(rc))
                     {
@@ -536,5 +544,9 @@
      * Registration of ring 0 loggers.
      */
+#ifdef VBOX_BUGREF_9217
+    PVMCPU       pVCpu     = &pGVM->aCpus[idCpu];
+#else
     PVMCPU       pVCpu     = &pVM->aCpus[idCpu];
+#endif
     PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
     if (   pR0Logger
@@ -717,5 +729,9 @@
 static int vmmR0DoHalt(PGVM pGVM, PVM pVM, PGVMCPU pGVCpu, PVMCPU pVCpu)
 {
+#ifdef VBOX_BUGREF_9217
+    Assert(pVCpu == pGVCpu);
+#else
     Assert(pVCpu == pGVCpu->pVCpu);
+#endif
 
     /*
@@ -1300,5 +1316,9 @@
 
     PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
+#ifdef VBOX_BUGREF_9217
+    PVMCPU  pVCpu  = pGVCpu;
+#else
     PVMCPU  pVCpu  = &pVM->aCpus[idCpu];
+#endif
     RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
     if (RT_LIKELY(   pGVCpu->hEMT           == hNativeThread
@@ -1754,9 +1774,17 @@
         }
 
+#ifdef VBOX_BUGREF_9217
+        if (RT_LIKELY(pGVM == pVM))
+#else
         if (RT_LIKELY(pGVM->pVM == pVM))
+#endif
         { /* likely */ }
         else
         {
+#ifdef VBOX_BUGREF_9217
+            SUPR0Printf("vmmR0EntryExWorker: pVM mismatch: got %p, pGVM/pVM=%p\n", pVM, pGVM);
+#else
             SUPR0Printf("vmmR0EntryExWorker: pVM mismatch: got %p, pGVM->pVM=%p\n", pVM, pGVM->pVM);
+#endif
             return VERR_INVALID_PARAMETER;
         }
@@ -1942,5 +1970,9 @@
             if (RT_UNLIKELY(pVM->cCpus != 1))
                 return VERR_INVALID_PARAMETER;
+# ifdef VBOX_BUGREF_9217
+            PVMCPU pVCpu = &pGVM->aCpus[idCpu];
+# else
             PVMCPU pVCpu = &pVM->aCpus[idCpu];
+# endif
 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
             if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
@@ -2513,5 +2545,9 @@
             {
                 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
+#ifdef VBOX_BUGREF_9217
+                PVMCPU  pVCpu  = pGVCpu;
+#else
                 PVMCPU  pVCpu  = &pVM->aCpus[idCpu];
+#endif
                 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
                 if (RT_LIKELY(   pGVCpu->hEMT           == hNativeThread
@@ -2696,5 +2732,9 @@
     if (pGVCpu)
     {
+#ifdef VBOX_BUGREF_9217
+        PVMCPU pVCpu = pGVCpu;
+#else
         PVMCPU pVCpu = pGVCpu->pVCpu;
+#endif
         if (RT_VALID_PTR(pVCpu))
         {
@@ -2703,5 +2743,10 @@
             {
                 if (   pVmmLogger->fCreated
-                    && pVmmLogger->pVM == pGVCpu->pVM)
+#ifdef VBOX_BUGREF_9217
+                    && pVmmLogger->pVM == pGVCpu->pGVM
+#else
+                    && pVmmLogger->pVM == pGVCpu->pVM
+#endif
+                   )
                 {
                     if (pVmmLogger->Logger.fFlags & RTLOGFLAGS_DISABLED)
Index: /trunk/src/VBox/VMM/VMMR3/VM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/VM.cpp	(revision 78430)
+++ /trunk/src/VBox/VMM/VMMR3/VM.cpp	(revision 78431)
@@ -593,5 +593,7 @@
         AssertRelease(pVM->cCpus == cCpus);
         AssertRelease(pVM->uCpuExecutionCap == 100);
+#ifdef VBOX_WITH_RAW_MODE
         AssertRelease(pVM->offVMCPU == RT_UOFFSETOF(VM, aCpus));
+#endif
         AssertCompileMemberAlignment(VM, cpum, 64);
         AssertCompileMemberAlignment(VM, tm, 64);
