Index: /trunk/include/VBox/vmm/pgm.h
===================================================================
--- /trunk/include/VBox/vmm/pgm.h	(revision 71080)
+++ /trunk/include/VBox/vmm/pgm.h	(revision 71081)
@@ -710,4 +710,6 @@
  *          Failure status will stop enumeration immediately and return.
  * @param   pVM         The cross context VM structure.
+ * @param   pVCpu       The cross context per virtual CPU structure.  Optional,
+ *                      see PGMPhysNemEnumPagesByState.
  * @param   GCPhys      The guest physical address (not A20 masked).
  * @param   pu2NemState Pointer to variable with the NEM state.  This can be
@@ -715,8 +717,9 @@
  * @param   pvUser      The user argument.
  */
-typedef DECLCALLBACK(int) FNPGMPHYSNEMENUMCALLBACK(PVM pVM, RTGCPHYS GCPhys, uint8_t *pu2NemState, void *pvUser);
+typedef DECLCALLBACK(int) FNPGMPHYSNEMENUMCALLBACK(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint8_t *pu2NemState, void *pvUser);
 /** Pointer to a FNPGMPHYSNEMENUMCALLBACK function. */
 typedef FNPGMPHYSNEMENUMCALLBACK *PFNPGMPHYSNEMENUMCALLBACK;
-VMM_INT_DECL(int) PGMPhysNemEnumPagesByState(PVM pVM, uint8_t uMinState, PFNPGMPHYSNEMENUMCALLBACK pfnCallback, void *pvUser);
+VMM_INT_DECL(int) PGMPhysNemEnumPagesByState(PVM pVM, PVMCPU pVCpu, uint8_t uMinState,
+                                             PFNPGMPHYSNEMENUMCALLBACK pfnCallback, void *pvUser);
 
 
Index: /trunk/include/VBox/vmm/vmm.h
===================================================================
--- /trunk/include/VBox/vmm/vmm.h	(revision 71080)
+++ /trunk/include/VBox/vmm/vmm.h	(revision 71081)
@@ -291,4 +291,208 @@
 
 
+/** @defgroup grp_vmm_api_r0    The VMM Host Context Ring 0 API
+ * @{
+ */
+
+/**
+ * The VMMR0Entry() codes.
+ */
+typedef enum VMMR0OPERATION
+{
+    /** Run guest context. */
+    VMMR0_DO_RAW_RUN = SUP_VMMR0_DO_RAW_RUN,
+    /** Run guest code using the available hardware acceleration technology. */
+    VMMR0_DO_HM_RUN = SUP_VMMR0_DO_HM_RUN,
+    /** Official NOP that we use for profiling. */
+    VMMR0_DO_NOP = SUP_VMMR0_DO_NOP,
+    /** Official slow ioctl NOP that we use for profiling. */
+    VMMR0_DO_SLOW_NOP,
+
+    /** Ask the GVMM to create a new VM. */
+    VMMR0_DO_GVMM_CREATE_VM = 32,
+    /** Ask the GVMM to destroy the VM. */
+    VMMR0_DO_GVMM_DESTROY_VM,
+    /** Call GVMMR0RegisterVCpu(). */
+    VMMR0_DO_GVMM_REGISTER_VMCPU,
+    /** Call GVMMR0DeregisterVCpu(). */
+    VMMR0_DO_GVMM_DEREGISTER_VMCPU,
+    /** Call GVMMR0SchedHalt(). */
+    VMMR0_DO_GVMM_SCHED_HALT,
+    /** Call GVMMR0SchedWakeUp(). */
+    VMMR0_DO_GVMM_SCHED_WAKE_UP,
+    /** Call GVMMR0SchedPoke(). */
+    VMMR0_DO_GVMM_SCHED_POKE,
+    /** Call GVMMR0SchedWakeUpAndPokeCpus(). */
+    VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS,
+    /** Call GVMMR0SchedPoll(). */
+    VMMR0_DO_GVMM_SCHED_POLL,
+    /** Call GVMMR0QueryStatistics(). */
+    VMMR0_DO_GVMM_QUERY_STATISTICS,
+    /** Call GVMMR0ResetStatistics(). */
+    VMMR0_DO_GVMM_RESET_STATISTICS,
+
+    /** Call VMMR0 Per VM Init. */
+    VMMR0_DO_VMMR0_INIT = 64,
+    /** Call VMMR0 Per VM Termination. */
+    VMMR0_DO_VMMR0_TERM,
+
+    /** Setup the hardware accelerated raw-mode session. */
+    VMMR0_DO_HM_SETUP_VM = 128,
+    /** Attempt to enable or disable hardware accelerated raw-mode. */
+    VMMR0_DO_HM_ENABLE,
+
+    /** Call PGMR0PhysAllocateHandyPages(). */
+    VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES = 192,
+    /** Call PGMR0PhysFlushHandyPages(). */
+    VMMR0_DO_PGM_FLUSH_HANDY_PAGES,
+    /** Call PGMR0AllocateLargePage(). */
+    VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE,
+    /** Call PGMR0PhysSetupIommu(). */
+    VMMR0_DO_PGM_PHYS_SETUP_IOMMU,
+
+    /** Call GMMR0InitialReservation(). */
+    VMMR0_DO_GMM_INITIAL_RESERVATION = 256,
+    /** Call GMMR0UpdateReservation(). */
+    VMMR0_DO_GMM_UPDATE_RESERVATION,
+    /** Call GMMR0AllocatePages(). */
+    VMMR0_DO_GMM_ALLOCATE_PAGES,
+    /** Call GMMR0FreePages(). */
+    VMMR0_DO_GMM_FREE_PAGES,
+    /** Call GMMR0FreeLargePage(). */
+    VMMR0_DO_GMM_FREE_LARGE_PAGE,
+    /** Call GMMR0QueryHypervisorMemoryStatsReq(). */
+    VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS,
+    /** Call GMMR0QueryMemoryStatsReq(). */
+    VMMR0_DO_GMM_QUERY_MEM_STATS,
+    /** Call GMMR0BalloonedPages(). */
+    VMMR0_DO_GMM_BALLOONED_PAGES,
+    /** Call GMMR0MapUnmapChunk(). */
+    VMMR0_DO_GMM_MAP_UNMAP_CHUNK,
+    /** Call GMMR0SeedChunk(). */
+    VMMR0_DO_GMM_SEED_CHUNK,
+    /** Call GMMR0RegisterSharedModule. */
+    VMMR0_DO_GMM_REGISTER_SHARED_MODULE,
+    /** Call GMMR0UnregisterSharedModule. */
+    VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE,
+    /** Call GMMR0ResetSharedModules. */
+    VMMR0_DO_GMM_RESET_SHARED_MODULES,
+    /** Call GMMR0CheckSharedModules. */
+    VMMR0_DO_GMM_CHECK_SHARED_MODULES,
+    /** Call GMMR0FindDuplicatePage. */
+    VMMR0_DO_GMM_FIND_DUPLICATE_PAGE,
+    /** Call GMMR0QueryStatistics(). */
+    VMMR0_DO_GMM_QUERY_STATISTICS,
+    /** Call GMMR0ResetStatistics(). */
+    VMMR0_DO_GMM_RESET_STATISTICS,
+
+    /** Call PDMR0DriverCallReqHandler. */
+    VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER = 320,
+    /** Call PDMR0DeviceCallReqHandler. */
+    VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER,
+
+    /** Calls function in the hypervisor.
+     * The caller must setup the hypervisor context so the call will be performed.
+     * The difference between VMMR0_DO_RUN_GC and this one is the handling of
+     * the return GC code. The return code will not be interpreted by this operation.
+     */
+    VMMR0_DO_CALL_HYPERVISOR = 384,
+
+    /** Set a GVMM or GMM configuration value. */
+    VMMR0_DO_GCFGM_SET_VALUE = 400,
+    /** Query a GVMM or GMM configuration value. */
+    VMMR0_DO_GCFGM_QUERY_VALUE,
+
+    /** The start of the R0 service operations. */
+    VMMR0_DO_SRV_START = 448,
+    /** Call IntNetR0Open(). */
+    VMMR0_DO_INTNET_OPEN,
+    /** Call IntNetR0IfClose(). */
+    VMMR0_DO_INTNET_IF_CLOSE,
+    /** Call IntNetR0IfGetBufferPtrs(). */
+    VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS,
+    /** Call IntNetR0IfSetPromiscuousMode(). */
+    VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE,
+    /** Call IntNetR0IfSetMacAddress(). */
+    VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS,
+    /** Call IntNetR0IfSetActive(). */
+    VMMR0_DO_INTNET_IF_SET_ACTIVE,
+    /** Call IntNetR0IfSend(). */
+    VMMR0_DO_INTNET_IF_SEND,
+    /** Call IntNetR0IfWait(). */
+    VMMR0_DO_INTNET_IF_WAIT,
+    /** Call IntNetR0IfAbortWait(). */
+    VMMR0_DO_INTNET_IF_ABORT_WAIT,
+
+    /** Forward call to the PCI driver */
+    VMMR0_DO_PCIRAW_REQ = 512,
+
+    /** The end of the R0 service operations. */
+    VMMR0_DO_SRV_END,
+
+    /** Call NEMR0InitVM() (host specific). */
+    VMMR0_DO_NEM_INIT_VM = 576,
+    /** Call NEMR0MapPages() (host specific). */
+    VMMR0_DO_NEM_MAP_PAGES,
+    /** Call NEMR0UnmapPages() (host specific). */
+    VMMR0_DO_NEM_UNMAP_PAGES,
+
+    /** Official call we use for testing Ring-0 APIs. */
+    VMMR0_DO_TESTS = 640,
+    /** Test the 32->64 bits switcher. */
+    VMMR0_DO_TEST_SWITCHER3264,
+
+    /** The usual 32-bit type blow up. */
+    VMMR0_DO_32BIT_HACK = 0x7fffffff
+} VMMR0OPERATION;
+
+
+/**
+ * Request buffer for VMMR0_DO_GCFGM_SET_VALUE and VMMR0_DO_GCFGM_QUERY_VALUE.
+ * @todo Move to GCFGM.h when it's implemented.
+ */
+typedef struct GCFGMVALUEREQ
+{
+    /** The request header.*/
+    SUPVMMR0REQHDR      Hdr;
+    /** The support driver session handle. */
+    PSUPDRVSESSION      pSession;
+    /** The value.
+     * This is input for the set request and output for the query. */
+    uint64_t            u64Value;
+    /** The variable name.
+     * This is fixed sized just to make things simple for the mock-up. */
+    char                szName[48];
+} GCFGMVALUEREQ;
+/** Pointer to a VMMR0_DO_GCFGM_SET_VALUE and VMMR0_DO_GCFGM_QUERY_VALUE request buffer.
+ * @todo Move to GCFGM.h when it's implemented.
+ */
+typedef GCFGMVALUEREQ *PGCFGMVALUEREQ;
+
+#if defined(IN_RING0) || defined(DOXYGEN_RUNNING)
+VMMR0DECL(void)      VMMR0EntryFast(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation);
+VMMR0DECL(int)       VMMR0EntryEx(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
+                                  PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION);
+VMMR0_INT_DECL(int)  VMMR0TermVM(PGVM pGVM, PVM pVM, VMCPUID idCpu);
+VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu);
+VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu);
+VMMR0_INT_DECL(int)  VMMR0ThreadCtxHookCreateForEmt(PVMCPU pVCpu);
+VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPU pVCpu);
+VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPU pVCpu);
+VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPU pVCpu);
+
+# ifdef LOG_ENABLED
+VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu);
+VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu);
+VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu);
+# else
+#  define            VMMR0LogFlushDisable(pVCpu)     do { } while(0)
+#  define            VMMR0LogFlushEnable(pVCpu)      do { } while(0)
+#  define            VMMR0IsLogFlushDisabled(pVCpu)  (true)
+# endif /* LOG_ENABLED */
+#endif /* IN_RING0 */
+
+/** @} */
+
+
 #if defined(IN_RING3) || defined(DOXYGEN_RUNNING)
 /** @defgroup grp_vmm_api_r3    The VMM Host Context Ring 3 API
@@ -317,4 +521,5 @@
 # endif
 VMMR3DECL(int)          VMMR3CallR0(PVM pVM, uint32_t uOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr);
+VMMR3_INT_DECL(int)     VMMR3CallR0Emt(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr);
 VMMR3DECL(void)         VMMR3FatalDump(PVM pVM, PVMCPU pVCpu, int rcErr);
 VMMR3_INT_DECL(void)    VMMR3YieldSuspend(PVM pVM);
@@ -362,208 +567,4 @@
 
 
-/** @defgroup grp_vmm_api_r0    The VMM Host Context Ring 0 API
- * @{
- */
-
-/**
- * The VMMR0Entry() codes.
- */
-typedef enum VMMR0OPERATION
-{
-    /** Run guest context. */
-    VMMR0_DO_RAW_RUN = SUP_VMMR0_DO_RAW_RUN,
-    /** Run guest code using the available hardware acceleration technology. */
-    VMMR0_DO_HM_RUN = SUP_VMMR0_DO_HM_RUN,
-    /** Official NOP that we use for profiling. */
-    VMMR0_DO_NOP = SUP_VMMR0_DO_NOP,
-    /** Official slow iocl NOP that we use for profiling. */
-    VMMR0_DO_SLOW_NOP,
-
-    /** Ask the GVMM to create a new VM. */
-    VMMR0_DO_GVMM_CREATE_VM = 32,
-    /** Ask the GVMM to destroy the VM. */
-    VMMR0_DO_GVMM_DESTROY_VM,
-    /** Call GVMMR0RegisterVCpu(). */
-    VMMR0_DO_GVMM_REGISTER_VMCPU,
-    /** Call GVMMR0DeregisterVCpu(). */
-    VMMR0_DO_GVMM_DEREGISTER_VMCPU,
-    /** Call GVMMR0SchedHalt(). */
-    VMMR0_DO_GVMM_SCHED_HALT,
-    /** Call GVMMR0SchedWakeUp(). */
-    VMMR0_DO_GVMM_SCHED_WAKE_UP,
-    /** Call GVMMR0SchedPoke(). */
-    VMMR0_DO_GVMM_SCHED_POKE,
-    /** Call GVMMR0SchedWakeUpAndPokeCpus(). */
-    VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS,
-    /** Call GVMMR0SchedPoll(). */
-    VMMR0_DO_GVMM_SCHED_POLL,
-    /** Call GVMMR0QueryStatistics(). */
-    VMMR0_DO_GVMM_QUERY_STATISTICS,
-    /** Call GVMMR0ResetStatistics(). */
-    VMMR0_DO_GVMM_RESET_STATISTICS,
-
-    /** Call VMMR0 Per VM Init. */
-    VMMR0_DO_VMMR0_INIT = 64,
-    /** Call VMMR0 Per VM Termination. */
-    VMMR0_DO_VMMR0_TERM,
-
-    /** Setup the hardware accelerated raw-mode session. */
-    VMMR0_DO_HM_SETUP_VM = 128,
-    /** Attempt to enable or disable hardware accelerated raw-mode. */
-    VMMR0_DO_HM_ENABLE,
-
-    /** Call PGMR0PhysAllocateHandyPages(). */
-    VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES = 192,
-    /** Call PGMR0PhysFlushHandyPages(). */
-    VMMR0_DO_PGM_FLUSH_HANDY_PAGES,
-    /** Call PGMR0AllocateLargePage(). */
-    VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE,
-    /** Call PGMR0PhysSetupIommu(). */
-    VMMR0_DO_PGM_PHYS_SETUP_IOMMU,
-
-    /** Call GMMR0InitialReservation(). */
-    VMMR0_DO_GMM_INITIAL_RESERVATION = 256,
-    /** Call GMMR0UpdateReservation(). */
-    VMMR0_DO_GMM_UPDATE_RESERVATION,
-    /** Call GMMR0AllocatePages(). */
-    VMMR0_DO_GMM_ALLOCATE_PAGES,
-    /** Call GMMR0FreePages(). */
-    VMMR0_DO_GMM_FREE_PAGES,
-    /** Call GMMR0FreeLargePage(). */
-    VMMR0_DO_GMM_FREE_LARGE_PAGE,
-    /** Call GMMR0QueryHypervisorMemoryStatsReq(). */
-    VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS,
-    /** Call GMMR0QueryMemoryStatsReq(). */
-    VMMR0_DO_GMM_QUERY_MEM_STATS,
-    /** Call GMMR0BalloonedPages(). */
-    VMMR0_DO_GMM_BALLOONED_PAGES,
-    /** Call GMMR0MapUnmapChunk(). */
-    VMMR0_DO_GMM_MAP_UNMAP_CHUNK,
-    /** Call GMMR0SeedChunk(). */
-    VMMR0_DO_GMM_SEED_CHUNK,
-    /** Call GMMR0RegisterSharedModule. */
-    VMMR0_DO_GMM_REGISTER_SHARED_MODULE,
-    /** Call GMMR0UnregisterSharedModule. */
-    VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE,
-    /** Call GMMR0ResetSharedModules. */
-    VMMR0_DO_GMM_RESET_SHARED_MODULES,
-    /** Call GMMR0CheckSharedModules. */
-    VMMR0_DO_GMM_CHECK_SHARED_MODULES,
-    /** Call GMMR0FindDuplicatePage. */
-    VMMR0_DO_GMM_FIND_DUPLICATE_PAGE,
-    /** Call GMMR0QueryStatistics(). */
-    VMMR0_DO_GMM_QUERY_STATISTICS,
-    /** Call GMMR0ResetStatistics(). */
-    VMMR0_DO_GMM_RESET_STATISTICS,
-
-    /** Call PDMR0DriverCallReqHandler. */
-    VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER = 320,
-    /** Call PDMR0DeviceCallReqHandler. */
-    VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER,
-
-    /** Calls function in the hypervisor.
-     * The caller must setup the hypervisor context so the call will be performed.
-     * The difference between VMMR0_DO_RUN_GC and this one is the handling of
-     * the return GC code. The return code will not be interpreted by this operation.
-     */
-    VMMR0_DO_CALL_HYPERVISOR = 384,
-
-    /** Set a GVMM or GMM configuration value. */
-    VMMR0_DO_GCFGM_SET_VALUE = 400,
-    /** Query a GVMM or GMM configuration value. */
-    VMMR0_DO_GCFGM_QUERY_VALUE,
-
-    /** The start of the R0 service operations. */
-    VMMR0_DO_SRV_START = 448,
-    /** Call IntNetR0Open(). */
-    VMMR0_DO_INTNET_OPEN,
-    /** Call IntNetR0IfClose(). */
-    VMMR0_DO_INTNET_IF_CLOSE,
-    /** Call IntNetR0IfGetBufferPtrs(). */
-    VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS,
-    /** Call IntNetR0IfSetPromiscuousMode(). */
-    VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE,
-    /** Call IntNetR0IfSetMacAddress(). */
-    VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS,
-    /** Call IntNetR0IfSetActive(). */
-    VMMR0_DO_INTNET_IF_SET_ACTIVE,
-    /** Call IntNetR0IfSend(). */
-    VMMR0_DO_INTNET_IF_SEND,
-    /** Call IntNetR0IfWait(). */
-    VMMR0_DO_INTNET_IF_WAIT,
-    /** Call IntNetR0IfAbortWait(). */
-    VMMR0_DO_INTNET_IF_ABORT_WAIT,
-
-    /** Forward call to the PCI driver */
-    VMMR0_DO_PCIRAW_REQ = 512,
-
-    /** The end of the R0 service operations. */
-    VMMR0_DO_SRV_END,
-
-    /** Call NEMR0InitVM() (host specific). */
-    VMMR0_DO_NEM_INIT_VM = 576,
-    /** Call NEMR0MapPages() (host specific). */
-    VMMR0_DO_NEM_MAP_PAGES,
-    /** Call NEMR0UnmapPages() (host specific). */
-    VMMR0_DO_NEM_UNMAP_PAGES,
-
-    /** Official call we use for testing Ring-0 APIs. */
-    VMMR0_DO_TESTS = 640,
-    /** Test the 32->64 bits switcher. */
-    VMMR0_DO_TEST_SWITCHER3264,
-
-    /** The usual 32-bit type blow up. */
-    VMMR0_DO_32BIT_HACK = 0x7fffffff
-} VMMR0OPERATION;
-
-
-/**
- * Request buffer for VMMR0_DO_GCFGM_SET_VALUE and VMMR0_DO_GCFGM_QUERY_VALUE.
- * @todo Move got GCFGM.h when it's implemented.
- */
-typedef struct GCFGMVALUEREQ
-{
-    /** The request header.*/
-    SUPVMMR0REQHDR      Hdr;
-    /** The support driver session handle. */
-    PSUPDRVSESSION      pSession;
-    /** The value.
-     * This is input for the set request and output for the query. */
-    uint64_t            u64Value;
-    /** The variable name.
-     * This is fixed sized just to make things simple for the mock-up. */
-    char                szName[48];
-} GCFGMVALUEREQ;
-/** Pointer to a VMMR0_DO_GCFGM_SET_VALUE and VMMR0_DO_GCFGM_QUERY_VALUE request buffer.
- * @todo Move got GCFGM.h when it's implemented.
- */
-typedef GCFGMVALUEREQ *PGCFGMVALUEREQ;
-
-#if defined(IN_RING0) || defined(DOXYGEN_RUNNING)
-VMMR0DECL(void)      VMMR0EntryFast(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation);
-VMMR0DECL(int)       VMMR0EntryEx(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
-                                  PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION);
-VMMR0_INT_DECL(int)  VMMR0TermVM(PGVM pGVM, PVM pVM, VMCPUID idCpu);
-VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu);
-VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu);
-VMMR0_INT_DECL(int)  VMMR0ThreadCtxHookCreateForEmt(PVMCPU pVCpu);
-VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPU pVCpu);
-VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPU pVCpu);
-VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPU pVCpu);
-
-# ifdef LOG_ENABLED
-VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu);
-VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu);
-VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu);
-# else
-#  define            VMMR0LogFlushDisable(pVCpu)     do { } while(0)
-#  define            VMMR0LogFlushEnable(pVCpu)      do { } while(0)
-#  define            VMMR0IsLogFlushDisabled(pVCpu)  (true)
-# endif /* LOG_ENABLED */
-#endif /* IN_RING0 */
-
-/** @} */
-
-
 #if defined(IN_RC) || defined(DOXYGEN_RUNNING)
 /** @defgroup grp_vmm_api_rc    The VMM Raw-Mode Context API
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp	(revision 71080)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp	(revision 71081)
@@ -4858,9 +4858,12 @@
  * @returns VBox status code from callback.
  * @param   pVM             The cross context VM structure.
+ * @param   pVCpu           The cross context per virtual CPU structure.  This
+ *                          is optional as it's only for passing to the callback.
  * @param   uMinState       The minimum NEM state value to call on.
  * @param   pfnCallback     The callback function.
  * @param   pvUser          User argument for the callback.
  */
-VMM_INT_DECL(int) PGMPhysNemEnumPagesByState(PVM pVM, uint8_t uMinState, PFNPGMPHYSNEMENUMCALLBACK pfnCallback, void *pvUser)
+VMM_INT_DECL(int) PGMPhysNemEnumPagesByState(PVM pVM, PVMCPU pVCpu, uint8_t uMinState,
+                                             PFNPGMPHYSNEMENUMCALLBACK pfnCallback, void *pvUser)
 {
     /*
@@ -4879,5 +4882,5 @@
             else
             {
-                rc = pfnCallback(pVM, pRam->GCPhys + ((RTGCPHYS)iPage << X86_PAGE_SHIFT), &u2State, pvUser);
+                rc = pfnCallback(pVM, pVCpu, pRam->GCPhys + ((RTGCPHYS)iPage << X86_PAGE_SHIFT), &u2State, pvUser);
                 if (RT_SUCCESS(rc))
                     PGM_PAGE_SET_NEM_STATE(&pRam->aPages[iPage], u2State);
Index: /trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp	(revision 71080)
+++ /trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp	(revision 71081)
@@ -223,5 +223,5 @@
 *   Internal Functions                                                                                                           *
 *********************************************************************************************************************************/
-static int nemR3NativeSetPhysPage(PVM pVM, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fPageProt,
+static int nemR3NativeSetPhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fPageProt,
                                   uint8_t *pu2State, bool fBackingChanged);
 
@@ -985,4 +985,44 @@
 
 
+#ifdef NEM_WIN_USE_HYPERCALLS
+
+/**
+ * Wrapper around VMMR0_DO_NEM_MAP_PAGES for a single page.
+ *
+ * @returns VBox status code.
+ * @param   pVM         The cross context VM structure.
+ * @param   pVCpu       The cross context virtual CPU structure of the caller.
+ * @param   GCPhysSrc   The source page.  Does not need to be page aligned.
+ * @param   GCPhysDst   The destination page.  Same as @a GCPhysSrc except for
+ *                      when A20 is disabled.
+ * @param   fFlags      HV_MAP_GPA_XXX.
+ */
+DECLINLINE(int) nemR3WinHypercallMapPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fFlags)
+{
+    pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc   = GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
+    pVCpu->nem.s.Hypercall.MapPages.GCPhysDst   = GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
+    pVCpu->nem.s.Hypercall.MapPages.cPages      = 1;
+    pVCpu->nem.s.Hypercall.MapPages.fFlags      = fFlags;
+    return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_MAP_PAGES, 0, NULL);
+}
+
+
+/**
+ * Wrapper around VMMR0_DO_NEM_UNMAP_PAGES for a single page.
+ *
+ * @returns VBox status code.
+ * @param   pVM         The cross context VM structure.
+ * @param   pVCpu       The cross context virtual CPU structure of the caller.
+ * @param   GCPhys      The page to unmap.  Does not need to be page aligned.
+ */
+DECLINLINE(int) nemR3WinHypercallUnmapPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
+{
+    pVCpu->nem.s.Hypercall.UnmapPages.GCPhys    = GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
+    pVCpu->nem.s.Hypercall.UnmapPages.cPages    = 1;
+    return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
+}
+
+#endif /* NEM_WIN_USE_HYPERCALLS */
+
 static int nemR3WinCopyStateToHyperV(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 {
@@ -1841,15 +1881,13 @@
 
 
-static DECLCALLBACK(int) nemR3WinUnmapOnePageCallback(PVM pVM, RTGCPHYS GCPhys, uint8_t *pu2NemState, void *pvUser)
+static DECLCALLBACK(int) nemR3WinUnmapOnePageCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint8_t *pu2NemState, void *pvUser)
 {
     RT_NOREF_PV(pvUser);
 #ifdef NEM_WIN_USE_HYPERCALLS
-    PVMCPU pVCpu = VMMGetCpu(pVM);
-    pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhys;
-    pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
-    int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
+    int rc = nemR3WinHypercallUnmapPage(pVM, pVCpu, GCPhys);
     AssertRC(rc);
     if (RT_SUCCESS(rc))
 #else
+    RT_NOREF_PV(pVCpu);
     HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
     if (SUCCEEDED(hrc))
@@ -1949,4 +1987,5 @@
             /* Map the page. */
             int rc = nemR3NativeSetPhysPage(pVM,
+                                            pVCpu,
                                             GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
                                             GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
@@ -1970,4 +2009,26 @@
                 return VINF_SUCCESS;
             }
+
+#ifdef NEM_WIN_USE_HYPERCALLS
+            /* Upgrade page to writable. */
+/** @todo test this*/
+            if (   (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
+                && pState->fWriteAccess)
+            {
+                int rc = nemR3WinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhys,
+                                                    HV_MAP_GPA_READABLE   | HV_MAP_GPA_WRITABLE
+                                                  | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
+                AssertRC(rc);
+                if (RT_SUCCESS(rc))
+                {
+                    pInfo->u2NemState = NEM_WIN_PAGE_STATE_WRITABLE;
+                    pState->fDidSomething = true;
+                    pState->fCanResume    = true;
+                    Log5(("NEM GPA write-upgrade/exit: %RGp (was %s, cMappedPages=%u)\n",
+                          GCPhys, g_apszPageStates[u2State], pVM->nem.s.cMappedPages));
+                    return rc;
+                }
+            }
+#endif
             break;
 
@@ -1989,7 +2050,5 @@
      */
 #ifdef NEM_WIN_USE_HYPERCALLS
-    pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhys;
-    pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
-    int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
+    int rc = nemR3WinHypercallUnmapPage(pVM, pVCpu, GCPhys);
     AssertRC(rc);
     if (RT_SUCCESS(rc))
@@ -2015,5 +2074,5 @@
             pVM->nem.s.cMappedPages));
 
-    PGMPhysNemEnumPagesByState(pVM, NEM_WIN_PAGE_STATE_READABLE, nemR3WinUnmapOnePageCallback, NULL);
+    PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemR3WinUnmapOnePageCallback, NULL);
     Log(("nemR3WinHandleMemoryAccessPageCheckerCallback: Unmapped all (cMappedPages=%u)\n", pVM->nem.s.cMappedPages));
 
@@ -2382,5 +2441,5 @@
         }
 
-#ifdef NEM_WIN_USE_HYPERCALLS
+#ifndef NEM_WIN_USE_HYPERCALLS
         /* Hack alert! */
         uint32_t const cMappedPages = pVM->nem.s.cMappedPages;
@@ -2389,5 +2448,5 @@
         else
         {
-            PGMPhysNemEnumPagesByState(pVM, NEM_WIN_PAGE_STATE_READABLE, nemR3WinUnmapOnePageCallback, NULL);
+            PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemR3WinUnmapOnePageCallback, NULL);
             Log(("nemR3NativeRunGC: Unmapped all; cMappedPages=%u -> %u\n", cMappedPages, pVM->nem.s.cMappedPages));
         }
@@ -2565,7 +2624,5 @@
     {
 #ifdef NEM_WIN_USE_HYPERCALLS
-        pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhys;
-        pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
-        int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
+        int rc = nemR3WinHypercallUnmapPage(pVM, pVCpu, GCPhys);
         AssertRC(rc);
         if (RT_SUCCESS(rc))
@@ -2668,4 +2725,6 @@
  * @returns VBox status code.
  * @param   pVM             The cross context VM structure.
+ * @param   pVCpu           The cross context virtual CPU structure of the
+ *                          calling EMT.
  * @param   GCPhysSrc       The source page address.
  * @param   GCPhysDst       The hyper-V destination page.  This may differ from
@@ -2674,6 +2733,7 @@
  * @param   pu2State        Our page state (input/output).
  * @param   fBackingChanged Set if the page backing is being changed.
+ * @thread  EMT(pVCpu)
  */
-static int nemR3NativeSetPhysPage(PVM pVM, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fPageProt,
+static int nemR3NativeSetPhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fPageProt,
                                   uint8_t *pu2State, bool fBackingChanged)
 {
@@ -2692,8 +2752,5 @@
         {
 #ifdef NEM_WIN_USE_HYPERCALLS
-            PVMCPU pVCpu = VMMGetCpu(pVM);
-            pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhysDst;
-            pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
-            int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
+            int rc = nemR3WinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
             AssertRC(rc);
             if (RT_SUCCESS(rc))
@@ -2742,12 +2799,7 @@
     {
 #ifdef NEM_WIN_USE_HYPERCALLS
-        RT_NOREF_PV(GCPhysSrc);
-        PVMCPU pVCpu = VMMGetCpu(pVM);
-        pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc = GCPhysSrc;
-        pVCpu->nem.s.Hypercall.MapPages.GCPhysDst = GCPhysDst;
-        pVCpu->nem.s.Hypercall.MapPages.cPages    = 1;
-        pVCpu->nem.s.Hypercall.MapPages.fFlags    = HV_MAP_GPA_READABLE   | HV_MAP_GPA_WRITABLE
-                                                  | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN;
-        int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_NEM_MAP_PAGES, 0, NULL);
+        int rc = nemR3WinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
+                                            HV_MAP_GPA_READABLE   | HV_MAP_GPA_WRITABLE
+                                          | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
         AssertRC(rc);
         if (RT_SUCCESS(rc))
@@ -2788,10 +2840,6 @@
     {
 #ifdef NEM_WIN_USE_HYPERCALLS
-        PVMCPU pVCpu = VMMGetCpu(pVM);
-        pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc = GCPhysSrc;
-        pVCpu->nem.s.Hypercall.MapPages.GCPhysDst = GCPhysDst;
-        pVCpu->nem.s.Hypercall.MapPages.cPages    = 1;
-        pVCpu->nem.s.Hypercall.MapPages.fFlags    = HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN;
-        int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_NEM_MAP_PAGES, 0, NULL);
+        int rc = nemR3WinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
+                                          HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
         AssertRC(rc);
         if (RT_SUCCESS(rc))
@@ -2846,7 +2894,5 @@
 #ifdef NEM_WIN_USE_HYPERCALLS
     PVMCPU pVCpu = VMMGetCpu(pVM);
-    pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
-    pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
-    int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
+    int rc = nemR3WinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
     AssertRC(rc);
     if (RT_SUCCESS(rc))
@@ -2857,5 +2903,5 @@
         return VINF_SUCCESS;
     }
-    LogRel(("nemR3NativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
+    LogRel(("nemR3JustUnmapPageFromHyperV/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
     return rc;
 #else
@@ -2883,8 +2929,9 @@
 
     int rc;
-#if 0
+#ifdef NEM_WIN_USE_HYPERCALLS
+    PVMCPU pVCpu = VMMGetCpu(pVM);
     if (   pVM->nem.s.fA20Enabled
         || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
-        rc = nemR3NativeSetPhysPage(pVM, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
+        rc = nemR3NativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
     else
     {
@@ -2892,5 +2939,5 @@
         rc = nemR3WinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
         if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys) && RT_SUCCESS(rc))
-            rc = nemR3NativeSetPhysPage(pVM, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
+            rc = nemR3NativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
 
     }
@@ -2916,8 +2963,9 @@
     RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);
 
-#if 0
+#ifdef NEM_WIN_USE_HYPERCALLS
+    PVMCPU pVCpu = VMMGetCpu(pVM);
     if (   pVM->nem.s.fA20Enabled
         || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
-        nemR3NativeSetPhysPage(pVM, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
+        nemR3NativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
     else
     {
@@ -2925,5 +2973,5 @@
         nemR3WinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
         if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
-            nemR3NativeSetPhysPage(pVM, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
+            nemR3NativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
     }
 #else
@@ -2946,8 +2994,9 @@
     RT_NOREF_PV(HCPhysPrev); RT_NOREF_PV(HCPhysNew); RT_NOREF_PV(enmType);
 
-#if 0
+#ifdef NEM_WIN_USE_HYPERCALLS
+    PVMCPU pVCpu = VMMGetCpu(pVM);
     if (   pVM->nem.s.fA20Enabled
         || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
-        nemR3NativeSetPhysPage(pVM, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
+        nemR3NativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
     else
     {
@@ -2955,5 +3004,5 @@
         nemR3WinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
         if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
-            nemR3NativeSetPhysPage(pVM, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
+            nemR3NativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
     }
 #else
Index: /trunk/src/VBox/VMM/VMMR3/VMM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/VMM.cpp	(revision 71080)
+++ /trunk/src/VBox/VMM/VMMR3/VMM.cpp	(revision 71081)
@@ -2487,8 +2487,21 @@
     PVMCPU pVCpu = VMMGetCpu(pVM);
     AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
-
-    /*
-     * Call Ring-0 entry with init code.
-     */
+    return VMMR3CallR0Emt(pVM, pVCpu, (VMMR0OPERATION)uOperation, u64Arg, pReqHdr);
+}
+
+
+/**
+ * Wrapper for SUPR3CallVMMR0Ex which will deal with VINF_VMM_CALL_HOST returns.
+ *
+ * @returns VBox status code.
+ * @param   pVM         The cross context VM structure.
+ * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
+ * @param   enmOperation  Operation to execute.
+ * @param   u64Arg      Constant argument.
+ * @param   pReqHdr     Pointer to a request header. See SUPR3CallVMMR0Ex for
+ *                      details.
+ */
+VMMR3_INT_DECL(int) VMMR3CallR0Emt(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr)
+{
     int rc;
     for (;;)
@@ -2497,5 +2510,5 @@
         rc = VERR_GENERAL_FAILURE;
 #else
-        rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, uOperation, u64Arg, pReqHdr);
+        rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, enmOperation, u64Arg, pReqHdr);
 #endif
         /*
@@ -2516,5 +2529,5 @@
 
     AssertLogRelMsgReturn(rc == VINF_SUCCESS || RT_FAILURE(rc),
-                          ("uOperation=%u rc=%Rrc\n", uOperation, rc),
+                          ("enmOperation=%u rc=%Rrc\n", enmOperation, rc),
                           VERR_IPE_UNEXPECTED_INFO_STATUS);
     return rc;
