Index: /trunk/include/VBox/err.h
===================================================================
--- /trunk/include/VBox/err.h	(revision 71074)
+++ /trunk/include/VBox/err.h	(revision 71075)
@@ -2815,6 +2815,12 @@
 /** NEM init failed. */
 #define VERR_NEM_INIT_FAILED                        (-6802)
+/** NEM init failed because of missing kernel API. */
+#define VERR_NEM_MISSING_KERNEL_API                 (-6803)
 /** NEM failed to create a native VM instance. */
-#define VERR_NEM_VM_CREATE_FAILED                   (-6803)
+#define VERR_NEM_VM_CREATE_FAILED                   (-6804)
+/** NEM failed to map page(s) into the VM. */
+#define VERR_NEM_MAP_PAGES_FAILED                   (-6805)
+/** NEM failed to unmap page(s) from the VM. */
+#define VERR_NEM_UNMAP_PAGES_FAILED                 (-6806)
 /** @} */
 
Index: /trunk/include/VBox/vmm/gvm.h
===================================================================
--- /trunk/include/VBox/vmm/gvm.h	(revision 71074)
+++ /trunk/include/VBox/vmm/gvm.h	(revision 71075)
@@ -54,4 +54,15 @@
         uint8_t             padding[64];
     } gvmm;
+
+#ifdef VBOX_WITH_NEM_R0
+    /** The NEM per vcpu data. */
+    union
+    {
+# ifdef ___NEMInternal_h
+        struct NEMR0PERVCPU s;
+# endif
+        uint8_t             padding[64];
+    } nem;
+#endif
 } GVMCPU;
 /** Pointer to the GVMCPU data. */
@@ -107,4 +118,15 @@
     } gmm;
 
+#ifdef VBOX_WITH_NEM_R0
+    /** The NEM per vm data. */
+    union
+    {
+# ifdef ___NEMInternal_h
+        struct NEMR0PERVM   s;
+# endif
+        uint8_t             padding[64];
+    } nem;
+#endif
+
     /** The RAWPCIVM per vm data. */
     union
@@ -113,7 +135,6 @@
         struct RAWPCIPERVM s;
 #endif
-        uint8_t     padding[64];
+        uint8_t             padding[64];
     } rawpci;
-
 
     /** GVMCPU array for the configured number of virtual CPUs. */
Index: /trunk/include/VBox/vmm/nem.h
===================================================================
--- /trunk/include/VBox/vmm/nem.h	(revision 71074)
+++ /trunk/include/VBox/vmm/nem.h	(revision 71075)
@@ -80,5 +80,15 @@
 /** @} */
 
-/** @defgroup grp_nem_hc    The NEM host context API
+
+/** @defgroup grp_nem_r0    The NEM ring-0 Context API
+ * @{  */
+VMMR0_INT_DECL(int)  NEMR0InitVM(PGVM pGVM, PVM pVM);
+VMMR0_INT_DECL(void) NEMR0CleanupVM(PGVM pGVM);
+VMMR0_INT_DECL(int)  NEMR0MapPages(PGVM pGVM, PVM pVM, VMCPUID idCpu);
+VMMR0_INT_DECL(int)  NEMR0UnmapPages(PGVM pGVM, PVM pVM, VMCPUID idCpu);
+/** @} */
+
+
+/** @defgroup grp_nem_hc    The NEM Host Context API
  * @{
  */
Index: /trunk/include/VBox/vmm/vm.h
===================================================================
--- /trunk/include/VBox/vmm/vm.h	(revision 71074)
+++ /trunk/include/VBox/vmm/vm.h	(revision 71075)
@@ -176,5 +176,5 @@
         struct NEMCPU       s;
 #endif
-        uint8_t             padding[128];       /* multiple of 64 */
+        uint8_t             padding[256];       /* multiple of 64 */
     } nem;
 
@@ -265,5 +265,5 @@
 
     /** Align the following members on page boundary. */
-    uint8_t                 abAlignment2[1976];
+    uint8_t                 abAlignment2[1848];
 
     /** PGM part. */
Index: /trunk/include/VBox/vmm/vm.mac
===================================================================
--- /trunk/include/VBox/vmm/vm.mac	(revision 71074)
+++ /trunk/include/VBox/vmm/vm.mac	(revision 71075)
@@ -64,5 +64,5 @@
     .hm                     resb 5824
     .em                     resb 1408
-    .nem                    resb 128
+    .nem                    resb 256
     .trpm                   resb 128
     .tm                     resb 384
Index: /trunk/include/VBox/vmm/vmm.h
===================================================================
--- /trunk/include/VBox/vmm/vmm.h	(revision 71074)
+++ /trunk/include/VBox/vmm/vmm.h	(revision 71075)
@@ -501,6 +501,13 @@
     VMMR0_DO_SRV_END,
 
+    /** Call NEMR0InitVM() (host specific). */
+    VMMR0_DO_NEM_INIT_VM = 576,
+    /** Call NEMR0MapPages() (host specific). */
+    VMMR0_DO_NEM_MAP_PAGES,
+    /** Call NEMR0UnmapPages() (host specific). */
+    VMMR0_DO_NEM_UNMAP_PAGES,
+
     /** Official call we use for testing Ring-0 APIs. */
-    VMMR0_DO_TESTS,
+    VMMR0_DO_TESTS = 640,
     /** Test the 32->64 bits switcher. */
     VMMR0_DO_TEST_SWITCHER3264,
Index: /trunk/include/iprt/nt/hyperv.h
===================================================================
--- /trunk/include/iprt/nt/hyperv.h	(revision 71075)
+++ /trunk/include/iprt/nt/hyperv.h	(revision 71075)
@@ -0,0 +1,471 @@
+/** @file
+ * Hyper-V related types and definitions.
+ */
+
+/*
+ * Copyright (C) 2018 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+#ifndef ___iprt_nt_hyperv_h
+#define ___iprt_nt_hyperv_h
+
+#include <iprt/types.h>
+#include <iprt/assertcompile.h>
+
+
+/** Hyper-V partition ID. */
+typedef uint64_t HV_PARTITION_ID;
+/** Invalid Hyper-V partition ID. */
+#define HV_PARTITION_ID_INVALID UINT64_C(0)
+/** Hyper-V virtual processor index (== VMCPUID). */
+typedef uint32_t HV_VP_INDEX;
+/** Guest physical address (== RTGCPHYS). */
+typedef uint64_t HV_GPA;
+/** Guest physical page number. */
+typedef uint64_t HV_GPA_PAGE_NUMBER;
+/** System(/parent) physical page number. */
+typedef uint64_t HV_SPA_PAGE_NUMBER;
+
+
+
+/** Hypercall IDs.   */
+typedef enum
+{
+    HvCallReserved0000 = 0,
+
+    HvCallSwitchVirtualAddressSpace,
+    HvCallFlushVirtualAddressSpace,
+    HvCallFlushVirtualAddressList,
+    HvCallGetLogicalProcessorRunTime,
+    /* 5, 6 & 7 are deprecated / reserved. */
+    HvCallNotifyLongSpinWait = 8,
+    HvCallParkLogicalProcessors,        /**< @since v2 */
+    HvCallInvokeHypervisorDebugger,     /**< @since v2 - not mentioned in TLFS v5.0b  */
+    HvCallSendSyntheticClusterIpi,      /**< @since v? */
+    HvCallModifyVtlProtectionMask,      /**< @since v? */
+    HvCallEnablePartitionVtl,           /**< @since v? */
+    HvCallDisablePartitionVtl,          /**< @since v? */
+    HvCallEnableVpVtl,                  /**< @since v? */
+    HvCallDisableVpVtl,                 /**< @since v? */
+    HvCallVtlCall,                      /**< @since v? */
+    HvCallVtlReturn,                    /**< @since v? */
+    HvCallFlushVirtualAddressSpaceEx,   /**< @since v? */
+    HvCallFlushVirtualAddressListEx,    /**< @since v? */
+    HvCallSendSyntheticClusterIpiEx,    /**< @since v? */
+    /* Reserved: 0x16..0x3f */
+
+    HvCallCreatePartition = 0x40,
+    HvCallInitializePartition,
+    HvCallFinalizePartition,
+    HvCallDeletePartition,
+    HvCallGetPartitionProperty,
+    HvCallSetPartitionProperty,
+    HvCallGetPartitionId,
+    HvCallGetNextChildPartition,
+    HvCallDepositMemory,                /**< 0x48 - Repeat call. */
+    HvCallWithdrawMemory,               /**< 0x49 - Repeat call. */
+    HvCallGetMemoryBalance,
+    HvCallMapGpaPages,                  /**< 0X4b - Repeat call. */
+    HvCallUnmapGpaPages,                /**< 0X4c - Repeat call. */
+    HvCallInstallIntercept,
+    HvCallCreateVp,
+    HvCallDeleteVp,                     /**< 0x4f - Fast call.  */
+    HvCallGetVpRegisters,               /**< 0x50 - Repeat call. */
+    HvCallSetVpRegisters,               /**< 0x51 - Repeat call. */
+    HvCallTranslateVirtualAddress,
+    HvCallReadGpa,
+    HvCallWriteGpa,
+    HvCallAssertVirtualInterruptV1,
+    HvCallClearVirtualInterrupt,        /**< 0x56 - Fast call. */
+    HvCallCreatePortV1,
+    HvCallDeletePort,                   /**< 0x58 - Fast call. */
+    HvCallConnectPortV1,
+    HvCallGetPortProperty,
+    HvCallDisconnectPort,
+    HvCallPostMessage,
+    HvCallSignalEvent,
+    HvCallSavePartitionState,
+    HvCallRestorePartitionState,
+    HvCallInitializeEventLogBufferGroup,
+    HvCallFinalizeEventLogBufferGroup,
+    HvCallCreateEventLogBuffer,
+    HvCallDeleteEventLogBuffer,
+    HvCallMapEventLogBuffer,
+    HvCallUnmapEventLogBuffer,
+    HvCallSetEventLogGroupSources,
+    HvCallReleaseEventLogBuffer,
+    HvCallFlushEventLogBuffer,
+    HvCallPostDebugData,
+    HvCallRetrieveDebugData,
+    HvCallResetDebugSession,
+    HvCallMapStatsPage,
+    HvCallUnmapStatsPage,
+    HvCallMapSparseGpaPages,            /**< @since v2 */
+    HvCallSetSystemProperty,            /**< @since v2 */
+    HvCallSetPortProperty,              /**< @since v2 */
+    /* 0x71..0x75 reserved/deprecated (was v2 test IDs). */
+    HvCallAddLogicalProcessor = 0x76,
+    HvCallRemoveLogicalProcessor,
+    HvCallQueryNumaDistance,
+    HvCallSetLogicalProcessorProperty,
+    HvCallGetLogicalProcessorProperty,
+    HvCallGetSystemProperty,
+    HvCallMapDeviceInterrupt,
+    HvCallUnmapDeviceInterrupt,
+    HvCallRetargetDeviceInterrupt,
+    /* 0x7f is reserved. */
+    HvCallMapDevicePages = 0x80,
+    HvCallUnmapDevicePages,
+    HvCallAttachDevice,
+    HvCallDetachDevice,
+    HvCallNotifyStandbyTransition,
+    HvCallPrepareForSleep,
+    HvCallPrepareForHibernate,
+    HvCallNotifyPartitionEvent,
+    HvCallGetLogicalProcessorRegisters,
+    HvCallSetLogicalProcessorRegisters,
+    HvCallQueryAssociatedLpsforMca,
+    HvCallNotifyRingEmpty,
+    HvCallInjectSyntheticMachineCheck,
+    HvCallScrubPartition,
+    HvCallCollectLivedump,
+    HvCallDisableHypervisor,
+    HvCallModifySparseGpaPages,
+    HvCallRegisterInterceptResult,
+    HvCallUnregisterInterceptResult,
+    /* 0x93 is reserved/undocumented. */
+    HvCallAssertVirtualInterrupt = 0x94,
+    HvCallCreatePort,
+    HvCallConnectPort,
+    HvCallGetSpaPageList,
+    /* 0x98 is reserved. */
+    HvCallStartVirtualProcessor = 0x99,
+    HvCallGetVpIndexFromApicId,
+    /* 0x9b.. are reserved/undocumented. */
+    HvCallFlushGuestPhysicalAddressSpace = 0xaf,
+    HvCallFlushGuestPhysicalAddressList,
+    /* 0xb1..0xb4 are unknown */
+    HvCallCreateCpuGroup = 0xb5,
+    HvCallDeleteCpuGroup,
+    HvCallGetCpuGroupProperty,
+    HvCallSetCpuGroupProperty,
+    HvCallGetCpuGroupAffinit,
+    HvCallGetNextCpuGroup = 0xba,
+    HvCallGetNextCpuGroupPartition,
+    HvCallPrecommitGpaPages = 0xbe,
+    HvCallUncommitGpaPages,             /**< Happens when VidDestroyGpaRangeCheckSecure/WHvUnmapGpaRange is called. */
+    /* 0xc0..0xcb are unknown */
+    HvCallQueryVtlProtectionMaskRange = 0xcc,
+    HvCallModifyVtlProtectionMaskRange,
+    /* 0xce..0xd1 are unknown */
+    HvCallAcquireSparseGpaPageHostAccess = 0xd2,
+    HvCallReleaseSparseGpaPageHostAccess,
+    HvCallCheckSparseGpaPageVtlAccess,
+    HvCallAcquireSparseSpaPageHostAccess = 0xd7,
+    HvCallReleaseSparseSpaPageHostAccess,
+    HvCallAcceptGpaPages,                       /**< 0x18 byte input, zero rep, no output. */
+
+    /** Number of defined hypercalls (varies with version). */
+    HvCallCount
+} HV_CALL_CODE;
+AssertCompile(HvCallSendSyntheticClusterIpiEx == 0x15);
+AssertCompile(HvCallMapGpaPages == 0x4b);
+AssertCompile(HvCallSetPortProperty == 0x70);
+AssertCompile(HvCallRetargetDeviceInterrupt == 0x7e);
+AssertCompile(HvCallUnregisterInterceptResult == 0x92);
+AssertCompile(HvCallGetSpaPageList == 0x97);
+AssertCompile(HvCallFlushGuestPhysicalAddressList == 0xb0);
+AssertCompile(HvCallUncommitGpaPages == 0xbf);
+AssertCompile(HvCallCount == 0xda);
+
+
+/** Hypercall status code. */
+typedef uint16_t HV_STATUS;
+
+/** @name Hyper-V Hypercall status codes
+ * @{ */
+#define HV_STATUS_SUCCESS                                               (0x0000)
+#define HV_STATUS_RESERVED_1                                            (0x0001)
+#define HV_STATUS_INVALID_HYPERCALL_CODE                                (0x0002)
+#define HV_STATUS_INVALID_HYPERCALL_INPUT                               (0x0003)
+#define HV_STATUS_INVALID_ALIGNMENT                                     (0x0004)
+#define HV_STATUS_INVALID_PARAMETER                                     (0x0005)
+#define HV_STATUS_ACCESS_DENIED                                         (0x0006)
+#define HV_STATUS_INVALID_PARTITION_STATE                               (0x0007)
+#define HV_STATUS_OPERATION_DENIED                                      (0x0008)
+#define HV_STATUS_UNKNOWN_PROPERTY                                      (0x0009)
+#define HV_STATUS_PROPERTY_VALUE_OUT_OF_RANGE                           (0x000a)
+#define HV_STATUS_INSUFFICIENT_MEMORY                                   (0x000b)
+#define HV_STATUS_PARTITION_TOO_DEEP                                    (0x000c)
+#define HV_STATUS_INVALID_PARTITION_ID                                  (0x000d)
+#define HV_STATUS_INVALID_VP_INDEX                                      (0x000e)
+#define HV_STATUS_RESERVED_F                                            (0x000f)
+#define HV_STATUS_NOT_FOUND                                             (0x0010)
+#define HV_STATUS_INVALID_PORT_ID                                       (0x0011)
+#define HV_STATUS_INVALID_CONNECTION_ID                                 (0x0012)
+#define HV_STATUS_INSUFFICIENT_BUFFERS                                  (0x0013)
+#define HV_STATUS_NOT_ACKNOWLEDGED                                      (0x0014)
+#define HV_STATUS_INVALID_VP_STATE                                      (0x0015)
+#define HV_STATUS_ACKNOWLEDGED                                          (0x0016)
+#define HV_STATUS_INVALID_SAVE_RESTORE_STATE                            (0x0017)
+#define HV_STATUS_INVALID_SYNIC_STATE                                   (0x0018)
+#define HV_STATUS_OBJECT_IN_USE                                         (0x0019)
+#define HV_STATUS_INVALID_PROXIMITY_DOMAIN_INFO                         (0x001a)
+#define HV_STATUS_NO_DATA                                               (0x001b)
+#define HV_STATUS_INACTIVE                                              (0x001c)
+#define HV_STATUS_NO_RESOURCES                                          (0x001d)
+#define HV_STATUS_FEATURE_UNAVAILABLE                                   (0x001e)
+#define HV_STATUS_PARTIAL_PACKET                                        (0x001f)
+#define HV_STATUS_PROCESSOR_FEATURE_SSE3_NOT_SUPPORTED                  (0x0020)
+#define HV_STATUS_PROCESSOR_FEATURE_LAHFSAHF_NOT_SUPPORTED              (0x0021)
+#define HV_STATUS_PROCESSOR_FEATURE_SSSE3_NOT_SUPPORTED                 (0x0022)
+#define HV_STATUS_PROCESSOR_FEATURE_SSE4_1_NOT_SUPPORTED                (0x0023)
+#define HV_STATUS_PROCESSOR_FEATURE_SSE4_2_NOT_SUPPORTED                (0x0024)
+#define HV_STATUS_PROCESSOR_FEATURE_SSE4A_NOT_SUPPORTED                 (0x0025)
+#define HV_STATUS_PROCESSOR_FEATURE_XOP_NOT_SUPPORTED                   (0x0026)
+#define HV_STATUS_PROCESSOR_FEATURE_POPCNT_NOT_SUPPORTED                (0x0027)
+#define HV_STATUS_PROCESSOR_FEATURE_CMPXCHG16B_NOT_SUPPORTED            (0x0028)
+#define HV_STATUS_PROCESSOR_FEATURE_ALTMOVCR8_NOT_SUPPORTED             (0x0029)
+#define HV_STATUS_PROCESSOR_FEATURE_LZCNT_NOT_SUPPORTED                 (0x002a)
+#define HV_STATUS_PROCESSOR_FEATURE_MISALIGNED_SSE_NOT_SUPPORTED        (0x002b)
+#define HV_STATUS_PROCESSOR_FEATURE_MMX_EXT_NOT_SUPPORTED               (0x002c)
+#define HV_STATUS_PROCESSOR_FEATURE_3DNOW_NOT_SUPPORTED                 (0x002d)
+#define HV_STATUS_PROCESSOR_FEATURE_EXTENDED_3DNOW_NOT_SUPPORTED        (0x002e)
+#define HV_STATUS_PROCESSOR_FEATURE_PAGE_1GB_NOT_SUPPORTED              (0x002f)
+#define HV_STATUS_PROCESSOR_CACHE_LINE_FLUSH_SIZE_INCOMPATIBLE          (0x0030)
+#define HV_STATUS_PROCESSOR_FEATURE_XSAVE_NOT_SUPPORTED                 (0x0031)
+#define HV_STATUS_PROCESSOR_FEATURE_XSAVEOPT_NOT_SUPPORTED              (0x0032)
+#define HV_STATUS_INSUFFICIENT_BUFFER                                   (0x0033)
+#define HV_STATUS_PROCESSOR_FEATURE_XSAVE_AVX_NOT_SUPPORTED             (0x0034)
+#define HV_STATUS_PROCESSOR_FEATURE_XSAVE_FEATURE_NOT_SUPPORTED         (0x0035)
+#define HV_STATUS_PROCESSOR_XSAVE_SAVE_AREA_INCOMPATIBLE                (0x0036)
+#define HV_STATUS_INCOMPATIBLE_PROCESSOR                                (0x0037)
+#define HV_STATUS_INSUFFICIENT_DEVICE_DOMAINS                           (0x0038)
+#define HV_STATUS_PROCESSOR_FEATURE_AES_NOT_SUPPORTED                   (0x0039)
+#define HV_STATUS_PROCESSOR_FEATURE_PCLMULQDQ_NOT_SUPPORTED             (0x003a)
+#define HV_STATUS_PROCESSOR_FEATURE_INCOMPATIBLE_XSAVE_FEATURES         (0x003b)
+#define HV_STATUS_CPUID_FEATURE_VALIDATION_ERROR                        (0x003c)
+#define HV_STATUS_CPUID_XSAVE_FEATURE_VALIDATION_ERROR                  (0x003d)
+#define HV_STATUS_PROCESSOR_STARTUP_TIMEOUT                             (0x003e)
+#define HV_STATUS_SMX_ENABLED                                           (0x003f)
+#define HV_STATUS_PROCESSOR_FEATURE_PCID_NOT_SUPPORTED                  (0x0040)
+#define HV_STATUS_INVALID_LP_INDEX                                      (0x0041)
+#define HV_STATUS_FEATURE_FMA4_NOT_SUPPORTED                            (0x0042)
+#define HV_STATUS_FEATURE_F16C_NOT_SUPPORTED                            (0x0043)
+#define HV_STATUS_PROCESSOR_FEATURE_RDRAND_NOT_SUPPORTED                (0x0044)
+#define HV_STATUS_PROCESSOR_FEATURE_RDWRFSGS_NOT_SUPPORTED              (0x0045)
+#define HV_STATUS_PROCESSOR_FEATURE_SMEP_NOT_SUPPORTED                  (0x0046)
+#define HV_STATUS_PROCESSOR_FEATURE_ENHANCED_FAST_STRING_NOT_SUPPORTED  (0x0047)
+#define HV_STATUS_PROCESSOR_FEATURE_MOVBE_NOT_SUPPORTED                 (0x0048)
+#define HV_STATUS_PROCESSOR_FEATURE_BMI1_NOT_SUPPORTED                  (0x0049)
+#define HV_STATUS_PROCESSOR_FEATURE_BMI2_NOT_SUPPORTED                  (0x004a)
+#define HV_STATUS_PROCESSOR_FEATURE_HLE_NOT_SUPPORTED                   (0x004b)
+#define HV_STATUS_PROCESSOR_FEATURE_RTM_NOT_SUPPORTED                   (0x004c)
+#define HV_STATUS_PROCESSOR_FEATURE_XSAVE_FMA_NOT_SUPPORTED             (0x004d)
+#define HV_STATUS_PROCESSOR_FEATURE_XSAVE_AVX2_NOT_SUPPORTED            (0x004e)
+#define HV_STATUS_PROCESSOR_FEATURE_NPIEP1_NOT_SUPPORTED                (0x004f)
+#define HV_STATUS_INVALID_REGISTER_VALUE                                (0x0050)
+#define HV_STATUS_PROCESSOR_FEATURE_RDSEED_NOT_SUPPORTED                (0x0052)
+#define HV_STATUS_PROCESSOR_FEATURE_ADX_NOT_SUPPORTED                   (0x0053)
+#define HV_STATUS_PROCESSOR_FEATURE_SMAP_NOT_SUPPORTED                  (0x0054)
+#define HV_STATUS_NX_NOT_DETECTED                                       (0x0055)
+#define HV_STATUS_PROCESSOR_FEATURE_INTEL_PREFETCH_NOT_SUPPORTED        (0x0056)
+#define HV_STATUS_INVALID_DEVICE_ID                                     (0x0057)
+#define HV_STATUS_INVALID_DEVICE_STATE                                  (0x0058)
+#define HV_STATUS_PENDING_PAGE_REQUESTS                                 (0x0059)
+#define HV_STATUS_PAGE_REQUEST_INVALID                                  (0x0060)
+#define HV_STATUS_OPERATION_FAILED                                      (0x0071)
+#define HV_STATUS_NOT_ALLOWED_WITH_NESTED_VIRT_ACTIVE                   (0x0072)
+/** @} */
+
+
+/** @name Flags used with HvCallMapGpaPages and HvCallMapSparseGpaPages.
+ * @note There seem to be more flags defined after v2.
+ * @{ */
+typedef uint32_t HV_MAP_GPA_FLAGS;
+#define HV_MAP_GPA_READABLE             UINT32_C(0x0001)
+#define HV_MAP_GPA_WRITABLE             UINT32_C(0x0002)
+#define HV_MAP_GPA_EXECUTABLE           UINT32_C(0x0004)
+/** Seems this has to be set when HV_MAP_GPA_EXECUTABLE is (17101). */
+#define HV_MAP_GPA_EXECUTABLE_AGAIN     UINT32_C(0x0008)
+/** Dunno what this is yet, but it requires HV_MAP_GPA_DUNNO_1000.
+ * The readable bit gets put here when both HV_MAP_GPA_DUNNO_1000 and
+ * HV_MAP_GPA_DUNNO_MASK_0700 are clear. */
+#define HV_MAP_GPA_DUNNO_ACCESS         UINT32_C(0x0010)
+/** Guess work. */
+#define HV_MAP_GPA_MAYBE_ACCESS_MASK    UINT32_C(0x001f)
+/** Some kind of mask. */
+#define HV_MAP_GPA_DUNNO_MASK_0700      UINT32_C(0x0700)
+/** Dunno what this is, but required for HV_MAP_GPA_DUNNO_ACCESS. */
+#define HV_MAP_GPA_DUNNO_1000           UINT32_C(0x1000)
+/** Working with large 2MB pages. */
+#define HV_MAP_GPA_LARGE                UINT32_C(0x2000)
+/** Valid mask as per build 17101. */
+#define HV_MAP_GPA_VALID_MASK           UINT32_C(0x7f1f)
+/** @}  */
+
+/** Input for HvCallMapGpaPages. */
+typedef struct
+{
+    HV_PARTITION_ID     TargetPartitionId;
+    HV_GPA_PAGE_NUMBER  TargetGpaBase;
+    HV_MAP_GPA_FLAGS    MapFlags;
+    uint32_t            u32ExplicitPadding;
+    /* The repeating part: */
+    HV_SPA_PAGE_NUMBER  PageList[RT_FLEXIBLE_ARRAY];
+} HV_INPUT_MAP_GPA_PAGES;
+AssertCompileMemberOffset(HV_INPUT_MAP_GPA_PAGES, PageList, 24);
+/** Pointer to the input for HvCallMapGpaPages. */
+typedef HV_INPUT_MAP_GPA_PAGES *PHV_INPUT_MAP_GPA_PAGES;
+
+
+/** A parent to guest mapping pair for HvCallMapSparseGpaPages. */
+typedef struct
+{
+    HV_GPA_PAGE_NUMBER TargetGpaPageNumber;
+    HV_SPA_PAGE_NUMBER SourceSpaPageNumber;
+} HV_GPA_MAPPING;
+/** Pointer to a parent->guest mapping pair for HvCallMapSparseGpaPages. */
+typedef HV_GPA_MAPPING *PHV_GPA_MAPPING;
+
+/** Input for HvCallMapSparseGpaPages. */
+typedef struct
+{
+    HV_PARTITION_ID     TargetPartitionId;
+    HV_MAP_GPA_FLAGS    MapFlags;
+    uint32_t            u32ExplicitPadding;
+    /* The repeating part: */
+    HV_GPA_MAPPING      PageList[RT_FLEXIBLE_ARRAY];
+} HV_INPUT_MAP_SPARSE_GPA_PAGES;
+AssertCompileMemberOffset(HV_INPUT_MAP_SPARSE_GPA_PAGES, PageList, 16);
+/** Pointer to the input for HvCallMapSparseGpaPages. */
+typedef HV_INPUT_MAP_SPARSE_GPA_PAGES *PHV_INPUT_MAP_SPARSE_GPA_PAGES;
+
+
+/** Input for HvCallUnmapGpaPages. */
+typedef struct
+{
+    HV_PARTITION_ID     TargetPartitionId;
+    HV_GPA_PAGE_NUMBER  TargetGpaBase;
+    /** This field is either an omission in the 7600 WDK or a later additions.
+     *  Anyway, not quite sure what it does.  Bit 2 seems to indicate 2MB pages. */
+    uint64_t            fFlags;
+} HV_INPUT_UNMAP_GPA_PAGES;
+AssertCompileSize(HV_INPUT_UNMAP_GPA_PAGES, 24);
+/** Pointer to the input for HvCallUnmapGpaPages. */
+typedef HV_INPUT_UNMAP_GPA_PAGES *PHV_INPUT_UNMAP_GPA_PAGES;
+
+
+
+/** Cache types used by HvCallReadGpa and HvCallWriteGpa. */
+typedef enum
+{
+    HvCacheTypeX64Uncached = 0,
+    HvCacheTypeX64WriteCombining,
+    /* 2 & 3 are undefined. */
+    HvCacheTypeX64WriteThrough = 4,
+    HvCacheTypeX64WriteProtected,
+    HvCacheTypeX64WriteBack
+} HV_CACHE_TYPE;
+
+/** Control flags for HvCallReadGpa and HvCallWriteGpa. */
+typedef union
+{
+    uint64_t            AsUINT64;
+    struct
+    {
+        uint64_t        CacheType : 8;      /**< HV_CACHE_TYPE */
+        uint64_t        Reserved  : 56;
+    };
+} HV_ACCESS_GPA_CONTROL_FLAGS;
+
+/** Results codes for HvCallReadGpa and HvCallWriteGpa. */
+typedef enum
+{
+    HvAccessGpaSuccess = 0,
+    HvAccessGpaUnmapped,
+    HvAccessGpaReadIntercept,
+    HvAccessGpaWriteIntercept,
+    HvAccessGpaIllegalOverlayAccess
+} HV_ACCESS_GPA_RESULT_CODE;
+
+/** The result of HvCallReadGpa and HvCallWriteGpa. */
+typedef union
+{
+    uint64_t                        AsUINT64;
+    struct
+    {
+        HV_ACCESS_GPA_RESULT_CODE   ResultCode;
+        uint32_t                    Reserved;
+    };
+} HV_ACCESS_GPA_RESULT;
+
+
+/** Input for HvCallReadGpa. */
+typedef struct
+{
+    HV_PARTITION_ID             PartitionId;
+    HV_VP_INDEX                 VpIndex;
+    uint32_t                    ByteCount;
+    HV_GPA                      BaseGpa;
+    HV_ACCESS_GPA_CONTROL_FLAGS ControlFlags;
+} HV_INPUT_READ_GPA;
+AssertCompileSize(HV_INPUT_READ_GPA, 32);
+/** Pointer to the input for HvCallReadGpa. */
+typedef HV_INPUT_READ_GPA *PHV_INPUT_READ_GPA;
+
+/** Output for HvCallReadGpa. */
+typedef struct
+{
+    HV_ACCESS_GPA_RESULT        AccessResult;
+    uint8_t                     Data[16];
+} HV_OUTPUT_READ_GPA;
+AssertCompileSize(HV_OUTPUT_READ_GPA, 24);
+/** Pointer to the output for HvCallReadGpa. */
+typedef HV_OUTPUT_READ_GPA *PHV_OUTPUT_READ_GPA;
+
+
+/** Input for HvCallWriteGpa. */
+typedef struct
+{
+    HV_PARTITION_ID             PartitionId;
+    HV_VP_INDEX                 VpIndex;
+    uint32_t                    ByteCount;
+    HV_GPA                      BaseGpa;
+    HV_ACCESS_GPA_CONTROL_FLAGS ControlFlags;
+    uint8_t                     Data[16];
+} HV_INPUT_WRITE_GPA;
+AssertCompileSize(HV_INPUT_WRITE_GPA, 48);
+/** Pointer to the input for HvCallWriteGpa. */
+typedef HV_INPUT_WRITE_GPA *PHV_INPUT_WRITE_GPA;
+
+/** Output for HvCallWriteGpa. */
+typedef struct
+{
+    HV_ACCESS_GPA_RESULT        AccessResult;
+} HV_OUTPUT_WRITE_GPA;
+AssertCompileSize(HV_OUTPUT_WRITE_GPA, 8);
+/** Pointer to the output for HvCallWriteGpa. */
+typedef HV_OUTPUT_WRITE_GPA *PHV_OUTPUT_WRITE_GPA;
+
+
+#endif
+
Index: /trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp
===================================================================
--- /trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp	(revision 71074)
+++ /trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp	(revision 71075)
@@ -39,5 +39,5 @@
 #include <iprt/asm-math.h>
 #include <iprt/cpuset.h>
-#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
+#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_WINDOWS)
 # include <iprt/dbg.h>
 #endif
@@ -331,13 +331,13 @@
     { "RTProcSelf",                             (void *)(uintptr_t)RTProcSelf },
     { "RTR0AssertPanicSystem",                  (void *)(uintptr_t)RTR0AssertPanicSystem },
-#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
-    { "RTR0DbgKrnlInfoOpen",                    (void *)(uintptr_t)RTR0DbgKrnlInfoOpen },          /* only-darwin, only-solaris */
-    { "RTR0DbgKrnlInfoQueryMember",             (void *)(uintptr_t)RTR0DbgKrnlInfoQueryMember },   /* only-darwin, only-solaris */
+#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_WINDOWS)
+    { "RTR0DbgKrnlInfoOpen",                    (void *)(uintptr_t)RTR0DbgKrnlInfoOpen },          /* only-darwin, only-solaris, only-windows */
+    { "RTR0DbgKrnlInfoQueryMember",             (void *)(uintptr_t)RTR0DbgKrnlInfoQueryMember },   /* only-darwin, only-solaris, only-windows */
 # if defined(RT_OS_SOLARIS)
     { "RTR0DbgKrnlInfoQuerySize",               (void *)(uintptr_t)RTR0DbgKrnlInfoQuerySize },     /* only-solaris */
 # endif
-    { "RTR0DbgKrnlInfoQuerySymbol",             (void *)(uintptr_t)RTR0DbgKrnlInfoQuerySymbol },   /* only-darwin, only-solaris */
-    { "RTR0DbgKrnlInfoRelease",                 (void *)(uintptr_t)RTR0DbgKrnlInfoRelease },       /* only-darwin, only-solaris */
-    { "RTR0DbgKrnlInfoRetain",                  (void *)(uintptr_t)RTR0DbgKrnlInfoRetain },        /* only-darwin, only-solaris */
+    { "RTR0DbgKrnlInfoQuerySymbol",             (void *)(uintptr_t)RTR0DbgKrnlInfoQuerySymbol },   /* only-darwin, only-solaris, only-windows */
+    { "RTR0DbgKrnlInfoRelease",                 (void *)(uintptr_t)RTR0DbgKrnlInfoRelease },       /* only-darwin, only-solaris, only-windows */
+    { "RTR0DbgKrnlInfoRetain",                  (void *)(uintptr_t)RTR0DbgKrnlInfoRetain },        /* only-darwin, only-solaris, only-windows */
 #endif
     { "RTR0MemAreKrnlAndUsrDifferent",          (void *)(uintptr_t)RTR0MemAreKrnlAndUsrDifferent },
Index: /trunk/src/VBox/HostDrivers/Support/SUPDrvIOC.h
===================================================================
--- /trunk/src/VBox/HostDrivers/Support/SUPDrvIOC.h	(revision 71074)
+++ /trunk/src/VBox/HostDrivers/Support/SUPDrvIOC.h	(revision 71075)
@@ -214,5 +214,5 @@
  * @remarks 0x002a0000 is used by 5.1. The next version number must be 0x002b0000.
  */
-#define SUPDRV_IOC_VERSION                              0x00290001
+#define SUPDRV_IOC_VERSION                              0x00290002
 
 /** SUP_IOCTL_COOKIE. */
Index: /trunk/src/VBox/HostDrivers/Support/SUPLib.cpp
===================================================================
--- /trunk/src/VBox/HostDrivers/Support/SUPLib.cpp	(revision 71074)
+++ /trunk/src/VBox/HostDrivers/Support/SUPLib.cpp	(revision 71075)
@@ -277,5 +277,5 @@
         CookieReq.u.In.u32ReqVersion = SUPDRV_IOC_VERSION;
         const uint32_t uMinVersion = (SUPDRV_IOC_VERSION & 0xffff0000) == 0x00290000
-                                   ? 0x00290001
+                                   ? 0x00290002
                                    : SUPDRV_IOC_VERSION & 0xffff0000;
         CookieReq.u.In.u32MinVersion = uMinVersion;
Index: /trunk/src/VBox/VMM/Makefile.kmk
===================================================================
--- /trunk/src/VBox/VMM/Makefile.kmk	(revision 71074)
+++ /trunk/src/VBox/VMM/Makefile.kmk	(revision 71075)
@@ -829,4 +829,9 @@
  	VMMR0/VMMR0JmpA-x86.asm
 
+ if1of ($(USERNAME),bird) # experimental.
+  VMMR0_SOURCES.win.amd64 += VMMR0/NEMR0Native-win.cpp
+  VMMR0_DEFS.win.amd64    += VBOX_WITH_NATIVE_NEM VBOX_WITH_NEM_R0
+ endif
+
  VMMR0_LIBS = \
  	$(PATH_STAGE_LIB)/ServicesR0$(VBOX_SUFF_LIB) \
Index: /trunk/src/VBox/VMM/VMMAll/AllPdbTypeHack.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/AllPdbTypeHack.cpp	(revision 71074)
+++ /trunk/src/VBox/VMM/VMMAll/AllPdbTypeHack.cpp	(revision 71075)
@@ -51,4 +51,5 @@
 #include "../include/EMInternal.h"
 #include "../include/IEMInternal.h"
+#include "../include/NEMInternal.h"
 #include "../include/REMInternal.h"
 #include "../VMMR0/GMMR0Internal.h"
Index: /trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp	(revision 71074)
+++ /trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp	(revision 71075)
@@ -58,4 +58,7 @@
 #include <VBox/vmm/vmcpuset.h>
 #include <VBox/vmm/vmm.h>
+#ifdef VBOX_WITH_NEM_R0
+# include <VBox/vmm/nem.h>
+#endif
 #include <VBox/param.h>
 #include <VBox/err.h>
@@ -1243,4 +1246,7 @@
 
     GMMR0CleanupVM(pGVM);
+#ifdef VBOX_WITH_NEM_R0
+    NEMR0CleanupVM(pGVM);
+#endif
 
     AssertCompile((uintptr_t)NIL_RTTHREADCTXHOOK == 0); /* Depends on zero initialized memory working for NIL at the moment. */
Index: /trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp	(revision 71075)
+++ /trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp	(revision 71075)
@@ -0,0 +1,328 @@
+/* $Id$ */
+/** @file
+ * NEM - Native execution manager, native ring-0 Windows backend.
+ */
+
+/*
+ * Copyright (C) 2018 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+*   Header Files                                                                                                                 *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_NEM
+#include <iprt/nt/hyperv.h>
+
+#include <VBox/vmm/nem.h>
+#include <VBox/vmm/iem.h>
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/apic.h>
+#include "NEMInternal.h"
+#include <VBox/vmm/gvm.h>
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/gvmm.h>
+#include <VBox/param.h>
+
+#include <iprt/dbg.h>
+#include <iprt/memobj.h>
+#include <iprt/string.h>
+
+
+/* Assert compile context sanity. */
+#ifndef RT_OS_WINDOWS
+# error "Windows only file!"
+#endif
+#ifndef RT_ARCH_AMD64
+# error "AMD64 only file!"
+#endif
+
+
+/*********************************************************************************************************************************
+*   Global Variables                                                                                                             *
+*********************************************************************************************************************************/
+static uint64_t (* g_pfnHvlInvokeHypercall)(uint64_t uCallInfo, uint64_t GCPhysInput, uint64_t GCPhysOutput);
+
+
+
+/**
+ * Called by NEMR3Init to make sure we've got what we need.
+ *
+ * @returns VBox status code.
+ * @param   pGVM            The ring-0 VM handle.
+ * @param   pVM             The cross context VM handle.
+ * @thread  EMT(0)
+ */
+VMMR0_INT_DECL(int) NEMR0InitVM(PGVM pGVM, PVM pVM)
+{
+    int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0);
+    AssertRCReturn(rc, rc);
+
+    /*
+     * We want to perform hypercalls here.  The NT kernel started to expose a very low
+     * level interface to do this thru somewhere between build 14271 and 16299.  Since
+     * we need build 17083 to get anywhere at all, the exact build is not relevant here.
+     */
+    RTDBGKRNLINFO hKrnlInfo;
+    rc = RTR0DbgKrnlInfoOpen(&hKrnlInfo, 0);
+    if (RT_SUCCESS(rc))
+    {
+        rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, NULL, "HvlInvokeHypercall", (void **)&g_pfnHvlInvokeHypercall);
+        RTR0DbgKrnlInfoRelease(hKrnlInfo);
+        if (RT_SUCCESS(rc))
+        {
+            /*
+             * Allocate a page for each VCPU to place hypercall data on.
+             */
+            for (VMCPUID i = 0; i < pGVM->cCpus; i++)
+            {
+                PGVMCPU pGVCpu = &pGVM->aCpus[i];
+                rc = RTR0MemObjAllocPage(&pGVCpu->nem.s.hHypercallDataMemObj, PAGE_SIZE, false /*fExecutable*/);
+                if (RT_SUCCESS(rc))
+                {
+                    pGVCpu->nem.s.HCPhysHypercallData = RTR0MemObjGetPagePhysAddr(pGVCpu->nem.s.hHypercallDataMemObj, 0 /*iPage*/);
+                    pGVCpu->nem.s.pbHypercallData     = (uint8_t *)RTR0MemObjAddress(pGVCpu->nem.s.hHypercallDataMemObj);
+                    AssertStmt(pGVCpu->nem.s.HCPhysHypercallData != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_3);
+                    AssertStmt(pGVCpu->nem.s.pbHypercallData, rc = VERR_INTERNAL_ERROR_3);
+                }
+                else
+                    pGVCpu->nem.s.hHypercallDataMemObj = NIL_RTR0MEMOBJ;
+                if (RT_FAILURE(rc))
+                {
+                    /* bail. */
+                    do
+                    {
+                        RTR0MemObjFree(pGVCpu->nem.s.hHypercallDataMemObj, true /*fFreeMappings*/);
+                        pGVCpu->nem.s.hHypercallDataMemObj = NIL_RTR0MEMOBJ;
+                        pGVCpu->nem.s.HCPhysHypercallData  = NIL_RTHCPHYS;
+                        pGVCpu->nem.s.pbHypercallData      = NULL;
+                    } while (i-- > 0);
+                    return rc;
+                }
+            }
+            /*
+             * So far, so good.
+             */
+            /** @todo would be good if we could establish the partition ID ourselves. */
+            /** @todo this is too EARLY!   */
+            pGVM->nem.s.idHvPartition = pVM->nem.s.idHvPartition;
+            return rc;
+        }
+
+        rc = VERR_NEM_MISSING_KERNEL_API;
+    }
+
+    RT_NOREF(pGVM, pVM);
+    return rc;
+}
+
+
+/**
+ * Cleanup the NEM parts of the VM in ring-0.
+ *
+ * This is always called and must deal with the state regardless of whether
+ * NEMR0InitVM() was called or not.  So, take care here.
+ *
+ * @param   pGVM            The ring-0 VM handle.
+ */
+VMMR0_INT_DECL(void) NEMR0CleanupVM(PGVM pGVM)
+{
+    pGVM->nem.s.idHvPartition = HV_PARTITION_ID_INVALID;
+
+    /* Free the hypercall pages. */
+    VMCPUID i = pGVM->cCpus;
+    while (i-- > 0)
+    {
+        PGVMCPU pGVCpu = &pGVM->aCpus[i];
+        if (pGVCpu->nem.s.pbHypercallData)
+        {
+            pGVCpu->nem.s.pbHypercallData = NULL;
+            int rc = RTR0MemObjFree(pGVCpu->nem.s.hHypercallDataMemObj, true /*fFreeMappings*/);
+            AssertRC(rc);
+        }
+        pGVCpu->nem.s.hHypercallDataMemObj = NIL_RTR0MEMOBJ;
+        pGVCpu->nem.s.HCPhysHypercallData  = NIL_RTHCPHYS;
+    }
+}
+
+
+/**
+ * Maps pages into the guest physical address space.
+ *
+ * Generally the caller will be under the PGM lock already, so no extra effort
+ * is needed to make sure all changes happen under it.
+ *
+ * @returns VBox status code.
+ * @param   pGVM            The ring-0 VM handle.
+ * @param   pVM             The cross context VM handle.
+ * @param   idCpu           The calling EMT.  Necessary for getting the
+ *                          hypercall page and arguments.
+ * @thread  EMT(idCpu)
+ */
+VMMR0_INT_DECL(int) NEMR0MapPages(PGVM pGVM, PVM pVM, VMCPUID idCpu)
+{
+    /*
+     * Validate the call.
+     */
+    int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
+    if (RT_SUCCESS(rc))
+    {
+        PVMCPU  pVCpu  = &pVM->aCpus[idCpu];
+        PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
+        AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
+
+        RTGCPHYS                GCPhysSrc = pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc;
+        RTGCPHYS const          GCPhysDst = pVCpu->nem.s.Hypercall.MapPages.GCPhysDst;
+        uint32_t const          cPages    = pVCpu->nem.s.Hypercall.MapPages.cPages;
+        HV_MAP_GPA_FLAGS const  fFlags    = pVCpu->nem.s.Hypercall.MapPages.fFlags;
+
+        AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
+        AssertReturn(cPages <= NEM_MAX_MAP_PAGES, VERR_OUT_OF_RANGE);
+        AssertReturn(!(fFlags & ~(HV_MAP_GPA_MAYBE_ACCESS_MASK & ~HV_MAP_GPA_DUNNO_ACCESS)), VERR_INVALID_FLAGS);
+        AssertMsgReturn(!(GCPhysDst & X86_PAGE_OFFSET_MASK), ("GCPhysDst=%RGp\n", GCPhysDst), VERR_OUT_OF_RANGE);
+        AssertReturn(GCPhysDst < _1E, VERR_OUT_OF_RANGE);
+        if (GCPhysSrc != GCPhysDst)
+        {
+            AssertMsgReturn(!(GCPhysSrc & X86_PAGE_OFFSET_MASK), ("GCPhysSrc=%RGp\n", GCPhysSrc), VERR_OUT_OF_RANGE);
+            AssertReturn(GCPhysSrc < _1E, VERR_OUT_OF_RANGE);
+        }
+
+        /** @todo fix pGVM->nem.s.idHvPartition init. */
+        if (pGVM->nem.s.idHvPartition == 0)
+            pGVM->nem.s.idHvPartition = pVM->nem.s.idHvPartition;
+
+        /*
+         * Compose and make the hypercall.
+         * Ring-3 is not allowed to fill in the host physical addresses of the call.
+         */
+        HV_INPUT_MAP_GPA_PAGES *pMapPages = (HV_INPUT_MAP_GPA_PAGES *)pGVCpu->nem.s.pbHypercallData;
+        AssertPtrReturn(pMapPages, VERR_INTERNAL_ERROR_3);
+        pMapPages->TargetPartitionId    = pGVM->nem.s.idHvPartition;
+        pMapPages->TargetGpaBase        = GCPhysDst >> X86_PAGE_SHIFT;
+        pMapPages->MapFlags             = fFlags;
+        pMapPages->u32ExplicitPadding   = 0;
+        for (uint32_t iPage = 0; iPage < cPages; iPage++, GCPhysSrc += X86_PAGE_SIZE)
+        {
+            RTHCPHYS HCPhys = NIL_RTGCPHYS;
+            rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysSrc, &HCPhys);
+            AssertRCBreak(rc);
+            pMapPages->PageList[iPage] = HCPhys >> X86_PAGE_SHIFT;
+        }
+        if (RT_SUCCESS(rc))
+        {
+            uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallMapGpaPages | ((uint64_t)cPages << 32),
+                                                       pGVCpu->nem.s.HCPhysHypercallData, 0);
+            Log6(("NEMR0MapPages: %RGp/%RGp L %u prot %#x -> %#RX64\n",
+                  GCPhysDst, GCPhysSrc - cPages * X86_PAGE_SIZE, cPages, fFlags, uResult));
+            if (uResult == ((uint64_t)cPages << 32))
+                rc = VINF_SUCCESS;
+            else
+            {
+                rc = VERR_NEM_MAP_PAGES_FAILED;
+                LogRel(("g_pfnHvlInvokeHypercall/MapGpaPages -> %#RX64\n", uResult));
+            }
+        }
+    }
+    return rc;
+}
+
+
+#if 0 /* for debugging GPA unmapping.  */
+static int nemR3WinDummyReadGpa(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys)
+{
+    PHV_INPUT_READ_GPA  pIn  = (PHV_INPUT_READ_GPA)pGVCpu->nem.s.pbHypercallData;
+    PHV_OUTPUT_READ_GPA pOut = (PHV_OUTPUT_READ_GPA)(pIn + 1);
+    pIn->PartitionId            = pGVM->nem.s.idHvPartition;
+    pIn->VpIndex                = pGVCpu->idCpu;
+    pIn->ByteCount              = 0x10;
+    pIn->BaseGpa                = GCPhys;
+    pIn->ControlFlags.AsUINT64  = 0;
+    pIn->ControlFlags.CacheType = HvCacheTypeX64WriteCombining;
+    memset(pOut, 0xfe, sizeof(*pOut));
+    uint64_t volatile uResult = g_pfnHvlInvokeHypercall(HvCallReadGpa, pGVCpu->nem.s.HCPhysHypercallData,
+                                                        pGVCpu->nem.s.HCPhysHypercallData + sizeof(*pIn));
+    LogRel(("nemR3WinDummyReadGpa: %RGp -> %#RX64; code=%u rsvd=%u abData=%.16Rhxs\n",
+            GCPhys, uResult, pOut->AccessResult.ResultCode, pOut->AccessResult.Reserved, pOut->Data));
+    __debugbreak();
+
+    return uResult != 0 ? VERR_READ_ERROR : VINF_SUCCESS;
+}
+#endif
+
+
+/**
+ * Unmaps pages from the guest physical address space.
+ *
+ * Generally the caller will be under the PGM lock already, so no extra effort
+ * is needed to make sure all changes happen under it.
+ *
+ * @returns VBox status code.
+ * @param   pGVM            The ring-0 VM handle.
+ * @param   pVM             The cross context VM handle.
+ * @param   idCpu           The calling EMT.  Necessary for getting the
+ *                          hypercall page and arguments.
+ * @thread  EMT(idCpu)
+ */
+VMMR0_INT_DECL(int) NEMR0UnmapPages(PGVM pGVM, PVM pVM, VMCPUID idCpu)
+{
+    /*
+     * Validate the call.
+     */
+    int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
+    if (RT_SUCCESS(rc))
+    {
+        PVMCPU  pVCpu  = &pVM->aCpus[idCpu];
+        PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
+        AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
+
+        RTGCPHYS                GCPhys = pVCpu->nem.s.Hypercall.UnmapPages.GCPhys;
+        uint32_t const          cPages = pVCpu->nem.s.Hypercall.UnmapPages.cPages;
+
+        AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
+        AssertReturn(cPages <= NEM_MAX_UNMAP_PAGES, VERR_OUT_OF_RANGE);
+        AssertMsgReturn(!(GCPhys & X86_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_OUT_OF_RANGE);
+        AssertReturn(GCPhys < _1E, VERR_OUT_OF_RANGE);
+
+        /** @todo fix pGVM->nem.s.idHvPartition init. */
+        if (pGVM->nem.s.idHvPartition == 0)
+            pGVM->nem.s.idHvPartition = pVM->nem.s.idHvPartition;
+
+        /*
+         * Compose and make the hypercall.
+         */
+        HV_INPUT_UNMAP_GPA_PAGES *pUnmapPages = (HV_INPUT_UNMAP_GPA_PAGES *)pGVCpu->nem.s.pbHypercallData;
+        AssertPtrReturn(pUnmapPages, VERR_INTERNAL_ERROR_3);
+        pUnmapPages->TargetPartitionId    = pGVM->nem.s.idHvPartition;
+        pUnmapPages->TargetGpaBase        = GCPhys >> X86_PAGE_SHIFT;
+        pUnmapPages->fFlags               = 0;
+
+        uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallUnmapGpaPages | ((uint64_t)cPages << 32),
+                                                   pGVCpu->nem.s.HCPhysHypercallData, 0);
+        Log6(("NEMR0UnmapPages: %RGp L %u -> %#RX64\n", GCPhys, cPages, uResult));
+        if (uResult == ((uint64_t)cPages << 32))
+        {
+#if 1       /* Do we need to do this? Hopefully not... */
+            uint64_t volatile uR = g_pfnHvlInvokeHypercall(HvCallUncommitGpaPages | ((uint64_t)cPages << 32),
+                                                           pGVCpu->nem.s.HCPhysHypercallData, 0);
+            AssertMsg(uR == ((uint64_t)cPages << 32), ("uR=%#RX64\n", uR));
+#endif
+            rc = VINF_SUCCESS;
+        }
+        else
+        {
+            rc = VERR_NEM_UNMAP_PAGES_FAILED;
+            LogRel(("g_pfnHvlInvokeHypercall/UnmapGpaPages -> %#RX64\n", uResult));
+        }
+    }
+    return rc;
+}
+
+
Index: /trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/VMMR0.cpp	(revision 71074)
+++ /trunk/src/VBox/VMM/VMMR0/VMMR0.cpp	(revision 71075)
@@ -27,4 +27,7 @@
 #include <VBox/vmm/pdmapi.h>
 #include <VBox/vmm/pgm.h>
+#ifdef VBOX_WITH_NEM_R0
+# include <VBox/vmm/nem.h>
+#endif
 #include <VBox/vmm/stam.h>
 #include <VBox/vmm/tm.h>
@@ -1959,4 +1962,33 @@
             break;
 #endif
+
+        /*
+         * NEM requests.
+         */
+#ifdef VBOX_WITH_NEM_R0
+# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
+        case VMMR0_DO_NEM_INIT_VM:
+            if (u64Arg || pReqHdr || idCpu != 0)
+                return VERR_INVALID_PARAMETER;
+            rc = NEMR0InitVM(pGVM, pVM);
+            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            break;
+
+        case VMMR0_DO_NEM_MAP_PAGES:
+            if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
+                return VERR_INVALID_PARAMETER;
+            rc = NEMR0MapPages(pGVM, pVM, idCpu);
+            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            break;
+
+        case VMMR0_DO_NEM_UNMAP_PAGES:
+            if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
+                return VERR_INVALID_PARAMETER;
+            rc = NEMR0UnmapPages(pGVM, pVM, idCpu);
+            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            break;
+# endif
+#endif
+
         /*
          * For profiling.
Index: /trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp	(revision 71074)
+++ /trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp	(revision 71075)
@@ -5,4 +5,6 @@
  * Log group 2: Exit logging.
  * Log group 3: Log context on exit.
+ * Log group 5: Ring-3 memory management
+ * Log group 6: Ring-0 memory management
  * Log group 12: API intercepts.
  */
@@ -26,4 +28,5 @@
 #define LOG_GROUP LOG_GROUP_NEM
 #include <iprt/nt/nt-and-windows.h>
+#include <iprt/nt/hyperv.h>
 #include <WinHvPlatform.h>
 
@@ -73,4 +76,7 @@
 #define NEM_WIN_IS_RELEVANT_TO_A20(a_GCPhys)    \
     ( ((RTGCPHYS)((a_GCPhys) - _1M) < (RTGCPHYS)_64K) || ((RTGCPHYS)(a_GCPhys) < (RTGCPHYS)_64K) )
+
+
+#define NEM_WIN_USE_HYPERCALLS
 
 
@@ -147,4 +153,9 @@
 /** @} */
 
+/** @name APIs imported from Vid.dll
+ * @{ */
+static BOOL (WINAPI *g_pfnVidGetHvPartitionId)(HANDLE hPartition, HV_PARTITION_ID *pidPartition);
+/** @} */
+
 
 /**
@@ -176,4 +187,5 @@
     NEM_WIN_IMPORT(0, false, WHvGetVirtualProcessorRegisters),
     NEM_WIN_IMPORT(0, false, WHvSetVirtualProcessorRegisters),
+    NEM_WIN_IMPORT(1, false, VidGetHvPartitionId),
 #undef NEM_WIN_IMPORT
 };
@@ -752,11 +764,18 @@
         {
             /*
-             * Create and initialize a partition.
+             * Check out our ring-0 capabilities.
              */
-            rc = nemR3WinInitCreatePartition(pVM, pErrInfo);
+            rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_NEM_INIT_VM, 0, NULL);
             if (RT_SUCCESS(rc))
             {
-                VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
-                Log(("NEM: Marked active!\n"));
+                /*
+                 * Create and initialize a partition.
+                 */
+                rc = nemR3WinInitCreatePartition(pVM, pErrInfo);
+                if (RT_SUCCESS(rc))
+                {
+                    VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
+                    Log(("NEM: Marked active!\n"));
+                }
             }
         }
@@ -862,7 +881,12 @@
         return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                           "Failed to get device handle for partition %p: %Rhrc", hPartition, hrc);
-    /** @todo Do a Vid query that uses the handle to check that we've got a
-     *  working value.  */
+
+    HV_PARTITION_ID idHvPartition = HV_PARTITION_ID_INVALID;
+    if (!g_pfnVidGetHvPartitionId(hPartitionDevice, &idHvPartition))
+        return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
+                          "Failed to get device handle and/or partition ID for %p (hPartitionDevice=%p, Last=%#x/%u)",
+                          hPartition, hPartitionDevice, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue);
     pVM->nem.s.hPartitionDevice = hPartitionDevice;
+    pVM->nem.s.idHvPartition    = idHvPartition;
 
     /*
@@ -890,5 +914,5 @@
     pVM->nem.s.fCreatedEmts = true;
 
-    LogRel(("NEM: Successfully set up partition (device handle %p)\n", hPartitionDevice));
+    LogRel(("NEM: Successfully set up partition (device handle %p, partition ID %#llx)\n", hPartitionDevice, idHvPartition));
     return VINF_SUCCESS;
 }
@@ -1588,5 +1612,5 @@
         if (pPendingInt->fInterruptionPending)
         {
-            Log6(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x unk0=%u unk1=%u\n",
+            Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x unk0=%u unk1=%u\n",
                   pPendingInt->enmInterruptionType, pPendingInt->InterruptionVector, pPendingInt->fDeliverErrCd,
                   pPendingInt->uErrCd, pPendingInt->fUnknown0, pPendingInt->fUnknown1));
@@ -1816,15 +1840,32 @@
 }
 
+
 static DECLCALLBACK(int) nemR3WinUnmapOnePageCallback(PVM pVM, RTGCPHYS GCPhys, uint8_t *pu2NemState, void *pvUser)
 {
     RT_NOREF_PV(pvUser);
+#ifdef NEM_WIN_USE_HYPERCALLS
+    PVMCPU pVCpu = VMMGetCpu(pVM);
+    pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhys;
+    pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
+    int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
+    AssertRC(rc);
+    if (RT_SUCCESS(rc))
+#else
     HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
     if (SUCCEEDED(hrc))
+#endif
+    {
+        Log5(("NEM GPA unmap all: %RGp (cMappedPages=%u)\n", GCPhys, pVM->nem.s.cMappedPages - 1));
         *pu2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
+    }
     else
     {
+#ifdef NEM_WIN_USE_HYPERCALLS
+        LogRel(("nemR3WinUnmapOnePageCallback: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
+#else
         LogRel(("nemR3WinUnmapOnePageCallback: GCPhys=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
                 GCPhys, g_apszPageStates[*pu2NemState], hrc, hrc, RTNtCurrentTeb()->LastStatusValue,
                 RTNtCurrentTeb()->LastErrorValue, pVM->nem.s.cMappedPages));
+#endif
         *pu2NemState = NEM_WIN_PAGE_STATE_NOT_SET;
     }
@@ -1947,14 +1988,27 @@
      * If this fails, which it does every so often, just unmap everything for now.
      */
+#ifdef NEM_WIN_USE_HYPERCALLS
+    pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhys;
+    pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
+    int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
+    AssertRC(rc);
+    if (RT_SUCCESS(rc))
+#else
     /** @todo figure out whether we mess up the state or if it's WHv.   */
     HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
     if (SUCCEEDED(hrc))
+#endif
     {
         pState->fDidSomething = true;
         pState->fCanResume    = true;
         pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
-        Log5(("nemR3WinHandleMemoryAccessPageCheckerCallback: %RGp => unmapped[%s]\n", GCPhys, g_apszPageStates[u2State]));
+        uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
+        Log5(("NEM GPA unmapped/exit: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[u2State], cMappedPages));
         return VINF_SUCCESS;
     }
+#ifdef NEM_WIN_USE_HYPERCALLS
+    LogRel(("nemR3WinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhys, rc));
+    return rc;
+#else
     LogRel(("nemR3WinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
             GCPhys, g_apszPageStates[u2State], hrc, hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue,
@@ -1968,4 +2022,5 @@
     pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
     return VINF_SUCCESS;
+#endif
 }
 
@@ -2183,5 +2238,5 @@
     const bool   fSingleStepping = false; /** @todo get this from somewhere. */
     VBOXSTRICTRC rcStrict = VINF_SUCCESS;
-    for (;;)
+    for (unsigned iLoop = 0;;iLoop++)
     {
         /*
@@ -2327,4 +2382,5 @@
         }
 
+#ifdef NEM_WIN_USE_HYPERCALLS
         /* Hack alert! */
         uint32_t const cMappedPages = pVM->nem.s.cMappedPages;
@@ -2336,4 +2392,5 @@
             Log(("nemR3NativeRunGC: Unmapped all; cMappedPages=%u -> %u\n", cMappedPages, pVM->nem.s.cMappedPages));
         }
+#endif
 
         /* If any FF is pending, return to the EM loops.  That's okay for the
@@ -2504,17 +2561,32 @@
                                                             PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
 {
-    Assert(pVCpu == NULL);
-
     /* We'll just unmap the memory. */
     if (pInfo->u2NemState > NEM_WIN_PAGE_STATE_UNMAPPED)
     {
+#ifdef NEM_WIN_USE_HYPERCALLS
+        pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhys;
+        pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
+        int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
+        AssertRC(rc);
+        if (RT_SUCCESS(rc))
+#else
         HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
         if (SUCCEEDED(hrc))
+#endif
+        {
+            uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
+            Log5(("NEM GPA unmapped/A20: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[pInfo->u2NemState], cMappedPages));
             pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
+        }
         else
         {
+#ifdef NEM_WIN_USE_HYPERCALLS
+            LogRel(("nemR3WinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
+            return rc;
+#else
             LogRel(("nemR3WinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                     GCPhys, hrc, hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue));
             return VERR_INTERNAL_ERROR_2;
+#endif
         }
     }
@@ -2529,11 +2601,11 @@
  * @returns The PGMPhysNemQueryPageInfo result.
  * @param   pVM             The cross context VM structure.
- * @param   pVCpu           The cross context virtual CPU structure.  Optional.
+ * @param   pVCpu           The cross context virtual CPU structure.
  * @param   GCPhys          The page to unmap.
  */
-static int nemR3WinUnmapPageForA20Gate(PVM pVM, RTGCPHYS GCPhys)
+static int nemR3WinUnmapPageForA20Gate(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
 {
     PGMPHYSNEMPAGEINFO Info;
-    return PGMPhysNemPageInfoChecker(pVM, NULL /*pVCpu*/, GCPhys, false /*fMakeWritable*/, &Info,
+    return PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, false /*fMakeWritable*/, &Info,
                                      nemR3WinUnsetForA20CheckerCallback, NULL);
 }
@@ -2558,5 +2630,5 @@
         pVM->nem.s.fA20Enabled = fEnabled;
         for (RTGCPHYS GCPhys = _1M; GCPhys < _1M + _64K; GCPhys += X86_PAGE_SIZE)
-            nemR3WinUnmapPageForA20Gate(pVM, GCPhys);
+            nemR3WinUnmapPageForA20Gate(pVM, pVCpu, GCPhys);
     }
 }
@@ -2619,4 +2691,27 @@
         if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
         {
+#ifdef NEM_WIN_USE_HYPERCALLS
+            PVMCPU pVCpu = VMMGetCpu(pVM);
+            pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhysDst;
+            pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
+            int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
+            AssertRC(rc);
+            if (RT_SUCCESS(rc))
+            {
+                *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
+                uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
+                if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
+                {
+                    Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
+                          GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
+                    return VINF_SUCCESS;
+                }
+            }
+            else
+            {
+                LogRel(("nemR3NativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
+                return rc;
+            }
+#else
             HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst, X86_PAGE_SIZE);
             if (SUCCEEDED(hrc))
@@ -2626,5 +2721,6 @@
                 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
                 {
-                    Log5(("nemR3NativeSetPhysPage: %RGp => unmapped (total %u)\n", GCPhysDst, cMappedPages));
+                    Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
+                          GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
                     return VINF_SUCCESS;
                 }
@@ -2636,4 +2732,5 @@
                 return VERR_NEM_INIT_FAILED;
             }
+#endif
         }
     }
@@ -2644,4 +2741,25 @@
     if (fPageProt & NEM_PAGE_PROT_WRITE)
     {
+#ifdef NEM_WIN_USE_HYPERCALLS
+        RT_NOREF_PV(GCPhysSrc);
+        PVMCPU pVCpu = VMMGetCpu(pVM);
+        pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc = GCPhysSrc;
+        pVCpu->nem.s.Hypercall.MapPages.GCPhysDst = GCPhysDst;
+        pVCpu->nem.s.Hypercall.MapPages.cPages    = 1;
+        pVCpu->nem.s.Hypercall.MapPages.fFlags    = HV_MAP_GPA_READABLE   | HV_MAP_GPA_WRITABLE
+                                                  | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN;
+        int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_NEM_MAP_PAGES, 0, NULL);
+        AssertRC(rc);
+        if (RT_SUCCESS(rc))
+        {
+            *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
+            uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
+            Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
+                  GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
+            return VINF_SUCCESS;
+        }
+        LogRel(("nemR3NativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
+        return rc;
+#else
         void *pvPage;
         int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhysSrc, &pvPage);
@@ -2654,5 +2772,6 @@
                 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
                 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
-                Log5(("nemR3NativeSetPhysPage: %RGp => writable (total %u)\n", GCPhysDst, cMappedPages));
+                Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
+                      GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
                 return VINF_SUCCESS;
             }
@@ -2663,8 +2782,28 @@
         LogRel(("nemR3NativeSetPhysPage/writable: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
         return rc;
+#endif
     }
 
     if (fPageProt & NEM_PAGE_PROT_READ)
     {
+#ifdef NEM_WIN_USE_HYPERCALLS
+        PVMCPU pVCpu = VMMGetCpu(pVM);
+        pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc = GCPhysSrc;
+        pVCpu->nem.s.Hypercall.MapPages.GCPhysDst = GCPhysDst;
+        pVCpu->nem.s.Hypercall.MapPages.cPages    = 1;
+        pVCpu->nem.s.Hypercall.MapPages.fFlags    = HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN;
+        int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_NEM_MAP_PAGES, 0, NULL);
+        AssertRC(rc);
+        if (RT_SUCCESS(rc))
+        {
+            *pu2State = NEM_WIN_PAGE_STATE_READABLE;
+            uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
+            Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
+                  GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
+            return VINF_SUCCESS;
+        }
+        LogRel(("nemR3NativeSetPhysPage/readonly: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
+        return rc;
+#else
         const void *pvPage;
         int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhysSrc, &pvPage);
@@ -2677,5 +2816,6 @@
                 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
                 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
-                Log5(("nemR3NativeSetPhysPage: %RGp => read+exec (total %u)\n", GCPhysDst, cMappedPages));
+                Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
+                      GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
                 return VINF_SUCCESS;
             }
@@ -2686,4 +2826,5 @@
         LogRel(("nemR3NativeSetPhysPage/readonly: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
         return rc;
+#endif
     }
 
@@ -2703,5 +2844,21 @@
     }
 
-    HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst, X86_PAGE_SIZE);
+#ifdef NEM_WIN_USE_HYPERCALLS
+    PVMCPU pVCpu = VMMGetCpu(pVM);
+    pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
+    pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
+    int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
+    AssertRC(rc);
+    if (RT_SUCCESS(rc))
+    {
+        uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
+        Log5(("NEM GPA unmapped/just: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[*pu2State], cMappedPages));
+        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
+        return VINF_SUCCESS;
+    }
+    LogRel(("nemR3NativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
+    return rc;
+#else
+    HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE);
     if (SUCCEEDED(hrc))
     {
@@ -2714,4 +2871,5 @@
             GCPhysDst, hrc, hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue));
     return VERR_INTERNAL_ERROR_3;
+#endif
 }
 
@@ -2732,5 +2890,5 @@
     {
         /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
-        rc = nemR3WinUnmapPageForA20Gate(pVM, GCPhys | RT_BIT_32(20));
+        rc = nemR3WinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
         if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys) && RT_SUCCESS(rc))
             rc = nemR3NativeSetPhysPage(pVM, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
@@ -2765,5 +2923,5 @@
     {
         /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
-        nemR3WinUnmapPageForA20Gate(pVM, GCPhys | RT_BIT_32(20));
+        nemR3WinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
         if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
             nemR3NativeSetPhysPage(pVM, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
@@ -2795,5 +2953,5 @@
     {
         /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
-        nemR3WinUnmapPageForA20Gate(pVM, GCPhys | RT_BIT_32(20));
+        nemR3WinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
         if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
             nemR3NativeSetPhysPage(pVM, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
Index: /trunk/src/VBox/VMM/include/NEMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/include/NEMInternal.h	(revision 71074)
+++ /trunk/src/VBox/VMM/include/NEMInternal.h	(revision 71075)
@@ -25,4 +25,7 @@
 #include <VBox/vmm/stam.h>
 #include <VBox/vmm/vmapi.h>
+#ifdef RT_OS_WINDOWS
+#include <iprt/nt/hyperv.h>
+#endif
 
 RT_C_DECLS_BEGIN
@@ -83,4 +86,6 @@
      * controls. */
     RTR3PTR                     hPartitionDevice;
+    /** The Hyper-V partition ID.   */
+    uint64_t                    idHvPartition;
 
     /** Number of currently mapped pages. */
@@ -105,5 +110,25 @@
     /** NEMCPU_MAGIC. */
     uint32_t                    u32Magic;
-
+#ifdef RT_OS_WINDOWS
+    /** Parameters for making Hyper-V hypercalls. */
+    union
+    {
+        uint8_t                 ab[64];
+        /** Arguments for NEMR0MapPages (HvCallMapGpaPages). */
+        struct
+        {
+            RTGCPHYS            GCPhysSrc;
+            RTGCPHYS            GCPhysDst; /**< Same as GCPhysSrc except maybe when the A20 gate is disabled. */
+            uint32_t            cPages;
+            HV_MAP_GPA_FLAGS    fFlags;
+        }                       MapPages;
+        /** Arguments for NEMR0UnmapPages (HvCallUnmapGpaPages). */
+        struct
+        {
+            RTGCPHYS            GCPhys;
+            uint32_t            cPages;
+        }                       UnmapPages;
+    } Hypercall;
+#endif
 } NEMCPU;
 /** Pointer to NEM VMCPU instance data. */
@@ -114,4 +139,39 @@
 /** NEMCPU::u32Magic value after termination. */
 #define NEMCPU_MAGIC_DEAD       UINT32_C(0xdead2222)
+
+
+#ifdef IN_RING0
+
+/**
+ * NEM GVMCPU instance data.
+ */
+typedef struct NEMR0PERVCPU
+{
+# ifdef RT_OS_WINDOWS
+    /** @name Hypercall input/output page.
+     * @{ */
+    /** Host physical address of the hypercall input/output page. */
+    RTHCPHYS                    HCPhysHypercallData;
+    /** Pointer to the hypercall input/output page. */
+    uint8_t                    *pbHypercallData;
+    /** Handle to the memory object of the hypercall input/output page. */
+    RTR0MEMOBJ                  hHypercallDataMemObj;
+    /** @} */
+# endif
+} NEMR0PERVCPU;
+
+/**
+ * NEM GVM instance data.
+ */
+typedef struct NEMR0PERVM
+{
+# ifdef RT_OS_WINDOWS
+    /** The partition ID. */
+    uint64_t                    idHvPartition;
+# endif
+} NEMR0PERVM;
+
+#endif /* IN_RING0 */
+
 
 #ifdef IN_RING3
@@ -148,4 +208,11 @@
 
 
+#ifdef RT_OS_WINDOWS
+/** Maximum number of pages we can map in a single NEMR0MapPages call. */
+# define NEM_MAX_MAP_PAGES      ((PAGE_SIZE - RT_UOFFSETOF(HV_INPUT_MAP_GPA_PAGES, PageList)) / sizeof(HV_SPA_PAGE_NUMBER))
+/** Maximum number of pages we can unmap in a single NEMR0UnmapPages call. */
+# define NEM_MAX_UNMAP_PAGES    4095
+
+#endif
 /** @} */
 
