Index: /trunk/include/VBox/err.h
===================================================================
--- /trunk/include/VBox/err.h	(revision 51642)
+++ /trunk/include/VBox/err.h	(revision 51643)
@@ -2353,4 +2353,12 @@
 /** Unknown or invalid GIM provider. */
 #define VERR_GIM_INVALID_PROVIDER                   (-6306)
+/** GIM generic operation failed. */
+#define VERR_GIM_OPERATION_FAILED                   (-6307)
+/** The GIM provider does not support any hypercalls. */
+#define VERR_GIM_HYPERCALLS_NOT_AVAILABLE           (-6308)
+/** The guest has not setup use of the hypercalls. */
+#define VERR_GIM_HYPERCALLS_NOT_ENABLED             (-6309)
+/** The GIM device is not registered with GIM when it ought to be. */
+#define VERR_GIM_DEVICE_NOT_REGISTERED              (-6310)
 /** @} */
 
Index: /trunk/include/VBox/vmm/gim.h
===================================================================
--- /trunk/include/VBox/vmm/gim.h	(revision 51642)
+++ /trunk/include/VBox/vmm/gim.h	(revision 51643)
@@ -57,5 +57,5 @@
     GIMPROVIDERID_KVM
 } GIMPROVIDERID;
-AssertCompileSize(GIMPROVIDERID, 4);
+AssertCompileSize(GIMPROVIDERID, sizeof(uint32_t));
 
 
@@ -74,5 +74,5 @@
     bool                fMapped;
     /** Alignment padding. */
-    uint8_t             au8Alignment0[4];
+    uint8_t             au8Alignment0[3];
     /** Size of the region (must be page aligned). */
     uint32_t            cbRegion;
@@ -94,6 +94,6 @@
 /** Pointer to a const GIM MMIO2 region. */
 typedef GIMMMIO2REGION const *PCGIMMMIO2REGION;
-AssertCompileMemberAlignment(GIMMMIO2REGION, cbRegion,   8);
-AssertCompileMemberAlignment(GIMMMIO2REGION, pvPageR0,   8);
+AssertCompileMemberAlignment(GIMMMIO2REGION, cbRegion, 8);
+AssertCompileMemberAlignment(GIMMMIO2REGION, pvPageR0, 8);
 
 
@@ -153,4 +153,5 @@
 VMMR0_INT_DECL(int)         GIMR0InitVM(PVM pVM);
 VMMR0_INT_DECL(int)         GIMR0TermVM(PVM pVM);
+VMMR0_INT_DECL(int)         GIMR0UpdateParavirtTsc(PVM pVM, uint64_t u64Offset);
 /** @} */
 #endif /* IN_RING0 */
@@ -171,5 +172,5 @@
 
 VMMDECL(bool)               GIMIsEnabled(PVM pVM);
-VMMDECL(int)                GIMUpdateParavirtTsc(PVM pVM, uint64_t u64Offset);
+VMMDECL(GIMPROVIDERID)      GIMGetProvider(PVM pVM);
 VMMDECL(bool)               GIMIsParavirtTscEnabled(PVM pVM);
 VMM_INT_DECL(int)           GIMHypercall(PVMCPU pVCpu, PCPUMCTX pCtx);
Index: /trunk/include/VBox/vmm/hm_vmx.h
===================================================================
--- /trunk/include/VBox/vmm/hm_vmx.h	(revision 51642)
+++ /trunk/include/VBox/vmm/hm_vmx.h	(revision 51643)
@@ -4,5 +4,5 @@
 
 /*
- * Copyright (C) 2006-2013 Oracle Corporation
+ * Copyright (C) 2006-2014 Oracle Corporation
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as
@@ -134,4 +134,5 @@
 AssertCompileMemberOffset(VMXRESTOREHOST, uHostFSBase,    40);
 AssertCompileSize(VMXRESTOREHOST, 56);
+AssertCompileSizeAlignment(VMXRESTOREHOST, 8);
 
 /** @name Host-state MSR lazy-restoration flags.
@@ -779,44 +780,46 @@
 typedef const EPTPT *PCEPTPT;
 
-/**
- * VPID flush types.
+/** @name VMX VPID flush types.
+ *  Warning!! Valid enum members are in accordance with the VT-x spec.
+ * @{
  */
 typedef enum
 {
     /** Invalidate a specific page. */
-    VMX_FLUSH_VPID_INDIV_ADDR                    = 0,
+    VMXFLUSHVPID_INDIV_ADDR                    = 0,
     /** Invalidate one context (specific VPID). */
-    VMX_FLUSH_VPID_SINGLE_CONTEXT                = 1,
+    VMXFLUSHVPID_SINGLE_CONTEXT                = 1,
     /** Invalidate all contexts (all VPIDs). */
-    VMX_FLUSH_VPID_ALL_CONTEXTS                  = 2,
+    VMXFLUSHVPID_ALL_CONTEXTS                  = 2,
     /** Invalidate a single VPID context retaining global mappings. */
-    VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS = 3,
+    VMXFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS = 3,
     /** Unsupported by VirtualBox. */
-    VMX_FLUSH_VPID_NOT_SUPPORTED                 = 0xbad,
+    VMXFLUSHVPID_NOT_SUPPORTED                 = 0xbad0,
     /** Unsupported by CPU. */
-    VMX_FLUSH_VPID_NONE                          = 0xb00,
-    /** 32bit hackishness. */
-    VMX_FLUSH_VPID_32BIT_HACK                    = 0x7fffffff
-} VMX_FLUSH_VPID;
-
-/**
- * EPT flush types.
+    VMXFLUSHVPID_NONE                          = 0xbad1
+} VMXFLUSHVPID;
+AssertCompileSize(VMXFLUSHVPID, 4);
+/** @} */
+
+/** @name VMX EPT flush types.
+ *  Warning!! Valid enum values below are in accordance with the VT-x spec.
+ * @{
  */
 typedef enum
 {
     /** Invalidate one context (specific EPT). */
-    VMX_FLUSH_EPT_SINGLE_CONTEXT                = 1,
+    VMXFLUSHEPT_SINGLE_CONTEXT                 = 1,
     /* Invalidate all contexts (all EPTs) */
-    VMX_FLUSH_EPT_ALL_CONTEXTS                  = 2,
+    VMXFLUSHEPT_ALL_CONTEXTS                   = 2,
     /** Unsupported by VirtualBox.   */
-    VMX_FLUSH_EPT_NOT_SUPPORTED                 = 0xbad,
+    VMXFLUSHEPT_NOT_SUPPORTED                  = 0xbad0,
     /** Unsupported by CPU. */
-    VMX_FLUSH_EPT_NONE                          = 0xb00,
-    /** 32bit hackishness. */
-    VMX_FLUSH_EPT_32BIT_HACK                    = 0x7fffffff
-} VMX_FLUSH_EPT;
-/** @} */
-
-/** @name MSR autoload/store elements
+    VMXFLUSHEPT_NONE                           = 0xbad1
+} VMXFLUSHEPT;
+AssertCompileSize(VMXFLUSHEPT, 4);
+/** @} */
+
+/** @name VMX MSR autoload/store element.
+ *  In accordance with the VT-x spec.
  * @{
  */
@@ -824,6 +827,9 @@
 typedef struct
 {
+    /** The MSR Id. */
     uint32_t    u32Msr;
+    /** Reserved (MBZ). */
     uint32_t    u32Reserved;
+    /** The MSR value. */
     uint64_t    u64Value;
 } VMXAUTOMSR;
@@ -877,4 +883,5 @@
 /** Pointer to a VMXMSRS struct. */
 typedef VMXMSRS *PVMXMSRS;
+AssertCompileSizeAlignment(VMXMSRS, 8);
 /** @} */
 
@@ -2188,5 +2195,5 @@
  * @param   pDescriptor Descriptor
  */
-DECLASM(int) VMXR0InvEPT(VMX_FLUSH_EPT enmFlush, uint64_t *pDescriptor);
+DECLASM(int) VMXR0InvEPT(VMXFLUSHEPT enmFlush, uint64_t *pDescriptor);
 
 /**
@@ -2196,5 +2203,5 @@
  * @param   pDescriptor Descriptor
  */
-DECLASM(int) VMXR0InvVPID(VMX_FLUSH_VPID enmFlush, uint64_t *pDescriptor);
+DECLASM(int) VMXR0InvVPID(VMXFLUSHVPID enmFlush, uint64_t *pDescriptor);
 
 /**
Index: /trunk/include/VBox/vmm/tm.h
===================================================================
--- /trunk/include/VBox/vmm/tm.h	(revision 51642)
+++ /trunk/include/VBox/vmm/tm.h	(revision 51643)
@@ -133,6 +133,6 @@
 VMMDECL(uint64_t)       TMCpuTickGet(PVMCPU pVCpu);
 VMM_INT_DECL(uint64_t)  TMCpuTickGetNoCheck(PVMCPU pVCpu);
-VMM_INT_DECL(bool)      TMCpuTickCanUseRealTSC(PVMCPU pVCpu, uint64_t *poffRealTSC);
-VMM_INT_DECL(uint64_t)  TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, bool *pfOffsettedTsc, uint64_t *poffRealTSC);
+VMM_INT_DECL(bool)      TMCpuTickCanUseRealTSC(PVMCPU pVCpu, uint64_t *poffRealTSC, bool *pfParavirtTsc);
+VMM_INT_DECL(uint64_t)  TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, bool *pfOffsettedTsc, bool *pfParavirtTsc, uint64_t *poffRealTSC);
 VMM_INT_DECL(int)       TMCpuTickSet(PVM pVM, PVMCPU pVCpu, uint64_t u64Tick);
 VMM_INT_DECL(int)       TMCpuTickSetLastSeen(PVMCPU pVCpu, uint64_t u64LastSeenTick);
Index: /trunk/include/VBox/vmm/vm.h
===================================================================
--- /trunk/include/VBox/vmm/vm.h	(revision 51642)
+++ /trunk/include/VBox/vmm/vm.h	(revision 51643)
@@ -149,5 +149,5 @@
         struct HMCPU    s;
 #endif
-        uint8_t             padding[5632];      /* multiple of 64 */
+        uint8_t             padding[5696];      /* multiple of 64 */
     } hm;
 
@@ -235,5 +235,5 @@
 
     /** Align the following members on page boundary. */
-    uint8_t                 abAlignment2[64];
+    /* Note: the former 64-byte abAlignment2 pad has been folded into the
+       enlarged hm padding above (5632 -> 5696); do not leave commented-out
+       members behind. */
 
     /** PGM part. */
Index: /trunk/include/VBox/vmm/vm.mac
===================================================================
--- /trunk/include/VBox/vmm/vm.mac	(revision 51642)
+++ /trunk/include/VBox/vmm/vm.mac	(revision 51643)
@@ -135,5 +135,5 @@
 
     .cpum                   resb 3584
-    .hm                     resb 5632
+    .hm                     resb 5696
     .em                     resb 1472
     .iem                    resb 3072
Index: /trunk/src/VBox/Devices/GIMDev/GIMDev.cpp
===================================================================
--- /trunk/src/VBox/Devices/GIMDev/GIMDev.cpp	(revision 51642)
+++ /trunk/src/VBox/Devices/GIMDev/GIMDev.cpp	(revision 51643)
@@ -115,4 +115,6 @@
             else
                 pCur->pvPageRC = NIL_RTRCPTR;
+
+            LogRel(("GIMDev: Registered %s\n", pCur->szDescription));
         }
     }
Index: /trunk/src/VBox/Main/src-server/MachineImpl.cpp
===================================================================
--- /trunk/src/VBox/Main/src-server/MachineImpl.cpp	(revision 51642)
+++ /trunk/src/VBox/Main/src-server/MachineImpl.cpp	(revision 51643)
@@ -8517,4 +8517,5 @@
         mHWData->mKeyboardHIDType = data.keyboardHIDType;
         mHWData->mChipsetType = data.chipsetType;
+        mHWData->mParavirtProvider = data.paravirtProvider;
         mHWData->mEmulatedUSBCardReaderEnabled = data.fEmulatedUSBCardReader;
         mHWData->mHPETEnabled = data.fHPETEnabled;
Index: /trunk/src/VBox/VMM/Makefile.kmk
===================================================================
--- /trunk/src/VBox/VMM/Makefile.kmk	(revision 51642)
+++ /trunk/src/VBox/VMM/Makefile.kmk	(revision 51643)
@@ -545,4 +545,6 @@
  	VMMR0/CPUMR0.cpp \
  	VMMR0/CPUMR0A.asm \
+	VMMR0/GIMR0.cpp \
+	VMMR0/GIMR0Hv.cpp \
  	VMMR0/GMMR0.cpp \
  	VMMR0/GVMMR0.cpp \
Index: /trunk/src/VBox/VMM/VMMAll/EMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/EMAll.cpp	(revision 51642)
+++ /trunk/src/VBox/VMM/VMMAll/EMAll.cpp	(revision 51643)
@@ -1749,5 +1749,5 @@
     if (RT_UNLIKELY(rc != VINF_SUCCESS))
     {
-        Assert(rc == VERR_CPUM_RAISE_GP_0);
+        Assert(rc == VERR_CPUM_RAISE_GP_0 || rc == VERR_EM_INTERPRETER);
         Log4(("EM: Refuse RDMSR: rc=%Rrc\n", rc));
         return VERR_EM_INTERPRETER;
Index: /trunk/src/VBox/VMM/VMMAll/GIMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/GIMAll.cpp	(revision 51642)
+++ /trunk/src/VBox/VMM/VMMAll/GIMAll.cpp	(revision 51643)
@@ -45,4 +45,16 @@
 
 /**
+ * Gets the GIM provider configured for this VM.
+ *
+ * @returns The GIM provider Id.
+ * @param   pVM     Pointer to the VM.
+ */
+VMMDECL(GIMPROVIDERID) GIMGetProvider(PVM pVM)
+{
+    return pVM->gim.s.enmProviderId;
+}
+
+
+/**
  * Implements a GIM hypercall with the provider configured for the VM.
  *
@@ -67,38 +79,4 @@
     }
 }
-
-
-/**
- * Updates the paravirtualized TSC supported by the GIM provider.
- *
- * @returns VBox status code.
- * @retval VINF_SUCCESS if the paravirt. TSC is setup and in use.
- * @retval VERR_GIM_NOT_ENABLED if no GIM provider is configured for this VM.
- * @retval VERR_GIM_PVTSC_NOT_AVAILABLE if the GIM provider does not support any
- *         paravirt. TSC.
- * @retval VERR_GIM_PVTSC_NOT_IN_USE if the GIM provider supports paravirt. TSC
- *         but the guest isn't currently using it.
- *
- * @param   pVM         Pointer to the VM.
- * @param   u64Offset   The computed TSC offset.
- *
- * @thread EMT(pVCpu)
- */
-VMMDECL(int) GIMUpdateParavirtTsc(PVM pVM, uint64_t u64Offset)
-{
-    if (!pVM->gim.s.fEnabled)
-        return VERR_GIM_NOT_ENABLED;
-
-    switch (pVM->gim.s.enmProviderId)
-    {
-        case GIMPROVIDERID_HYPERV:
-            return GIMHvUpdateParavirtTsc(pVM, u64Offset);
-
-        default:
-            break;
-    }
-    return VERR_GIM_PVTSC_NOT_AVAILABLE;
-}
-
 
 VMMDECL(bool) GIMIsParavirtTscEnabled(PVM pVM)
Index: /trunk/src/VBox/VMM/VMMAll/GIMAllHv.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/GIMAllHv.cpp	(revision 51642)
+++ /trunk/src/VBox/VMM/VMMAll/GIMAllHv.cpp	(revision 51643)
@@ -28,4 +28,8 @@
 #include <VBox/vmm/vm.h>
 #include <VBox/vmm/pgm.h>
+#include <VBox/vmm/pdmdev.h>
+
+#include <iprt/asm-amd64-x86.h>
+#include <iprt/spinlock.h>
 
 
@@ -53,33 +57,4 @@
 {
     return MSR_GIM_HV_REF_TSC_IS_ENABLED(pVM->gim.s.u.Hv.u64TscPageMsr);
-}
-
-
-/**
- * Updates Hyper-V's reference TSC page.
- *
- * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
- * @param   u64Offset   The computed TSC offset.
- * @thread EMT(pVCpu)
- */
-VMM_INT_DECL(int) GIMHvUpdateParavirtTsc(PVM pVM, uint64_t u64Offset)
-{
-    Assert(GIMIsEnabled(pVM));
-    bool fHvTscEnabled = MSR_GIM_HV_REF_TSC_IS_ENABLED(pVM->gim.s.u.Hv.u64TscPageMsr);
-    if (!fHvTscEnabled)
-        return VERR_GIM_PVTSC_NOT_ENABLED;
-
-    PGIMHV          pHv      = &pVM->gim.s.u.Hv;
-    PGIMMMIO2REGION pRegion  = &pHv->aMmio2Regions[GIM_HV_HYPERCALL_PAGE_REGION_IDX];
-    PGIMHVREFTSC    pRefTsc  = (PGIMHVREFTSC)pRegion->CTX_SUFF(pvPage);
-    Assert(pRefTsc);
-
-    /** @todo Protect this with a spinlock! */
-    pRefTsc->u64TscScale  = UINT64_C(0x1000000000000000);
-    pRefTsc->u64TscOffset = u64Offset;
-    ASMAtomicIncU32(&pRefTsc->u32TscSequence);
-
-    return VINF_SUCCESS;
 }
 
@@ -104,5 +79,5 @@
         case MSR_GIM_HV_TIME_REF_COUNT:
         {
-            /* Hyper-V reports the time in 100ns units. */
+            /* Hyper-V reports the time in 100 ns units (10 MHz). */
             uint64_t u64Tsc      = TMCpuTickGet(pVCpu);
             uint64_t u64TscHz    = TMCpuTicksPerSecond(pVM);
@@ -129,10 +104,20 @@
 
         case MSR_GIM_HV_TSC_FREQ:
-            *puValue = TMCpuTicksPerSecond(pVM);
-            return VINF_SUCCESS;
+#ifndef IN_RING3
+            return VERR_EM_INTERPRETER;
+#else
+            /* Report the guest TSC frequency in Hz. Never hard-code the
+               host's frequency here (debug residue), and avoid %u for a
+               uint64_t value in log statements. */
+            *puValue = TMCpuTicksPerSecond(pVM);
+            return VINF_SUCCESS;
+#endif
 
         case MSR_GIM_HV_APIC_FREQ:
             /** @todo Fix this later! Get the information from DevApic. */
             *puValue = UINT32_C(1000000000); /* TMCLOCK_FREQ_VIRTUAL */
+            return VINF_SUCCESS;
+
+        case MSR_GIM_HV_RESET:
+            *puValue = 0;
             return VINF_SUCCESS;
 
@@ -171,7 +156,6 @@
             if (!uRawValue)
             {
-                GIMR3Mmio2Unmap(pVM, &pHv->aMmio2Regions[GIM_HV_HYPERCALL_PAGE_REGION_IDX]);
+                GIMR3HvDisableHypercallPage(pVM);
                 pHv->u64HypercallMsr &= ~MSR_GIM_HV_HYPERCALL_ENABLE_BIT;
-                Log4Func(("Disabled hypercalls\n"));
             }
             pHv->u64GuestOsIdMsr = uRawValue;
@@ -196,58 +180,20 @@
             }
 
-            PPDMDEVINSR3    pDevIns = pVM->gim.s.pDevInsR3;
-            PGIMMMIO2REGION pRegion = &pHv->aMmio2Regions[GIM_HV_HYPERCALL_PAGE_REGION_IDX];
-            AssertPtr(pDevIns);
-            AssertPtr(pRegion);
-
-            /*
-             * Is the guest disabling the hypercall-page? Allow it regardless of the Guest-OS Id Msr.
-             */
+            /* Is the guest disabling the hypercall-page? Allow it regardless of the Guest-OS Id Msr. */
             if (!fEnable)
             {
-                GIMR3Mmio2Unmap(pVM, pRegion);
+                GIMR3HvDisableHypercallPage(pVM);
                 pHv->u64HypercallMsr = uRawValue;
-                Log4Func(("Disabled hypercalls\n"));
-                return VINF_SUCCESS;
-            }
-
-            /*
-             * Map the hypercall-page.
-             */
+                return VINF_SUCCESS;
+            }
+
+            /* Enable the hypercall-page. */
             RTGCPHYS GCPhysHypercallPage = MSR_GIM_HV_HYPERCALL_GUEST_PFN(uRawValue) << PAGE_SHIFT;
-            int rc = GIMR3Mmio2Map(pVM, pRegion, GCPhysHypercallPage, "Hyper-V Hypercall-page");
+            int rc = GIMR3HvEnableHypercallPage(pVM, GCPhysHypercallPage);
             if (RT_SUCCESS(rc))
             {
-                /*
-                 * Patch the hypercall-page.
-                 */
-                if (HMIsEnabled(pVM))
-                {
-                    size_t cbWritten = 0;
-                    rc = HMPatchHypercall(pVM, pRegion->pvPageR3, PAGE_SIZE, &cbWritten);
-                    if (   RT_SUCCESS(rc)
-                        && cbWritten < PAGE_SIZE - 1)
-                    {
-                        uint8_t *pbLast = (uint8_t *)pRegion->pvPageR3 + cbWritten;
-                        *pbLast = 0xc3;  /* RET */
-
-                        pHv->u64HypercallMsr = uRawValue;
-                        LogRelFunc(("Enabled hypercalls at %#RGp\n", GCPhysHypercallPage));
-                        LogRelFunc(("%.*Rhxd\n", cbWritten + 1, (uint8_t *)pRegion->pvPageR3));
-                        return VINF_SUCCESS;
-                    }
-
-                    LogFunc(("MSR_GIM_HV_HYPERCALL: HMPatchHypercall failed. rc=%Rrc cbWritten=%u\n", rc, cbWritten));
-                }
-                else
-                {
-                    /** @todo Handle raw-mode hypercall page patching. */
-                    LogRelFunc(("MSR_GIM_HV_HYPERCALL: raw-mode not yet implemented!\n"));
-                }
-
-                GIMR3Mmio2Unmap(pVM, pRegion);
-            }
-            else
-                LogFunc(("MSR_GIM_HV_HYPERCALL: GIMR3Mmio2Map failed. rc=%Rrc -> #GP(0)\n", rc));
+                pHv->u64HypercallMsr = uRawValue;
+                return VINF_SUCCESS;
+            }
 
             return VERR_CPUM_RAISE_GP_0;
@@ -263,35 +209,39 @@
             pHv->u64TscPageMsr = (uRawValue & ~MSR_GIM_HV_REF_TSC_ENABLE_BIT);
 
-            PPDMDEVINSR3    pDevIns = pVM->gim.s.pDevInsR3;
-            PGIMMMIO2REGION pRegion = &pHv->aMmio2Regions[GIM_HV_REF_TSC_PAGE_REGION_IDX];
-            AssertPtr(pDevIns);
-            AssertPtr(pRegion);
-
-            /*
-             * Is the guest disabling the TSC-page?
-             */
+            /* Is the guest disabling the TSC-page? */
             bool fEnable = RT_BOOL(uRawValue & MSR_GIM_HV_REF_TSC_ENABLE_BIT);
             if (!fEnable)
             {
-                GIMR3Mmio2Unmap(pVM, pRegion);
-                Log4Func(("Disabled TSC-page\n"));
-                return VINF_SUCCESS;
-            }
-
-            /*
-             * Map the TSC-page.
-             */
+                GIMR3HvDisableTscPage(pVM);
+                pHv->u64TscPageMsr = uRawValue;
+                return VINF_SUCCESS;
+            }
+
+            /* Enable the TSC-page. */
             RTGCPHYS GCPhysTscPage = MSR_GIM_HV_REF_TSC_GUEST_PFN(uRawValue) << PAGE_SHIFT;
-            int rc = GIMR3Mmio2Map(pVM, pRegion, GCPhysTscPage, "Hyper-V TSC-page");
+            int rc = GIMR3HvEnableTscPage(pVM, GCPhysTscPage);
             if (RT_SUCCESS(rc))
             {
                 pHv->u64TscPageMsr = uRawValue;
-                Log4Func(("MSR_GIM_HV_REF_TSC: Enabled Hyper-V TSC page at %#RGp\n", GCPhysTscPage));
-                return VINF_SUCCESS;
-            }
-            else
-                LogFunc(("MSR_GIM_HV_REF_TSC: GIMR3Mmio2Map failed. rc=%Rrc -> #GP(0)\n", rc));
+                return VINF_SUCCESS;
+            }
 
             return VERR_CPUM_RAISE_GP_0;
+#endif  /* !IN_RING3 */
+        }
+
+        case MSR_GIM_HV_RESET:
+        {
+#ifndef IN_RING3
+            return VERR_EM_INTERPRETER;
+#else
+            if (MSR_GIM_HV_RESET_IS_SET(uRawValue))
+            {
+                LogRel(("GIM: HyperV: Reset initiated by MSR.\n"));
+                int rc = PDMDevHlpVMReset(pVM->gim.s.pDevInsR3);
+                AssertRC(rc);
+            }
+            /* else: Ignore writes to other bits. */
+            return VINF_SUCCESS;
 #endif  /* !IN_RING3 */
         }
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp	(revision 51642)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp	(revision 51643)
@@ -1036,6 +1036,9 @@
         uint8_t  idMmio2 = PGM_MMIO2_PAGEID_GET_MMIO2_ID(PGM_PAGE_GET_PAGEID(pPage));
         uint32_t iPage   = PGM_MMIO2_PAGEID_GET_IDX(PGM_PAGE_GET_PAGEID(pPage));
-        AssertLogRelReturn((uint8_t)(idMmio2 - 1U)< RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)),
-                            VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
+        AssertLogRelMsgReturn((uint8_t)(idMmio2 - 1U) < RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)),
+                              ("idMmio2=%u cMmio2Ranges=%u type=%u GCPhys=%#RGp Id=%u State=%u", idMmio2,
+                               RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)), PGM_PAGE_GET_TYPE(pPage), GCPhys,
+                               pPage->s.idPage, pPage->s.uStateY),
+                              VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
         PPGMMMIO2RANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1];
         AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
Index: /trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp	(revision 51642)
+++ /trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp	(revision 51643)
@@ -25,4 +25,5 @@
 #include "TMInternal.h"
 #include <VBox/vmm/vm.h>
+#include <VBox/vmm/gim.h>
 #include <VBox/sup.h>
 
@@ -140,15 +141,20 @@
  *
  * @returns true/false accordingly.
- * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pVCpu           Pointer to the VMCPU.
  * @param   poffRealTSC     The offset against the TSC of the current CPU.
  *                          Can be NULL.
- * @thread EMT.
- */
-VMM_INT_DECL(bool) TMCpuTickCanUseRealTSC(PVMCPU pVCpu, uint64_t *poffRealTSC)
+ * @param   pfParavirtTsc   Where to store whether paravirt. TSC can be used or
+ *                          not.
+ * @thread EMT(pVCpu).
+ */
+VMM_INT_DECL(bool) TMCpuTickCanUseRealTSC(PVMCPU pVCpu, uint64_t *poffRealTSC, bool *pfParavirtTsc)
 {
     PVM pVM = pVCpu->CTX_SUFF(pVM);
+    bool fParavirtTsc = false;
 
     /*
      * We require:
+     *     1. Use of a paravirtualized TSC is enabled by the guest.
+     *     (OR)
      *     1. A fixed TSC, this is checked at init time.
      *     2. That the TSC is ticking (we shouldn't be here if it isn't)
@@ -158,11 +164,11 @@
      *          c) we're not using warp drive (accelerated virtual guest time).
      */
-    if (    pVM->tm.s.fMaybeUseOffsettedHostTSC
-        &&  RT_LIKELY(pVCpu->tm.s.fTSCTicking)
-        &&  (   pVM->tm.s.fTSCUseRealTSC
-             || (   !pVM->tm.s.fVirtualSyncCatchUp
-                 && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
-                 && !pVM->tm.s.fVirtualWarpDrive))
-       )
+    if (    (*pfParavirtTsc = GIMIsParavirtTscEnabled(pVM)) == true
+        ||  (    pVM->tm.s.fMaybeUseOffsettedHostTSC
+             &&  RT_LIKELY(pVCpu->tm.s.fTSCTicking)
+             &&  (   pVM->tm.s.fTSCUseRealTSC
+                 || (   !pVM->tm.s.fVirtualSyncCatchUp
+                     && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
+                     && !pVM->tm.s.fVirtualWarpDrive))))
     {
         if (!pVM->tm.s.fTSCUseRealTSC)
@@ -233,9 +239,13 @@
  * @returns The number of host CPU clock ticks to the next timer deadline.
  * @param   pVCpu           The current CPU.
+ * @param   pfParavirtTsc   Where to store whether paravirt. TSC can be used or
+ *                          not.
  * @param   poffRealTSC     The offset against the TSC of the current CPU.
+ *
  * @thread  EMT(pVCpu).
- * @remarks Superset of TMCpuTickCanUseRealTSC.
- */
-VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, bool *pfOffsettedTsc, uint64_t *poffRealTSC)
+ * @remarks Superset of TMCpuTickCanUseRealTSC().
+ */
+VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, bool *pfOffsettedTsc, bool *pfParavirtTsc,
+                                                        uint64_t *poffRealTSC)
 {
     PVM         pVM = pVCpu->CTX_SUFF(pVM);
@@ -244,4 +254,6 @@
     /*
      * We require:
+     *     1. Use of a paravirtualized TSC is enabled by the guest.
+     *     (OR)
      *     1. A fixed TSC, this is checked at init time.
      *     2. That the TSC is ticking (we shouldn't be here if it isn't)
@@ -251,11 +263,11 @@
      *          c) we're not using warp drive (accelerated virtual guest time).
      */
-    if (    pVM->tm.s.fMaybeUseOffsettedHostTSC
-        &&  RT_LIKELY(pVCpu->tm.s.fTSCTicking)
-        &&  (   pVM->tm.s.fTSCUseRealTSC
-             || (   !pVM->tm.s.fVirtualSyncCatchUp
-                 && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
-                 && !pVM->tm.s.fVirtualWarpDrive))
-       )
+    if (    (*pfParavirtTsc = GIMIsParavirtTscEnabled(pVM)) == true
+        ||  (    pVM->tm.s.fMaybeUseOffsettedHostTSC
+             &&  RT_LIKELY(pVCpu->tm.s.fTSCTicking)
+             &&  (   pVM->tm.s.fTSCUseRealTSC
+                 || (   !pVM->tm.s.fVirtualSyncCatchUp
+                     && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
+                     && !pVM->tm.s.fVirtualWarpDrive))))
     {
         *pfOffsettedTsc = true;
@@ -293,4 +305,5 @@
         cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
     }
+
     return cTicksToDeadline;
 }
@@ -412,10 +425,10 @@
 
 /**
- * Gets the last seen CPU timestamp counter.
- *
- * @returns last seen TSC
+ * Gets the last seen CPU timestamp counter of the guest.
+ *
+ * @returns the last seen TSC.
  * @param   pVCpu               Pointer to the VMCPU.
  *
- * @thread  EMT which TSC is to be set.
+ * @thread  EMT(pVCpu).
  */
 VMM_INT_DECL(uint64_t) TMCpuTickGetLastSeen(PVMCPU pVCpu)
Index: /trunk/src/VBox/VMM/VMMR0/GIMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/GIMR0.cpp	(revision 51642)
+++ /trunk/src/VBox/VMM/VMMR0/GIMR0.cpp	(revision 51643)
@@ -23,5 +23,5 @@
 #include "GIMHvInternal.h"
 
-#include <iprt/err.h>
+#include <VBox/err.h>
 #include <VBox/vmm/vm.h>
 
@@ -72,2 +72,34 @@
 }
 
+/**
+ * Updates the paravirtualized TSC supported by the GIM provider.
+ *
+ * @returns VBox status code.
+ * @retval VINF_SUCCESS if the paravirt. TSC is setup and in use.
+ * @retval VERR_GIM_NOT_ENABLED if no GIM provider is configured for this VM.
+ * @retval VERR_GIM_PVTSC_NOT_AVAILABLE if the GIM provider does not support any
+ *         paravirt. TSC.
+ * @retval VERR_GIM_PVTSC_NOT_IN_USE if the GIM provider supports paravirt. TSC
+ *         but the guest isn't currently using it.
+ *
+ * @param   pVM         Pointer to the VM.
+ * @param   u64Offset   The computed TSC offset.
+ *
+ * @thread EMT(pVCpu)
+ */
+VMMR0_INT_DECL(int) GIMR0UpdateParavirtTsc(PVM pVM, uint64_t u64Offset)
+{
+    if (!pVM->gim.s.fEnabled)
+        return VERR_GIM_NOT_ENABLED;
+
+    switch (pVM->gim.s.enmProviderId)
+    {
+        case GIMPROVIDERID_HYPERV:
+            return GIMR0HvUpdateParavirtTsc(pVM, u64Offset);
+
+        default:
+            break;
+    }
+    return VERR_GIM_PVTSC_NOT_AVAILABLE;
+}
+
Index: /trunk/src/VBox/VMM/VMMR0/GIMR0Hv.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/GIMR0Hv.cpp	(revision 51642)
+++ /trunk/src/VBox/VMM/VMMR0/GIMR0Hv.cpp	(revision 51643)
@@ -23,11 +23,12 @@
 #include "GIMHvInternal.h"
 
-#include <iprt/err.h>
-#include <iprt/asm.h>
-#include <iprt/memobj.h>
+#include <VBox/err.h>
 #include <VBox/vmm/gim.h>
 #include <VBox/vmm/vm.h>
 
+#include <iprt/spinlock.h>
 
+
+#if 0
 /**
  * Allocates and maps one physically contiguous page. The allocated page is
@@ -80,4 +81,38 @@
     }
 }
+#endif
+
+/**
+ * Updates Hyper-V's reference TSC page.
+ *
+ * @returns VBox status code.
+ * @param   pVM         Pointer to the VM.
+ * @param   u64Offset   The computed TSC offset.
+ * @thread  EMT.
+ */
+VMM_INT_DECL(int) GIMR0HvUpdateParavirtTsc(PVM pVM, uint64_t u64Offset)
+{
+    Assert(GIMIsEnabled(pVM));
+    bool fHvTscEnabled = MSR_GIM_HV_REF_TSC_IS_ENABLED(pVM->gim.s.u.Hv.u64TscPageMsr);
+    if (RT_UNLIKELY(!fHvTscEnabled))
+        return VERR_GIM_PVTSC_NOT_ENABLED;
+
+    PCGIMHV          pcHv     = &pVM->gim.s.u.Hv;
+    PCGIMMMIO2REGION pcRegion = &pcHv->aMmio2Regions[GIM_HV_HYPERCALL_PAGE_REGION_IDX];
+    PGIMHVREFTSC     pRefTsc  = (PGIMHVREFTSC)pcRegion->CTX_SUFF(pvPage);
+    Assert(pRefTsc);
+
+    RTSpinlockAcquire(pcHv->hSpinlockR0);
+    pRefTsc->i64TscOffset = u64Offset;
+    if (pRefTsc->u32TscSequence < UINT32_C(0xfffffffe))
+        ASMAtomicIncU32(&pRefTsc->u32TscSequence);
+    else
+        ASMAtomicWriteU32(&pRefTsc->u32TscSequence, 1);
+    RTSpinlockRelease(pcHv->hSpinlockR0);
+
+    Assert(pRefTsc->u32TscSequence != 0);
+    Assert(pRefTsc->u32TscSequence != UINT32_C(0xffffffff));
+    return VINF_SUCCESS;
+}
 
 
@@ -90,26 +125,12 @@
 VMMR0_INT_DECL(int) GIMR0HvInitVM(PVM pVM)
 {
-#if 0
     AssertPtr(pVM);
     Assert(GIMIsEnabled(pVM));
 
     PGIMHV pHv = &pVM->gim.s.u.Hv;
-    Assert(pHv->hMemObjTscPage == NIL_RTR0MEMOBJ);
+    Assert(pHv->hSpinlockR0 == NIL_RTSPINLOCK);
 
-    /*
-     * Allocate the TSC page.
-     */
-    int rc = gimR0HvPageAllocZ(&pHv->hMemObjTscPage, &pHv->pvTscPageR0, &pHv->HCPhysTscPage);
-    if (RT_FAILURE(rc))
-        goto cleanup;
-#endif
-
-    return VINF_SUCCESS;
-
-#if 0
-cleanup:
-    gimR0HvPageFree(&pHv->hMemObjTscPage, &pHv->pvTscPageR0, &pHv->HCPhysTscPage);
+    int rc = RTSpinlockCreate(&pHv->hSpinlockR0, RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, "Hyper-V");
     return rc;
-#endif
 }
 
@@ -125,8 +146,9 @@
     AssertPtr(pVM);
     Assert(GIMIsEnabled(pVM));
-#if 0
+
     PGIMHV pHv = &pVM->gim.s.u.Hv;
-    gimR0HvPageFree(&pHv->hMemObjTscPage, &pHv->pvTscPageR0, &pHv->HCPhysTscPage);
-#endif
+    RTSpinlockDestroy(pHv->hSpinlockR0);
+    pHv->hSpinlockR0 = NIL_RTSPINLOCK;
+
     return VINF_SUCCESS;
 }
Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 51642)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 51643)
@@ -30,4 +30,5 @@
 #include <VBox/vmm/iom.h>
 #include <VBox/vmm/tm.h>
+#include <VBox/vmm/gim.h>
 
 #ifdef DEBUG_ramshankar
@@ -2234,9 +2235,23 @@
 static void hmR0SvmUpdateTscOffsetting(PVMCPU pVCpu)
 {
+    bool     fParavirtTsc = false;
     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
-    if (TMCpuTickCanUseRealTSC(pVCpu, &pVmcb->ctrl.u64TSCOffset))
-    {
-        uint64_t u64CurTSC = ASMReadTSC();
-        if (u64CurTSC + pVmcb->ctrl.u64TSCOffset > TMCpuTickGetLastSeen(pVCpu))
+    if (TMCpuTickCanUseRealTSC(pVCpu, &pVmcb->ctrl.u64TSCOffset, &fParavirtTsc))
+    {
+        uint64_t u64CurTSC   = ASMReadTSC();
+        uint64_t u64LastTick = TMCpuTickGetLastSeen(pVCpu);
+        if (fParavirtTsc)
+        {
+            if (u64CurTSC + pVmcb->ctrl.u64TSCOffset > u64LastTick)
+            {
+                pVmcb->ctrl.u64TSCOffset = u64LastTick - u64CurTSC;
+                STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffsetAdjusted);
+            }
+            int rc = GIMR0UpdateParavirtTsc(pVCpu->CTX_SUFF(pVM), pVmcb->ctrl.u64TSCOffset);
+            AssertRC(rc);
+            STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
+        }
+
+        if (u64CurTSC + pVmcb->ctrl.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
         {
             pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC;
@@ -2253,4 +2268,5 @@
     else
     {
+        Assert(!fParavirtTsc);
         pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
         pVmcb->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;
Index: /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 51642)
+++ /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 51643)
@@ -34,4 +34,5 @@
 #include <VBox/vmm/selm.h>
 #include <VBox/vmm/tm.h>
+#include <VBox/vmm/gim.h>
 #ifdef VBOX_WITH_REM
 # include <VBox/vmm/rem.h>
@@ -336,6 +337,6 @@
 *   Internal Functions                                                         *
 *******************************************************************************/
-static void               hmR0VmxFlushEpt(PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush);
-static void               hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr);
+static void               hmR0VmxFlushEpt(PVMCPU pVCpu, VMXFLUSHEPT enmFlush);
+static void               hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr);
 static int                hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
                                                  uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntState);
@@ -1067,5 +1068,5 @@
     if (pMsrs->u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
     {
-        hmR0VmxFlushEpt(NULL /* pVCpu */, VMX_FLUSH_EPT_ALL_CONTEXTS);
+        hmR0VmxFlushEpt(NULL /* pVCpu */, VMXFLUSHEPT_ALL_CONTEXTS);
         pCpu->fFlushAsidBeforeUse = false;
     }
@@ -1698,8 +1699,8 @@
  * @remarks Can be called with interrupts disabled.
  */
-static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush)
+static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXFLUSHEPT enmFlush)
 {
     uint64_t au64Descriptor[2];
-    if (enmFlush == VMX_FLUSH_EPT_ALL_CONTEXTS)
+    if (enmFlush == VMXFLUSHEPT_ALL_CONTEXTS)
         au64Descriptor[0] = 0;
     else
@@ -1734,5 +1735,5 @@
  * @remarks Can be called with interrupts disabled.
  */
-static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr)
+static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr)
 {
     NOREF(pVM);
@@ -1741,5 +1742,5 @@
 
     uint64_t au64Descriptor[2];
-    if (enmFlush == VMX_FLUSH_VPID_ALL_CONTEXTS)
+    if (enmFlush == VMXFLUSHVPID_ALL_CONTEXTS)
     {
         au64Descriptor[0] = 0;
@@ -1795,5 +1796,5 @@
             if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
             {
-                hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, GCVirt);
+                hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, GCVirt);
                 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
             }
@@ -1961,5 +1962,5 @@
         {
             for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
-                hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
+                hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
         }
         else
@@ -2120,9 +2121,9 @@
         if (pCpu->fFlushAsidBeforeUse)
         {
-            if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_SINGLE_CONTEXT)
-                hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_SINGLE_CONTEXT, 0 /* GCPtr */);
-            else if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_ALL_CONTEXTS)
+            if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_SINGLE_CONTEXT)
+                hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_SINGLE_CONTEXT, 0 /* GCPtr */);
+            else if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_ALL_CONTEXTS)
             {
-                hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_ALL_CONTEXTS, 0 /* GCPtr */);
+                hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */);
                 pCpu->fFlushAsidBeforeUse = false;
             }
@@ -2151,5 +2152,5 @@
             {
                 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
-                    hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
+                    hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
             }
             else
@@ -2226,11 +2227,11 @@
         {
             if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
-                pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_SINGLE_CONTEXT;
+                pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_SINGLE_CONTEXT;
             else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
-                pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_ALL_CONTEXTS;
+                pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_ALL_CONTEXTS;
             else
             {
                 /* Shouldn't happen. EPT is supported but no suitable flush-types supported. */
-                pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
+                pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
                 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
             }
@@ -2240,5 +2241,5 @@
             {
                 LogRel(("hmR0VmxSetupTaggedTlb: Unsupported EPTP memory type %#x.\n", pVM->hm.s.vmx.Msrs.u64EptVpidCaps));
-                pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
+                pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
                 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
             }
@@ -2247,5 +2248,5 @@
         {
             /* Shouldn't happen. EPT is supported but INVEPT instruction is not supported. */
-            pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
+            pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
             return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
         }
@@ -2260,7 +2261,7 @@
         {
             if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
-                pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_SINGLE_CONTEXT;
+                pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_SINGLE_CONTEXT;
             else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
-                pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_ALL_CONTEXTS;
+                pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_ALL_CONTEXTS;
             else
             {
@@ -2270,5 +2271,5 @@
                 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
                     LogRel(("hmR0VmxSetupTaggedTlb: Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
-                pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
+                pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NOT_SUPPORTED;
                 pVM->hm.s.vmx.fVpid = false;
             }
@@ -2278,5 +2279,5 @@
             /*  Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
             Log4(("hmR0VmxSetupTaggedTlb: VPID supported without INVEPT support. Ignoring VPID.\n"));
-            pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
+            pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NOT_SUPPORTED;
             pVM->hm.s.vmx.fVpid = false;
         }
@@ -2728,6 +2729,6 @@
 
     /* Initialize these always, see hmR3InitFinalizeR0().*/
-    pVM->hm.s.vmx.enmFlushEpt  = VMX_FLUSH_EPT_NONE;
-    pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NONE;
+    pVM->hm.s.vmx.enmFlushEpt  = VMXFLUSHEPT_NONE;
+    pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NONE;
 
     /* Setup the tagged-TLB flush handlers. */
@@ -5565,8 +5566,10 @@
     int  rc            = VERR_INTERNAL_ERROR_5;
     bool fOffsettedTsc = false;
+    bool fParavirtTsc  = false;
     PVM pVM            = pVCpu->CTX_SUFF(pVM);
     if (pVM->hm.s.vmx.fUsePreemptTimer)
     {
-        uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &pVCpu->hm.s.vmx.u64TSCOffset);
+        uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &fParavirtTsc,
+                                                                     &pVCpu->hm.s.vmx.u64TSCOffset);
 
         /* Make sure the returned values have sane upper and lower boundaries. */
@@ -5580,5 +5583,30 @@
     }
     else
-        fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset);
+        fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset, &fParavirtTsc);
+
+    if (fParavirtTsc)
+    {
+        uint64_t const u64CurTsc   = ASMReadTSC();
+        uint64_t const u64LastTick = TMCpuTickGetLastSeen(pVCpu);
+        if (u64CurTsc + pVCpu->hm.s.vmx.u64TSCOffset < u64LastTick)
+        {
+            pVCpu->hm.s.vmx.u64TSCOffset = (u64LastTick - u64CurTsc);
+            STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffsetAdjusted);
+        }
+
+        Assert(u64CurTsc + pVCpu->hm.s.vmx.u64TSCOffset >= u64LastTick);
+        rc = GIMR0UpdateParavirtTsc(pVM, pVCpu->hm.s.vmx.u64TSCOffset);
+        if (RT_SUCCESS(rc))
+        {
+            /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
+            rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0);                                  AssertRC(rc);
+
+            pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
+            rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);              AssertRC(rc);
+            STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
+            return;
+        }
+        /* else: Shouldn't really fail. If it does, fallback to offsetted TSC mode. */
+    }
 
     if (fOffsettedTsc)
@@ -10358,5 +10386,4 @@
               ("hmR0VmxExitRdmsr: failed, invalid error code %Rrc\n", rc));
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
-
     if (RT_LIKELY(rc == VINF_SUCCESS))
     {
Index: /trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/VMMR0.cpp	(revision 51642)
+++ /trunk/src/VBox/VMM/VMMR0/VMMR0.cpp	(revision 51643)
@@ -36,4 +36,5 @@
 #include <VBox/vmm/gvmm.h>
 #include <VBox/vmm/gmm.h>
+#include <VBox/vmm/gim.h>
 #include <VBox/intnet.h>
 #include <VBox/vmm/hm.h>
@@ -379,14 +380,18 @@
                     if (RT_SUCCESS(rc))
                     {
-                        GVMMR0DoneInitVM(pVM);
-                        return rc;
+                        rc = GIMR0InitVM(pVM);
+                        if (RT_SUCCESS(rc))
+                        {
+                            GVMMR0DoneInitVM(pVM);
+                            return rc;
+                        }
+
+                        /* Bail out. */
+#ifdef VBOX_WITH_PCI_PASSTHROUGH
+                        PciRawR0TermVM(pVM);
+#endif
                     }
                 }
-
-                /* bail out */
             }
-#ifdef VBOX_WITH_PCI_PASSTHROUGH
-            PciRawR0TermVM(pVM);
-#endif
             HMR0TermVM(pVM);
         }
@@ -423,4 +428,6 @@
     if (GVMMR0DoingTermVM(pVM, pGVM))
     {
+        GIMR0TermVM(pVM);
+
         /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
          *        here to make sure we don't leak any shared pages if we crash... */
Index: /trunk/src/VBox/VMM/VMMR3/GIM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/GIM.cpp	(revision 51642)
+++ /trunk/src/VBox/VMM/VMMR3/GIM.cpp	(revision 51643)
@@ -90,12 +90,10 @@
      */
     int rc;
-#if 0
-    rc = SSMR3RegisterInternal(pVM, "GIM", 0, GIM_SSM_VERSION, sizeof(GIM),
-                                    NULL, NULL, NULL,
-                                    NULL, gimR3Save, NULL,
-                                    NULL, gimR3Load, NULL);
+    rc = SSMR3RegisterInternal(pVM, "GIM", 0 /* uInstance */, GIM_SSM_VERSION, sizeof(GIM),
+                                    NULL /* pfnLivePrep */, NULL /* pfnLiveExec */, NULL /* pfnLiveVote */,
+                                    NULL /* pfnSavePrep */, gimR3Save,              NULL /* pfnSaveDone */,
+                                    NULL /* pfnLoadPrep */, gimR3Load,              NULL /* pfnLoadDone */);
     if (RT_FAILURE(rc))
         return rc;
-#endif
 
     /*
@@ -226,5 +224,5 @@
 
 /**
- * Execute state save operation.
+ * Executes state-save operation.
  *
  * @returns VBox status code.
@@ -234,6 +232,44 @@
 DECLCALLBACK(int) gimR3Save(PVM pVM, PSSMHANDLE pSSM)
 {
-    /** @todo save state. */
-    return VINF_SUCCESS;
+    AssertReturn(pVM,  VERR_INVALID_PARAMETER);
+    AssertReturn(pSSM, VERR_SSM_INVALID_STATE);
+
+    /** @todo Save per-CPU data. */
+    int rc;
+#if 0
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
+    {
+        rc = SSMR3PutXYZ(pSSM, pVM->aCpus[i].gim.s.XYZ);
+    }
+#endif
+
+    /*
+     * Save per-VM data.
+     */
+    rc = SSMR3PutBool(pSSM, pVM->gim.s.fEnabled);
+    AssertRCReturn(rc, rc);
+    rc = SSMR3PutU32(pSSM, pVM->gim.s.enmProviderId);
+    AssertRCReturn(rc, rc);
+    rc = SSMR3PutU32(pSSM, pVM->gim.s.u32Version);
+    AssertRCReturn(rc, rc);
+
+    /*
+     * Save provider-specific data.
+     */
+    if (pVM->gim.s.fEnabled)
+    {
+        switch (pVM->gim.s.enmProviderId)
+        {
+            case GIMPROVIDERID_HYPERV:
+                rc = GIMR3HvSave(pVM, pSSM);
+                AssertRCReturn(rc, rc);
+                break;
+
+            default:
+                break;
+        }
+    }
+
+    return rc;
 }
 
@@ -250,6 +286,44 @@
 DECLCALLBACK(int) gimR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
 {
-    /** @todo load state. */
-    return VINF_SUCCESS;
+    if (uPass != SSM_PASS_FINAL)
+        return VINF_SUCCESS;
+
+    /** @todo Load per-CPU data. */
+    int rc;
+#if 0
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
+    {
+        rc = SSMR3PutXYZ(pSSM, pVM->aCpus[i].gim.s.XYZ);
+    }
+#endif
+
+    /*
+     * Load per-VM data.
+     */
+    rc = SSMR3GetBool(pSSM, &pVM->gim.s.fEnabled);
+    AssertRCReturn(rc, rc);
+    rc = SSMR3GetU32(pSSM, (uint32_t *)&pVM->gim.s.enmProviderId);
+    AssertRCReturn(rc, rc);
+    rc = SSMR3GetU32(pSSM, &pVM->gim.s.u32Version);
+    AssertRCReturn(rc, rc);
+
+    /*
+     * Load provider-specific data.
+     */
+    if (pVM->gim.s.fEnabled)
+    {
+        switch (pVM->gim.s.enmProviderId)
+        {
+            case GIMPROVIDERID_HYPERV:
+                rc = GIMR3HvLoad(pVM, pSSM, uVersion);
+                AssertRCReturn(rc, rc);
+                break;
+
+            default:
+                break;
+        }
+    }
+
+    return rc;
 }
 
@@ -266,4 +340,15 @@
 VMMR3_INT_DECL(int) GIMR3Term(PVM pVM)
 {
+    if (!pVM->gim.s.fEnabled)
+        return VINF_SUCCESS;
+
+    switch (pVM->gim.s.enmProviderId)
+    {
+        case GIMPROVIDERID_HYPERV:
+            return GIMR3HvTerm(pVM);
+
+        default:
+            break;
+    }
     return VINF_SUCCESS;
 }
@@ -350,5 +435,5 @@
  * @param   pRegion     Pointer to the GIM MMIO2 region.
  */
-VMM_INT_DECL(int) GIMR3Mmio2Unmap(PVM pVM, PGIMMMIO2REGION pRegion)
+VMMR3_INT_DECL(int) GIMR3Mmio2Unmap(PVM pVM, PGIMMMIO2REGION pRegion)
 {
     AssertPtr(pVM);
@@ -359,6 +444,8 @@
     if (pRegion->fMapped)
     {
-        PGMHandlerPhysicalDeregister(pVM, pRegion->GCPhysPage);
-        int rc = PDMDevHlpMMIO2Unmap(pDevIns, pRegion->iRegion, pRegion->GCPhysPage);
+        int rc = PGMHandlerPhysicalDeregister(pVM, pRegion->GCPhysPage);
+        AssertRC(rc);
+
+        rc = PDMDevHlpMMIO2Unmap(pDevIns, pRegion->iRegion, pRegion->GCPhysPage);
         if (RT_SUCCESS(rc))
         {
@@ -372,5 +459,9 @@
 
 /**
- * Write access handler for a mapped MMIO2 region that presently ignores writes.
+ * Write access handler for a mapped MMIO2 region. At present, this handler
+ * simply ignores writes.
+ *
+ * In the future we might want to let the GIM provider decide what the handler
+ * should do (like throwing #GP faults).
  *
  * @returns VBox status code.
@@ -383,6 +474,6 @@
  * @param pvUser            User argument (NULL, not used).
  */
-static DECLCALLBACK(int) gimR3Mmio2PageWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
-                                                    PGMACCESSTYPE enmAccessType, void *pvUser)
+static DECLCALLBACK(int) gimR3Mmio2WriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
+                                                PGMACCESSTYPE enmAccessType, void *pvUser)
 {
     /*
@@ -402,7 +493,6 @@
  * @param   pRegion         Pointer to the GIM MMIO2 region.
  * @param   GCPhysRegion    Where in the guest address space to map the region.
- * @param   pszDesc         Description of the region being mapped.
- */
-VMM_INT_DECL(int) GIMR3Mmio2Map(PVM pVM, PGIMMMIO2REGION pRegion, RTGCPHYS GCPhysRegion, const char *pszDesc)
+ */
+VMMR3_INT_DECL(int) GIMR3Mmio2Map(PVM pVM, PGIMMMIO2REGION pRegion, RTGCPHYS GCPhysRegion)
 {
     PPDMDEVINS pDevIns = pVM->gim.s.pDevInsR3;
@@ -412,5 +502,5 @@
     if (GCPhysRegion & PAGE_OFFSET_MASK)
     {
-        LogFunc(("%s: %#RGp not paging aligned\n", pszDesc, GCPhysRegion));
+        LogFunc(("%s: %#RGp not paging aligned\n", pRegion->szDescription, GCPhysRegion));
         return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
     }
@@ -421,16 +511,16 @@
     if (!PGMPhysIsGCPhysNormal(pVM, GCPhysRegion))
     {
-        LogFunc(("%s: %#RGp is not normal memory\n", pszDesc, GCPhysRegion));
+        LogFunc(("%s: %#RGp is not normal memory\n", pRegion->szDescription, GCPhysRegion));
         return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
     }
 
-    if (pRegion->fMapped)
-    {
-        LogFunc(("%s: A mapping for %#RGp already exists.\n", pszDesc, GCPhysRegion));
-        return VERR_PGM_MAPPING_CONFLICT;
-    }
-
-    /*
-     * Map the MMIO2 region over the guest-physical address.
+    if (!pRegion->fRegistered)
+    {
+        LogFunc(("%s: Region has not been registered.\n"));
+        return VERR_GIM_IPE_1;
+    }
+
+    /*
+     * Map the MMIO2 region over the specified guest-physical address.
      */
     int rc = PDMDevHlpMMIO2Map(pDevIns, pRegion->iRegion, GCPhysRegion);
@@ -443,13 +533,13 @@
                                           PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
                                           GCPhysRegion, GCPhysRegion + (pRegion->cbRegion - 1),
-                                          gimR3Mmio2PageWriteHandler,  NULL /* pvUserR3 */,
+                                          gimR3Mmio2WriteHandler,  NULL /* pvUserR3 */,
                                           NULL /* pszModR0 */, NULL /* pszHandlerR0 */, NIL_RTR0PTR /* pvUserR0 */,
                                           NULL /* pszModRC */, NULL /* pszHandlerRC */, NIL_RTRCPTR /* pvUserRC */,
-                                          pszDesc);
+                                          pRegion->szDescription);
         if (RT_SUCCESS(rc))
         {
             pRegion->fMapped    = true;
             pRegion->GCPhysPage = GCPhysRegion;
-            return VINF_SUCCESS;
+            return rc;
         }
 
@@ -460,2 +550,39 @@
 }
 
+#if 0
+/**
+ * Registers the physical handler for the registered and mapped MMIO2 region.
+ *
+ * @returns VBox status code.
+ * @param   pVM         Pointer to the VM.
+ * @param   pRegion     Pointer to the GIM MMIO2 region.
+ */
+VMMR3_INT_DECL(int) GIMR3Mmio2HandlerPhysicalRegister(PVM pVM, PGIMMMIO2REGION pRegion)
+{
+    AssertPtr(pRegion);
+    AssertReturn(pRegion->fRegistered, VERR_GIM_IPE_2);
+    AssertReturn(pRegion->fMapped, VERR_GIM_IPE_3);
+
+    return PGMR3HandlerPhysicalRegister(pVM,
+                                        PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
+                                        pRegion->GCPhysPage, pRegion->GCPhysPage + (pRegion->cbRegion - 1),
+                                        gimR3Mmio2WriteHandler,  NULL /* pvUserR3 */,
+                                        NULL /* pszModR0 */, NULL /* pszHandlerR0 */, NIL_RTR0PTR /* pvUserR0 */,
+                                        NULL /* pszModRC */, NULL /* pszHandlerRC */, NIL_RTRCPTR /* pvUserRC */,
+                                        pRegion->szDescription);
+}
+
+
+/**
+ * Deregisters the physical handler for the MMIO2 region.
+ *
+ * @returns VBox status code.
+ * @param   pVM         Pointer to the VM.
+ * @param   pRegion     Pointer to the GIM MMIO2 region.
+ */
+VMMR3_INT_DECL(int) GIMR3Mmio2HandlerPhysicalDeregister(PVM pVM, PGIMMMIO2REGION pRegion)
+{
+    return PGMHandlerPhysicalDeregister(pVM, pRegion->GCPhysPage);
+}
+#endif
+
Index: /trunk/src/VBox/VMM/VMMR3/GIMHv.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/GIMHv.cpp	(revision 51642)
+++ /trunk/src/VBox/VMM/VMMR3/GIMHv.cpp	(revision 51643)
@@ -26,6 +26,8 @@
 #include <iprt/string.h>
 #include <iprt/mem.h>
+#include <iprt/spinlock.h>
 
 #include <VBox/vmm/cpum.h>
+#include <VBox/vmm/ssm.h>
 #include <VBox/vmm/vm.h>
 #include <VBox/vmm/hm.h>
@@ -111,15 +113,15 @@
     PGIMMMIO2REGION pRegion = &pHv->aMmio2Regions[GIM_HV_HYPERCALL_PAGE_REGION_IDX];
     pRegion->iRegion    = GIM_HV_HYPERCALL_PAGE_REGION_IDX;
+    pRegion->fRCMapping = false;
     pRegion->cbRegion   = PAGE_SIZE;
     pRegion->GCPhysPage = NIL_RTGCPHYS;
-    RTStrCopy(pRegion->szDescription, sizeof(pRegion->szDescription), "Hypercall Page");
-    Assert(!pRegion->fRCMapping);
-    Assert(!pRegion->fMapped);
+    RTStrCopy(pRegion->szDescription, sizeof(pRegion->szDescription), "Hyper-V hypercall page");
 
     pRegion = &pHv->aMmio2Regions[GIM_HV_REF_TSC_PAGE_REGION_IDX];
     pRegion->iRegion    = GIM_HV_REF_TSC_PAGE_REGION_IDX;
+    pRegion->fRCMapping = false;
     pRegion->cbRegion   = PAGE_SIZE;
     pRegion->GCPhysPage = NIL_RTGCPHYS;
-    RTStrCopy(pRegion->szDescription, sizeof(pRegion->szDescription), "TSC Page");
+    RTStrCopy(pRegion->szDescription, sizeof(pRegion->szDescription), "Hyper-V TSC page");
 
     /*
@@ -219,4 +221,11 @@
 
 
+VMMR3_INT_DECL(int) GIMR3HvTerm(PVM pVM)
+{
+    GIMR3HvReset(pVM);
+    return VINF_SUCCESS;
+}
+
+
 VMMR3_INT_DECL(void) GIMR3HvRelocate(PVM pVM, RTGCINTPTR offDelta)
 {
@@ -239,4 +248,5 @@
      * Unmap MMIO2 pages that the guest may have setup.
      */
+    LogRelFunc(("Resetting Hyper-V MMIO2 regions and MSRs...\n"));
     PGIMHV pHv = &pVM->gim.s.u.Hv;
     for (unsigned i = 0; i < RT_ELEMENTS(pHv->aMmio2Regions); i++)
@@ -272,2 +282,318 @@
 }
 
+
+/**
+ * Hyper-V state-save operation.
+ *
+ * @returns VBox status code.
+ * @param   pVM     Pointer to the VM.
+ * @param   pSSM    Pointer to the SSM handle.
+ */
+VMMR3_INT_DECL(int) GIMR3HvSave(PVM pVM, PSSMHANDLE pSSM)
+{
+    PCGIMHV pcHv = &pVM->gim.s.u.Hv;
+
+    /** @todo Save per-VCPU data. */
+
+    /*
+     * Save per-VM MSRs.
+     */
+    int rc = SSMR3PutU64(pSSM, pcHv->u64GuestOsIdMsr);          AssertRCReturn(rc, rc);
+    rc = SSMR3PutU64(pSSM, pcHv->u64HypercallMsr);              AssertRCReturn(rc, rc);
+    rc = SSMR3PutU64(pSSM, pcHv->u64TscPageMsr);                AssertRCReturn(rc, rc);
+
+    /*
+     * Save Hyper-V features / capabilities.
+     */
+    rc = SSMR3PutU32(pSSM, pcHv->uBaseFeat);                    AssertRCReturn(rc, rc);
+    rc = SSMR3PutU32(pSSM, pcHv->uPartFlags);                   AssertRCReturn(rc, rc);
+    rc = SSMR3PutU32(pSSM, pcHv->uPowMgmtFeat);                 AssertRCReturn(rc, rc);
+    rc = SSMR3PutU32(pSSM, pcHv->uMiscFeat);                    AssertRCReturn(rc, rc);
+    rc = SSMR3PutU32(pSSM, pcHv->uHyperHints);                  AssertRCReturn(rc, rc);
+
+    /*
+     * Save per-VM MMIO2 regions.
+     */
+    rc = SSMR3PutU32(pSSM, RT_ELEMENTS(pcHv->aMmio2Regions));
+    for (unsigned i = 0; i < RT_ELEMENTS(pcHv->aMmio2Regions); i++)
+    {
+        /* Save the fields necessary to remap the regions upon load. */
+        PCGIMMMIO2REGION pcRegion = &pcHv->aMmio2Regions[i];
+        rc = SSMR3PutU8(pSSM,     pcRegion->iRegion);           AssertRCReturn(rc, rc);
+        rc = SSMR3PutBool(pSSM,   pcRegion->fRCMapping);        AssertRCReturn(rc, rc);
+        rc = SSMR3PutU32(pSSM,    pcRegion->cbRegion);          AssertRCReturn(rc, rc);
+        rc = SSMR3PutGCPhys(pSSM, pcRegion->GCPhysPage);        AssertRCReturn(rc, rc);
+        rc = SSMR3PutStrZ(pSSM,   pcRegion->szDescription);     AssertRCReturn(rc, rc);
+    }
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Hyper-V state-load operation, final pass.
+ *
+ * @returns VBox status code.
+ * @param   pVM             Pointer to the VM.
+ * @param   pSSM            Pointer to the SSM handle.
+ * @param   uSSMVersion     The saved-state version.
+ */
+VMMR3_INT_DECL(int) GIMR3HvLoad(PVM pVM, PSSMHANDLE pSSM, uint32_t uSSMVersion)
+{
+    PGIMHV pHv = &pVM->gim.s.u.Hv;
+
+    /** @todo Load per-VCPU data. */
+
+    /*
+     * Load per-VM MSRs.
+     */
+    int rc = SSMR3GetU64(pSSM, &pHv->u64GuestOsIdMsr);          AssertRCReturn(rc, rc);
+    rc = SSMR3GetU64(pSSM, &pHv->u64HypercallMsr);              AssertRCReturn(rc, rc);
+    rc = SSMR3GetU64(pSSM, &pHv->u64TscPageMsr);                AssertRCReturn(rc, rc);
+
+    /*
+     * Load Hyper-V features / capabilities.
+     */
+    rc = SSMR3GetU32(pSSM, &pHv->uBaseFeat);                    AssertRCReturn(rc, rc);
+    rc = SSMR3GetU32(pSSM, &pHv->uPartFlags);                   AssertRCReturn(rc, rc);
+    rc = SSMR3GetU32(pSSM, &pHv->uPowMgmtFeat);                 AssertRCReturn(rc, rc);
+    rc = SSMR3GetU32(pSSM, &pHv->uMiscFeat);                    AssertRCReturn(rc, rc);
+    rc = SSMR3GetU32(pSSM, &pHv->uHyperHints);                  AssertRCReturn(rc, rc);
+
+    /*
+     * Load per-VM MMIO2 regions.
+     */
+    uint32_t cRegions;
+    rc = SSMR3GetU32(pSSM, &cRegions);
+    if (cRegions != RT_ELEMENTS(pHv->aMmio2Regions))
+    {
+        LogRelFunc(("MMIO2 region array size mismatch. size=%u expected=%u\n", cRegions, RT_ELEMENTS(pHv->aMmio2Regions)));
+        return VERR_SSM_FIELD_INVALID_VALUE;
+    }
+
+    for (unsigned i = 0; i < RT_ELEMENTS(pHv->aMmio2Regions); i++)
+    {
+        /* The regions would have been registered while constructing the GIM device. */
+        PGIMMMIO2REGION pRegion = &pHv->aMmio2Regions[i];
+        rc = SSMR3GetU8(pSSM,     &pRegion->iRegion);           AssertRCReturn(rc, rc);
+        rc = SSMR3GetBool(pSSM,   &pRegion->fRCMapping);        AssertRCReturn(rc, rc);
+        rc = SSMR3GetU32(pSSM,    &pRegion->cbRegion);          AssertRCReturn(rc, rc);
+        rc = SSMR3GetGCPhys(pSSM, &pRegion->GCPhysPage);        AssertRCReturn(rc, rc);
+        rc = SSMR3GetStrZ(pSSM,    pRegion->szDescription, sizeof(pRegion->szDescription));
+        AssertRCReturn(rc, rc);
+    }
+
+    /*
+     * Enable the Hypercall-page.
+     */
+    PGIMMMIO2REGION pRegion = &pHv->aMmio2Regions[GIM_HV_HYPERCALL_PAGE_REGION_IDX];
+    if (MSR_GIM_HV_HYPERCALL_IS_ENABLED(pHv->u64HypercallMsr))
+    {
+        Assert(pRegion->GCPhysPage != NIL_RTGCPHYS);
+        if (pRegion->fRegistered)
+        {
+            rc = GIMR3HvEnableHypercallPage(pVM, pRegion->GCPhysPage);
+            if (RT_FAILURE(rc))
+                return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Failed to enable the hypercall page. GCPhys=%#RGp rc=%Rrc"),
+                                        pRegion->GCPhysPage, rc);
+        }
+        else
+            return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Hypercall MMIO2 region not registered. Missing GIM device?!"));
+    }
+
+    /*
+     * Enable the TSC-page.
+     */
+    pRegion = &pHv->aMmio2Regions[GIM_HV_REF_TSC_PAGE_REGION_IDX];
+    if (MSR_GIM_HV_REF_TSC_IS_ENABLED(pHv->u64TscPageMsr))
+    {
+        Assert(pRegion->GCPhysPage != NIL_RTGCPHYS);
+        if (pRegion->fRegistered)
+        {
+            rc = GIMR3HvEnableTscPage(pVM, pRegion->GCPhysPage);
+            if (RT_FAILURE(rc))
+                return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Failed to enable the TSC page. GCPhys=%#RGp rc=%Rrc"),
+                                        pRegion->GCPhysPage, rc);
+        }
+        else
+            return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("TSC-page MMIO2 region not registered. Missing GIM device?!"));
+    }
+
+    return rc;
+}
+
+
+/**
+ * Enables the Hyper-V TSC page.
+ *
+ * @returns VBox status code.
+ * @param   pVM             Pointer to the VM.
+ * @param   GCPhysTscPage   Where to map the TSC page.
+ */
+VMMR3_INT_DECL(int) GIMR3HvEnableTscPage(PVM pVM, RTGCPHYS GCPhysTscPage)
+{
+    PPDMDEVINSR3    pDevIns = pVM->gim.s.pDevInsR3;
+    PGIMMMIO2REGION pRegion = &pVM->gim.s.u.Hv.aMmio2Regions[GIM_HV_REF_TSC_PAGE_REGION_IDX];
+    AssertPtrReturn(pDevIns, VERR_GIM_DEVICE_NOT_REGISTERED);
+
+    int rc;
+    if (pRegion->fMapped)
+    {
+        /*
+         * Is it already enabled at the given guest-address?
+         */
+        if (pRegion->GCPhysPage == GCPhysTscPage)
+            return VINF_SUCCESS;
+
+        /*
+         * If it's mapped at a different address, unmap the previous address.
+         */
+        rc = GIMR3HvDisableTscPage(pVM);
+        AssertRC(rc);
+    }
+
+    /*
+     * Map the TSC-page at the specified address.
+     */
+    Assert(!pRegion->fMapped);
+    rc = GIMR3Mmio2Map(pVM, pRegion, GCPhysTscPage);
+    if (RT_SUCCESS(rc))
+    {
+        Assert(pRegion->GCPhysPage == GCPhysTscPage);
+
+        /*
+         * Update the TSC scale. Windows guests expect a non-zero TSC sequence, otherwise
+         * they fallback to using the reference count MSR which is not ideal in terms of VM-exits.
+         *
+         * Also, Hyper-V normalizes the time in 10 MHz, see:
+         * http://technet.microsoft.com/it-it/sysinternals/dn553408%28v=vs.110%29
+         */
+        PGIMHVREFTSC pRefTsc = (PGIMHVREFTSC)pRegion->pvPageR3;
+        Assert(pRefTsc);
+
+        uint64_t const u64TscKHz = TMCpuTicksPerSecond(pVM) / UINT64_C(1000);
+        pRefTsc->u32TscSequence  = 1;
+        //pRefTsc->u64TscScale     = ((UINT64_C(10000) << 32) / u64TscKHz) << 32;
+        pRefTsc->u64TscScale     = 0xf4000000000000;
+
+        LogRel(("GIM: HyperV: Enabled TSC page at %#RGp (u64TscScale=%#RX64 u64TscKHz=%#RX64)\n", GCPhysTscPage,
+                pRefTsc->u64TscScale, u64TscKHz));
+        return VINF_SUCCESS;
+    }
+    else
+        LogRelFunc(("GIMR3Mmio2Map failed. rc=%Rrc\n", rc));
+
+    return VERR_GIM_OPERATION_FAILED;
+}
+
+
+/**
+ * Disables the Hyper-V TSC page.
+ *
+ * @returns VBox status code.
+ * @param   pVM     Pointer to the VM.
+ */
+VMMR3_INT_DECL(int) GIMR3HvDisableTscPage(PVM pVM)
+{
+    PGIMHV pHv = &pVM->gim.s.u.Hv;
+    PGIMMMIO2REGION pRegion = &pHv->aMmio2Regions[GIM_HV_REF_TSC_PAGE_REGION_IDX];
+    if (pRegion->fMapped)
+    {
+        GIMR3Mmio2Unmap(pVM, pRegion);
+        Assert(!pRegion->fMapped);
+        LogRel(("GIM: HyperV: Disabled TSC-page\n"));
+        return VINF_SUCCESS;
+    }
+    return VERR_GIM_PVTSC_NOT_ENABLED;
+}
+
+
+/**
+ * Disables the Hyper-V Hypercall page.
+ *
+ * @returns VBox status code.
+ */
+VMMR3_INT_DECL(int) GIMR3HvDisableHypercallPage(PVM pVM)
+{
+    PGIMHV pHv = &pVM->gim.s.u.Hv;
+    PGIMMMIO2REGION pRegion = &pHv->aMmio2Regions[GIM_HV_HYPERCALL_PAGE_REGION_IDX];
+    if (pRegion->fMapped)
+    {
+        GIMR3Mmio2Unmap(pVM, pRegion);
+        Assert(!pRegion->fMapped);
+        LogRel(("GIM: HyperV: Disabled Hypercall-page\n"));
+        return VINF_SUCCESS;
+    }
+    return VERR_GIM_HYPERCALLS_NOT_ENABLED;
+}
+
+
+/**
+ * Enables the Hyper-V Hypercall page.
+ *
+ * @returns VBox status code.
+ * @param   pVM                     Pointer to the VM.
+ * @param   GCPhysHypercallPage     Where to map the hypercall page.
+ */
+VMMR3_INT_DECL(int) GIMR3HvEnableHypercallPage(PVM pVM, RTGCPHYS GCPhysHypercallPage)
+{
+    PPDMDEVINSR3    pDevIns = pVM->gim.s.pDevInsR3;
+    PGIMMMIO2REGION pRegion = &pVM->gim.s.u.Hv.aMmio2Regions[GIM_HV_HYPERCALL_PAGE_REGION_IDX];
+    AssertPtrReturn(pDevIns, VERR_GIM_DEVICE_NOT_REGISTERED);
+
+    if (pRegion->fMapped)
+    {
+        /*
+         * Is it already enabled at the given guest-address?
+         */
+        if (pRegion->GCPhysPage == GCPhysHypercallPage)
+            return VINF_SUCCESS;
+
+        /*
+         * If it's mapped at a different address, unmap the previous address.
+         */
+        int rc2 = GIMR3HvDisableHypercallPage(pVM);
+        AssertRC(rc2);
+    }
+
+    /*
+     * Map the hypercall-page at the specified address.
+     */
+    Assert(!pRegion->fMapped);
+    int rc = GIMR3Mmio2Map(pVM, pRegion, GCPhysHypercallPage);
+    if (RT_SUCCESS(rc))
+    {
+        Assert(pRegion->GCPhysPage == GCPhysHypercallPage);
+
+        /*
+         * Patch the hypercall-page.
+         */
+        if (HMIsEnabled(pVM))
+        {
+            size_t cbWritten = 0;
+            rc = HMPatchHypercall(pVM, pRegion->pvPageR3, PAGE_SIZE, &cbWritten);
+            if (   RT_SUCCESS(rc)
+                && cbWritten < PAGE_SIZE - 1)
+            {
+                uint8_t *pbLast = (uint8_t *)pRegion->pvPageR3 + cbWritten;
+                *pbLast = 0xc3;  /* RET */
+
+                LogRel(("GIM: HyperV: Enabled hypercalls at %#RGp\n", GCPhysHypercallPage));
+                return VINF_SUCCESS;
+            }
+            else
+                LogRelFunc(("HMPatchHypercall failed. rc=%Rrc cbWritten=%u\n", rc, cbWritten));
+        }
+        else
+        {
+            /** @todo Handle raw-mode hypercall page patching. */
+            LogRelFunc(("Raw-mode not yet implemented!\n"));
+        }
+        GIMR3Mmio2Unmap(pVM, pRegion);
+    }
+    else
+        LogRelFunc(("GIMR3Mmio2Map failed. rc=%Rrc\n", rc));
+
+    return rc;
+}
+
Index: /trunk/src/VBox/VMM/VMMR3/HM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/HM.cpp	(revision 51642)
+++ /trunk/src/VBox/VMM/VMMR3/HM.cpp	(revision 51643)
@@ -733,4 +733,6 @@
         HM_REG_COUNTER(&pVCpu->hm.s.StatTlbShootdownFlush,      "/HM/CPU%d/Flush/Shootdown/TLB", "Inter-VCPU request to flush entire guest-TLB.");
 
+        HM_REG_COUNTER(&pVCpu->hm.s.StatTscOffsetAdjusted,      "/HM/CPU%d/TSC/OffsetAdjusted", "TSC offset overflowed for paravirt. TSC. Fudged.");
+        HM_REG_COUNTER(&pVCpu->hm.s.StatTscParavirt,            "/HM/CPU%d/TSC/Paravirt", "Paravirtualized TSC in effect.");
         HM_REG_COUNTER(&pVCpu->hm.s.StatTscOffset,              "/HM/CPU%d/TSC/Offset", "TSC offsetting is in effect.");
         HM_REG_COUNTER(&pVCpu->hm.s.StatTscIntercept,           "/HM/CPU%d/TSC/Intercept", "Guest is in catchup mode, intercept TSC accesses.");
@@ -1172,5 +1174,5 @@
     {
         CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP);
-        LogRel(("HM: RDTSCP disabled.\n"));
+        LogRel(("HM: RDTSCP disabled\n"));
     }
 
@@ -1241,6 +1243,6 @@
 
     LogRel((pVM->hm.s.fAllow64BitGuests
-            ? "HM: Guest support: 32-bit and 64-bit.\n"
-            : "HM: Guest support: 32-bit only.\n"));
+            ? "HM: Guest support: 32-bit and 64-bit\n"
+            : "HM: Guest support: 32-bit only\n"));
 
     /*
@@ -1287,5 +1289,5 @@
             CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
         else
-            LogRel(("HM: NX not enabled on the host, unavailable to PAE guest.\n"));
+            LogRel(("HM: NX not enabled on the host, unavailable to PAE guest\n"));
     }
 
@@ -1296,10 +1298,10 @@
     {
         LogRel(("HM: Nested paging enabled!\n"));
-        if (pVM->hm.s.vmx.enmFlushEpt == VMX_FLUSH_EPT_SINGLE_CONTEXT)
-            LogRel(("HM:   EPT flush type                = VMX_FLUSH_EPT_SINGLE_CONTEXT\n"));
-        else if (pVM->hm.s.vmx.enmFlushEpt == VMX_FLUSH_EPT_ALL_CONTEXTS)
-            LogRel(("HM:   EPT flush type                = VMX_FLUSH_EPT_ALL_CONTEXTS\n"));
-        else if (pVM->hm.s.vmx.enmFlushEpt == VMX_FLUSH_EPT_NOT_SUPPORTED)
-            LogRel(("HM:   EPT flush type                = VMX_FLUSH_EPT_NOT_SUPPORTED\n"));
+        if (pVM->hm.s.vmx.enmFlushEpt == VMXFLUSHEPT_SINGLE_CONTEXT)
+            LogRel(("HM:   EPT flush type                = VMXFLUSHEPT_SINGLE_CONTEXT\n"));
+        else if (pVM->hm.s.vmx.enmFlushEpt == VMXFLUSHEPT_ALL_CONTEXTS)
+            LogRel(("HM:   EPT flush type                = VMXFLUSHEPT_ALL_CONTEXTS\n"));
+        else if (pVM->hm.s.vmx.enmFlushEpt == VMXFLUSHEPT_NOT_SUPPORTED)
+            LogRel(("HM:   EPT flush type                = VMXFLUSHEPT_NOT_SUPPORTED\n"));
         else
             LogRel(("HM:   EPT flush type                = %d\n", pVM->hm.s.vmx.enmFlushEpt));
@@ -1313,5 +1315,5 @@
             /* Use large (2 MB) pages for our EPT PDEs where possible. */
             PGMSetLargePageUsage(pVM, true);
-            LogRel(("HM: Large page support enabled!\n"));
+            LogRel(("HM: Large page support enabled\n"));
         }
 #endif
@@ -1323,17 +1325,17 @@
     {
         LogRel(("HM: VPID enabled!\n"));
-        if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_INDIV_ADDR)
-            LogRel(("HM:   VPID flush type               = VMX_FLUSH_VPID_INDIV_ADDR\n"));
-        else if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_SINGLE_CONTEXT)
-            LogRel(("HM:   VPID flush type               = VMX_FLUSH_VPID_SINGLE_CONTEXT\n"));
-        else if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_ALL_CONTEXTS)
-            LogRel(("HM:   VPID flush type               = VMX_FLUSH_VPID_ALL_CONTEXTS\n"));
-        else if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
-            LogRel(("HM:   VPID flush type               = VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS\n"));
+        if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_INDIV_ADDR)
+            LogRel(("HM:   VPID flush type               = VMXFLUSHVPID_INDIV_ADDR\n"));
+        else if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_SINGLE_CONTEXT)
+            LogRel(("HM:   VPID flush type               = VMXFLUSHVPID_SINGLE_CONTEXT\n"));
+        else if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_ALL_CONTEXTS)
+            LogRel(("HM:   VPID flush type               = VMXFLUSHVPID_ALL_CONTEXTS\n"));
+        else if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
+            LogRel(("HM:   VPID flush type               = VMXFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS\n"));
         else
             LogRel(("HM:   VPID flush type               = %d\n", pVM->hm.s.vmx.enmFlushVpid));
     }
-    else if (pVM->hm.s.vmx.enmFlushVpid == VMX_FLUSH_VPID_NOT_SUPPORTED)
-        LogRel(("HM: Ignoring VPID capabilities of CPU.\n"));
+    else if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_NOT_SUPPORTED)
+        LogRel(("HM: Ignoring VPID capabilities of CPU\n"));
 
     /*
@@ -1347,7 +1349,7 @@
     }
     if (pVM->hm.s.vmx.fUsePreemptTimer)
-        LogRel(("HM: VMX-preemption timer enabled (cPreemptTimerShift=%u).\n", pVM->hm.s.vmx.cPreemptTimerShift));
+        LogRel(("HM: VMX-preemption timer enabled (cPreemptTimerShift=%u)\n", pVM->hm.s.vmx.cPreemptTimerShift));
     else
-        LogRel(("HM: VMX-preemption timer disabled.\n"));
+        LogRel(("HM: VMX-preemption timer disabled\n"));
 
     return VINF_SUCCESS;
@@ -1465,9 +1467,9 @@
         CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
 
-    LogRel(("HM: TPR patching %s.\n", (pVM->hm.s.fTprPatchingAllowed) ? "enabled" : "disabled"));
+    LogRel(("HM: TPR patching %s\n", (pVM->hm.s.fTprPatchingAllowed) ? "enabled" : "disabled"));
 
     LogRel((pVM->hm.s.fAllow64BitGuests
-            ? "HM: Guest support: 32-bit and 64-bit.\n"
-            : "HM: Guest support: 32-bit only.\n"));
+            ? "HM: Guest support: 32-bit and 64-bit\n"
+            : "HM: Guest support: 32-bit only\n"));
 
     return VINF_SUCCESS;
Index: /trunk/src/VBox/VMM/include/GIMHvInternal.h
===================================================================
--- /trunk/src/VBox/VMM/include/GIMHvInternal.h	(revision 51642)
+++ /trunk/src/VBox/VMM/include/GIMHvInternal.h	(revision 51643)
@@ -389,4 +389,12 @@
 AssertCompile(MSR_GIM_HV_RANGE11_START <= MSR_GIM_HV_RANGE11_END);
 
+/** @name Hyper-V MSR - Reset (MSR_GIM_HV_RESET).
+ * @{
+ */
+/** The reset bit. */
+#define MSR_GIM_HV_RESET_BIT                      RT_BIT_64(0)
+/** Whether the reset bit is set in the MSR value. */
+#define MSR_GIM_HV_RESET_IS_SET(a)                RT_BOOL((a) & MSR_GIM_HV_RESET_BIT)
+/** @} */
 
 /** @name Hyper-V MSR - Hypercall (MSR_GIM_HV_HYPERCALL).
@@ -434,8 +442,11 @@
     uint32_t            uReserved0;
     uint64_t volatile   u64TscScale;
-    uint64_t volatile   u64TscOffset;
+    int64_t  volatile   i64TscOffset;
 } GIMHVTSCPAGE;
-/** Pointer to GIM VMCPU instance data. */
+/** Pointer to Hyper-V reference TSC. */
 typedef GIMHVREFTSC *PGIMHVREFTSC;
+/** Pointer to a const Hyper-V reference TSC. */
+typedef GIMHVREFTSC const *PCGIMHVREFTSC;
+
 
 /**
@@ -465,4 +476,10 @@
     uint32_t                    u32Alignment0;
 
+    /** Per-VM R0 Spinlock for protecting EMT writes to the TSC page. */
+    RTSPINLOCK                  hSpinlockR0;
+#if HC_ARCH_BITS == 32
+    uint32_t                    u32Alignment1;
+#endif
+
     /** Array of MMIO2 regions. */
     GIMMMIO2REGION              aMmio2Regions[GIM_HV_REGION_IDX_MAX + 1];
@@ -473,4 +490,5 @@
 typedef GIMHV const *PCGIMHV;
 AssertCompileMemberAlignment(GIMHV, aMmio2Regions, 8);
+AssertCompileMemberAlignment(GIMHV, hSpinlockR0, sizeof(uintptr_t));
 
 RT_C_DECLS_BEGIN
@@ -479,15 +497,23 @@
 VMMR0_INT_DECL(int)             GIMR0HvInitVM(PVM pVM);
 VMMR0_INT_DECL(int)             GIMR0HvTermVM(PVM pVM);
+VMMR0_INT_DECL(int)             GIMR0HvUpdateParavirtTsc(PVM pVM, uint64_t u64Offset);
 #endif /* IN_RING0 */
 
 #ifdef IN_RING3
 VMMR3_INT_DECL(int)             GIMR3HvInit(PVM pVM);
+VMMR3_INT_DECL(int)             GIMR3HvTerm(PVM pVM);
 VMMR3_INT_DECL(void)            GIMR3HvRelocate(PVM pVM, RTGCINTPTR offDelta);
 VMMR3_INT_DECL(void)            GIMR3HvReset(PVM pVM);
 VMMR3_INT_DECL(PGIMMMIO2REGION) GIMR3HvGetMmio2Regions(PVM pVM, uint32_t *pcRegions);
+VMMR3_INT_DECL(int)             GIMR3HvSave(PVM pVM, PSSMHANDLE pSSM);
+VMMR3_INT_DECL(int)             GIMR3HvLoad(PVM pVM, PSSMHANDLE pSSM, uint32_t uSSMVersion);
+
+VMMR3_INT_DECL(int)             GIMR3HvDisableTscPage(PVM pVM);
+VMMR3_INT_DECL(int)             GIMR3HvEnableTscPage(PVM pVM, RTGCPHYS GCPhysTscPage);
+VMMR3_INT_DECL(int)             GIMR3HvDisableHypercallPage(PVM pVM);
+VMMR3_INT_DECL(int)             GIMR3HvEnableHypercallPage(PVM pVM, RTGCPHYS GCPhysHypercallPage);
 #endif /* IN_RING3 */
 
 VMM_INT_DECL(bool)              GIMHvIsParavirtTscEnabled(PVM pVM);
-VMM_INT_DECL(int)               GIMHvUpdateParavirtTsc(PVM pVM, uint64_t u64Offset);
 VMM_INT_DECL(int)               GIMHvHypercall(PVMCPU pVCpu, PCPUMCTX pCtx);
 VMM_INT_DECL(int)               GIMHvReadMsr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue);
Index: /trunk/src/VBox/VMM/include/GIMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/include/GIMInternal.h	(revision 51642)
+++ /trunk/src/VBox/VMM/include/GIMInternal.h	(revision 51643)
@@ -29,4 +29,7 @@
  * @{
  */
+
+/** The saved state version. */
+#define GIM_SSM_VERSION                          1
 
 /**
@@ -89,6 +92,8 @@
 
 #ifdef IN_RING3
-VMM_INT_DECL(int)           GIMR3Mmio2Unmap(PVM pVM, PGIMMMIO2REGION pRegion);
-VMM_INT_DECL(int)           GIMR3Mmio2Map(PVM pVM, PGIMMMIO2REGION pRegion, RTGCPHYS GCPhysRegion, const char *pszDesc);
+VMMR3_INT_DECL(int)           GIMR3Mmio2Unmap(PVM pVM, PGIMMMIO2REGION pRegion);
+VMMR3_INT_DECL(int)           GIMR3Mmio2Map(PVM pVM, PGIMMMIO2REGION pRegion, RTGCPHYS GCPhysRegion);
+VMMR3_INT_DECL(int)           GIMR3Mmio2HandlerPhysicalRegister(PVM pVM, PGIMMMIO2REGION pRegion);
+VMMR3_INT_DECL(int)           GIMR3Mmio2HandlerPhysicalDeregister(PVM pVM, PGIMMMIO2REGION pRegion);
 #endif /* IN_RING3 */
 
Index: /trunk/src/VBox/VMM/include/HMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/include/HMInternal.h	(revision 51642)
+++ /trunk/src/VBox/VMM/include/HMInternal.h	(revision 51643)
@@ -5,5 +5,5 @@
 
 /*
- * Copyright (C) 2006-2013 Oracle Corporation
+ * Copyright (C) 2006-2014 Oracle Corporation
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as
@@ -313,26 +313,18 @@
     /** Set when we've initialized VMX or SVM. */
     bool                        fInitialized;
-
     /** Set if nested paging is enabled. */
     bool                        fNestedPaging;
-
     /** Set if nested paging is allowed. */
     bool                        fAllowNestedPaging;
-
     /** Set if large pages are enabled (requires nested paging). */
     bool                        fLargePages;
-
     /** Set if we can support 64-bit guests or not. */
     bool                        fAllow64BitGuests;
-
     /** Set if an IO-APIC is configured for this VM. */
     bool                        fHasIoApic;
-
     /** Set when TPR patching is allowed. */
     bool                        fTprPatchingAllowed;
-
     /** Set when we initialize VT-x or AMD-V once for all CPUs. */
     bool                        fGlobalInit;
-
     /** Set when TPR patching is active. */
     bool                        fTPRPatchingActive;
@@ -341,5 +333,4 @@
     /** Maximum ASID allowed. */
     uint32_t                    uMaxAsid;
-
     /** The maximum number of resumes loops allowed in ring-0 (safety precaution).
      * This number is set much higher when RTThreadPreemptIsPending is reliable. */
@@ -352,10 +343,10 @@
     /** Size of the guest patch memory block. */
     uint32_t                    cbGuestPatchMem;
-    uint32_t                    uPadding1;
+    uint32_t                    u32Alignment0;
 
 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     /** 32 to 64 bits switcher entrypoint. */
     R0PTRTYPE(PFNHMSWITCHERHC)  pfnHost32ToGuest64R0;
-    RTR0PTR                     uPadding2;
+    RTR0PTR                     pvR0Alignment0;
 #endif
 
@@ -365,20 +356,14 @@
          *  CPU. */
         bool                        fSupported;
-
         /** Set when we've enabled VMX. */
         bool                        fEnabled;
-
         /** Set if VPID is supported. */
         bool                        fVpid;
-
         /** Set if VT-x VPID is allowed. */
         bool                        fAllowVpid;
-
         /** Set if unrestricted guest execution is in use (real and protected mode without paging). */
         bool                        fUnrestrictedGuest;
-
         /** Set if unrestricted guest execution is allowed to be used. */
         bool                        fAllowUnrestricted;
-
         /** Whether we're using the preemption timer or not. */
         bool                        fUsePreemptTimer;
@@ -388,27 +373,23 @@
         /** Virtual address of the TSS page used for real mode emulation. */
         R3PTRTYPE(PVBOXTSS)         pRealModeTSS;
-
         /** Virtual address of the identity page table used for real mode and protected mode without paging emulation in EPT mode. */
         R3PTRTYPE(PX86PD)           pNonPagingModeEPTPageTable;
 
+        /** Physical address of the APIC-access page. */
+        RTHCPHYS                    HCPhysApicAccess;
         /** R0 memory object for the APIC-access page. */
         RTR0MEMOBJ                  hMemObjApicAccess;
-        /** Physical address of the APIC-access page. */
-        RTHCPHYS                    HCPhysApicAccess;
         /** Virtual address of the APIC-access page. */
         R0PTRTYPE(uint8_t *)        pbApicAccess;
 
 #ifdef VBOX_WITH_CRASHDUMP_MAGIC
+        RTHCPHYS                    HCPhysScratch;
         RTR0MEMOBJ                  hMemObjScratch;
-        RTHCPHYS                    HCPhysScratch;
         R0PTRTYPE(uint8_t *)        pbScratch;
 #endif
 
         /** Internal Id of which flush-handler to use for tagged-TLB entries. */
-        unsigned                    uFlushTaggedTlb;
-
-#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
-        uint32_t                    u32Alignment;
-#endif
+        uint32_t                    uFlushTaggedTlb;
+        uint32_t                    u32Alignment0;
         /** Host CR4 value (set by ring-0 VMX init) */
         uint64_t                    u64HostCr4;
@@ -418,5 +399,5 @@
         /** Whether the CPU supports VMCS fields for swapping EFER. */
         bool                        fSupportsVmcsEfer;
-        bool                        afAlignment1[7];
+        uint8_t                     u8Alignment2[7];
 
         /** VMX MSR values */
@@ -424,6 +405,6 @@
 
         /** Flush types for invept & invvpid; they depend on capabilities. */
-        VMX_FLUSH_EPT               enmFlushEpt;
-        VMX_FLUSH_VPID              enmFlushVpid;
+        VMXFLUSHEPT                 enmFlushEpt;
+        VMXFLUSHVPID                enmFlushVpid;
     } vmx;
 
@@ -439,9 +420,10 @@
         /** Set when the hack to ignore VERR_SVM_IN_USE is active. */
         bool                        fIgnoreInUseError;
-
+        uint8_t                     u8Alignment0[4];
+
+        /** Physical address of the IO bitmap (12kb). */
+        RTHCPHYS                    HCPhysIOBitmap;
         /** R0 memory object for the IO bitmap (12kb). */
         RTR0MEMOBJ                  hMemObjIOBitmap;
-        /** Physical address of the IO bitmap (12kb). */
-        RTHCPHYS                    HCPhysIOBitmap;
         /** Virtual address of the IO bitmap. */
         R0PTRTYPE(void *)           pvIOBitmap;
@@ -452,5 +434,4 @@
         /** SVM revision. */
         uint32_t                    u32Rev;
-
         /** SVM feature bits from cpuid 0x8000000a */
         uint32_t                    u32Features;
@@ -458,5 +439,6 @@
 
     /**
-     * AVL tree with all patches (active or disabled) sorted by guest instruction address
+     * AVL tree with all patches (active or disabled) sorted by guest instruction
+     * address.
      */
     AVLOU32TREE                     PatchTree;
@@ -488,5 +470,7 @@
 #define VMCSCACHE_MAX_ENTRY                             128
 
-/* Structure for storing read and write VMCS actions. */
+/**
+ * Structure for storing read and write VMCS actions.
+ */
 typedef struct VMCSCACHE
 {
@@ -546,4 +530,5 @@
 /** Pointer to VMCSCACHE. */
 typedef VMCSCACHE *PVMCSCACHE;
+AssertCompileSizeAlignment(VMCSCACHE, 8);
 
 /** VMX StartVM function. */
@@ -597,16 +582,9 @@
     struct
     {
-        /** Physical address of the VM control structure (VMCS). */
-        RTHCPHYS                    HCPhysVmcs;
-        /** R0 memory object for the VM control structure (VMCS). */
-        RTR0MEMOBJ                  hMemObjVmcs;
-        /** Virtual address of the VM control structure (VMCS). */
-        R0PTRTYPE(void *)           pvVmcs;
         /** Ring 0 handlers for VT-x. */
         PFNHMVMXSTARTVM             pfnStartVM;
 #if HC_ARCH_BITS == 32
-        uint32_t                    u32Alignment1;
-#endif
-
+        uint32_t                    u32Alignment0;
+#endif
         /** Current VMX_VMCS32_CTRL_PIN_EXEC. */
         uint32_t                    u32PinCtls;
@@ -620,14 +598,4 @@
         uint32_t                    u32EntryCtls;
 
-        /** Physical address of the virtual APIC page for TPR caching. */
-        RTHCPHYS                    HCPhysVirtApic;
-        /** R0 memory object for the virtual APIC page for TPR caching. */
-        RTR0MEMOBJ                  hMemObjVirtApic;
-        /** Virtual address of the virtual APIC page for TPR caching. */
-        R0PTRTYPE(uint8_t *)        pbVirtApic;
-#if HC_ARCH_BITS == 32
-        uint32_t                    u32Alignment2;
-#endif
-
         /** Current CR0 mask. */
         uint32_t                    u32CR0Mask;
@@ -638,6 +606,19 @@
         /** The updated-guest-state mask. */
         volatile uint32_t           fUpdatedGuestState;
-        /** Current EPTP. */
-        RTHCPHYS                    HCPhysEPTP;
+        uint32_t                    u32Alignment1;
+
+        /** Physical address of the VM control structure (VMCS). */
+        RTHCPHYS                    HCPhysVmcs;
+        /** R0 memory object for the VM control structure (VMCS). */
+        RTR0MEMOBJ                  hMemObjVmcs;
+        /** Virtual address of the VM control structure (VMCS). */
+        R0PTRTYPE(void *)           pvVmcs;
+
+        /** Physical address of the virtual APIC page for TPR caching. */
+        RTHCPHYS                    HCPhysVirtApic;
+        /** R0 memory object for the virtual APIC page for TPR caching. */
+        RTR0MEMOBJ                  hMemObjVirtApic;
+        /** Virtual address of the virtual APIC page for TPR caching. */
+        R0PTRTYPE(uint8_t *)        pbVirtApic;
 
         /** Physical address of the MSR bitmap. */
@@ -665,9 +646,12 @@
         R0PTRTYPE(void *)           pvHostMsr;
 
+        /** Current EPTP. */
+        RTHCPHYS                    HCPhysEPTP;
+
         /** Number of guest/host MSR pairs in the auto-load/store area. */
         uint32_t                    cMsrs;
         /** Whether the host MSR values are up-to-date in the auto-load/store area. */
         bool                        fUpdatedHostMsrs;
-        uint8_t                     u8Align[7];
+        uint8_t                     u8Alignment0[3];
 
         /** Host LSTAR MSR value to restore lazily while leaving VT-x. */
@@ -681,5 +665,5 @@
         /** A mask of which MSRs have been swapped and need restoration. */
         uint32_t                    fRestoreHostMsrs;
-        uint32_t                    u32Alignment3;
+        uint32_t                    u32Alignment2;
 
         /** The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */
@@ -704,4 +688,5 @@
         } RealMode;
 
+        /** VT-x error-reporting (mainly for ring-3 propagation). */
         struct
         {
@@ -712,8 +697,8 @@
             RTCPUID                 idEnteredCpu;
             RTCPUID                 idCurrentCpu;
-            uint32_t                u32Padding;
+            uint32_t                u32Alignment0;
         } LastError;
 
-        /** State of the VMCS. */
+        /** Current state of the VMCS. */
         uint32_t                    uVmcsState;
         /** Which host-state bits to restore before being preempted. */
@@ -721,35 +706,36 @@
         /** The host-state restoration structure. */
         VMXRESTOREHOST              RestoreHost;
+
         /** Set if guest was executing in real mode (extra checks). */
         bool                        fWasInRealMode;
-        uint8_t                     u8Align2[7];
-
-        /** Alignment padding. */
-        uint32_t                    u32Padding;
+        uint8_t                     u8Alignment1[7];
     } vmx;
 
     struct
     {
+        /** Ring 0 handlers for AMD-V. */
+        PFNHMSVMVMRUN               pfnVMRun;
+#if HC_ARCH_BITS == 32
+        uint32_t                    u32Alignment0;
+#endif
+
+        /** Physical address of the host VMCB which holds additional host-state. */
+        RTHCPHYS                    HCPhysVmcbHost;
         /** R0 memory object for the host VMCB which holds additional host-state. */
         RTR0MEMOBJ                  hMemObjVmcbHost;
-        /** Physical address of the host VMCB which holds additional host-state. */
-        RTHCPHYS                    HCPhysVmcbHost;
         /** Virtual address of the host VMCB which holds additional host-state. */
         R0PTRTYPE(void *)           pvVmcbHost;
 
+        /** Physical address of the guest VMCB. */
+        RTHCPHYS                    HCPhysVmcb;
         /** R0 memory object for the guest VMCB. */
         RTR0MEMOBJ                  hMemObjVmcb;
-        /** Physical address of the guest VMCB. */
-        RTHCPHYS                    HCPhysVmcb;
         /** Virtual address of the guest VMCB. */
         R0PTRTYPE(void *)           pvVmcb;
 
-        /** Ring 0 handlers for VT-x. */
-        PFNHMSVMVMRUN               pfnVMRun;
-
+        /** Physical address of the MSR bitmap (8 KB). */
+        RTHCPHYS                    HCPhysMsrBitmap;
         /** R0 memory object for the MSR bitmap (8 KB). */
         RTR0MEMOBJ                  hMemObjMsrBitmap;
-        /** Physical address of the MSR bitmap (8 KB). */
-        RTHCPHYS                    HCPhysMsrBitmap;
         /** Virtual address of the MSR bitmap. */
         R0PTRTYPE(void *)           pvMsrBitmap;
@@ -758,8 +744,5 @@
          *  we should check if the VTPR changed on every VM-exit. */
         bool                        fSyncVTpr;
-        uint8_t                     u8Align[7];
-
-        /** Alignment padding. */
-        uint32_t                    u32Padding;
+        uint8_t                     u8Alignment0[7];
     } svm;
 
@@ -791,5 +774,5 @@
         /** Pending IO operation type. */
         HMPENDINGIO             enmType;
-        uint32_t                uPadding;
+        uint32_t                u32Alignment0;
         RTGCPTR                 GCPtrRip;
         RTGCPTR                 GCPtrRipNext;
@@ -822,5 +805,5 @@
         RTGCPTR             aPages[HM_MAX_TLB_SHOOTDOWN_PAGES];
         uint32_t            cPages;
-        uint32_t            u32Padding; /**< Explicit alignment padding. */
+        uint32_t            u32Alignment0; /**< Explicit alignment padding. */
     } TlbShootdown;
 
@@ -931,4 +914,6 @@
     STAMCOUNTER             StatSwitchLongJmpToR3;
 
+    STAMCOUNTER             StatTscOffsetAdjusted;
+    STAMCOUNTER             StatTscParavirt;
     STAMCOUNTER             StatTscOffset;
     STAMCOUNTER             StatTscIntercept;
@@ -970,19 +955,21 @@
 /** Pointer to HM VMCPU instance data. */
 typedef HMCPU *PHMCPU;
+AssertCompileMemberAlignment(HMCPU, vmx, 8);
+AssertCompileMemberAlignment(HMCPU, svm, 8);
+AssertCompileMemberAlignment(HMCPU, Event, 8);
 
 
 #ifdef IN_RING0
-
 VMMR0DECL(PHMGLOBALCPUINFO) HMR0GetCurrentCpu(void);
 VMMR0DECL(PHMGLOBALCPUINFO) HMR0GetCurrentCpuEx(RTCPUID idCpu);
 
 
-#ifdef VBOX_STRICT
+# ifdef VBOX_STRICT
 VMMR0DECL(void) HMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
 VMMR0DECL(void) HMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
-#else
-# define HMDumpRegs(a, b ,c)            do { } while (0)
-# define HMR0DumpDescriptor(a, b, c)    do { } while (0)
-#endif
+# else
+#  define HMDumpRegs(a, b ,c)            do { } while (0)
+#  define HMR0DumpDescriptor(a, b, c)    do { } while (0)
+# endif /* VBOX_STRICT */
 
 # ifdef VBOX_WITH_KERNEL_USING_XMM
@@ -1004,5 +991,5 @@
  */
 DECLASM(uint64_t) HMR0Get64bitCR3(void);
-# endif
+# endif  /* VBOX_WITH_HYBRID_32BIT_KERNEL */
 
 #endif /* IN_RING0 */
Index: /trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
===================================================================
--- /trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp	(revision 51642)
+++ /trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp	(revision 51643)
@@ -7,5 +7,5 @@
 
 /*
- * Copyright (C) 2006-2012 Oracle Corporation
+ * Copyright (C) 2006-2014 Oracle Corporation
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as
@@ -408,12 +408,19 @@
     /* hm - 32-bit gcc won't align uint64_t naturally, so check. */
     CHECK_MEMBER_ALIGNMENT(HM, uMaxAsid, 8);
-    CHECK_MEMBER_ALIGNMENT(HM, vmx.u64HostCr4, 8);
-    CHECK_MEMBER_ALIGNMENT(HM, vmx.Msrs.u64FeatureCtrl, 8);
-    CHECK_MEMBER_ALIGNMENT(HM, StatTprPatchSuccess, 8);
+    CHECK_MEMBER_ALIGNMENT(HM, vmx, 8);
+    CHECK_MEMBER_ALIGNMENT(HM, vmx.Msrs, 8);
+    CHECK_MEMBER_ALIGNMENT(HM, svm, 8);
+    CHECK_MEMBER_ALIGNMENT(HM, PatchTree, 8);
+    CHECK_MEMBER_ALIGNMENT(HM, aPatches, 8);
+    CHECK_MEMBER_ALIGNMENT(HMCPU, vmx, 8);
+    CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.pfnStartVM, 8);
+    CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.HCPhysVmcs, 8);
+    CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.LastError, 8);
+    CHECK_MEMBER_ALIGNMENT(HMCPU, svm, 8);
+    CHECK_MEMBER_ALIGNMENT(HMCPU, svm.pfnVMRun, 8);
+    CHECK_MEMBER_ALIGNMENT(HMCPU, Event, 8);
+    CHECK_MEMBER_ALIGNMENT(HMCPU, Event.u64IntInfo, 8);
+    CHECK_MEMBER_ALIGNMENT(HMCPU, DisState, 8);
     CHECK_MEMBER_ALIGNMENT(HMCPU, StatEntry, 8);
-    CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.HCPhysVmcs, sizeof(RTHCPHYS));
-    CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.u32PinCtls, 8);
-    CHECK_MEMBER_ALIGNMENT(HMCPU, DisState, 8);
-    CHECK_MEMBER_ALIGNMENT(HMCPU, Event.u64IntInfo, 8);
 
     /* Make sure the set is large enough and has the correct size. */
