Index: /trunk/include/VBox/vmm/hm_vmx.h
===================================================================
--- /trunk/include/VBox/vmm/hm_vmx.h	(revision 75386)
+++ /trunk/include/VBox/vmm/hm_vmx.h	(revision 75387)
@@ -3187,4 +3187,9 @@
 /** The size of the Virtual-APIC page (in pages). */
 #define VMX_V_VIRT_APIC_PAGES                                   1
+
+/** Virtual X2APIC MSR range start. */
+#define VMX_V_VIRT_APIC_MSR_START                               0x800
+/** Virtual X2APIC MSR range end. */
+#define VMX_V_VIRT_APIC_MSR_END                                 0x8ff
 
 /** The size of the VMREAD/VMWRITE bitmap (in bytes). */
Index: /trunk/include/VBox/vmm/iem.h
===================================================================
--- /trunk/include/VBox/vmm/iem.h	(revision 75386)
+++ /trunk/include/VBox/vmm/iem.h	(revision 75387)
@@ -328,4 +328,5 @@
 
 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+VMM_INT_DECL(VBOXSTRICTRC)  IEMExecVmxVirtApicAccessMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Val, bool fWrite);
 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecVmxVmexitPreemptTimer(PVMCPU pVCpu);
 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending);
Index: /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp	(revision 75386)
+++ /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp	(revision 75387)
@@ -25,4 +25,7 @@
 #include <VBox/vmm/hm.h>
 #include <VBox/vmm/hm_vmx.h>
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+# include <VBox/vmm/iem.h>
+#endif
 #include <VBox/vmm/tm.h>
 #include <VBox/vmm/gim.h>
@@ -1271,4 +1274,21 @@
 {
     RT_NOREF_PV(pRange);
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+    if (   CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest)
+        && CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.s.Guest, VMX_PROC_CTLS2_VIRT_X2APIC_MODE))
+    {
+        /** @todo NSTVMX: perhaps IEMExecVmxVirtApicAccessMsr should be moved to
+         *        HMVMXAll.cpp? */
+        VBOXSTRICTRC rcStrict = IEMExecVmxVirtApicAccessMsr(pVCpu, idMsr, puValue, false /* fWrite */);
+        Assert(rcStrict == VINF_VMX_MODIFIES_BEHAVIOR || rcStrict == VERR_OUT_OF_RANGE || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE);
+        if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
+        {
+            if (rcStrict == VERR_OUT_OF_RANGE)
+                return VERR_CPUM_RAISE_GP_0;
+            Assert(rcStrict == VINF_VMX_MODIFIES_BEHAVIOR);
+            return VINF_SUCCESS;
+        }
+    }
+#endif
     return APICReadMsr(pVCpu, idMsr, puValue);
 }
@@ -1279,4 +1299,21 @@
 {
     RT_NOREF_PV(pRange); RT_NOREF_PV(uRawValue);
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+    if (   CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest)
+        && CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.s.Guest, VMX_PROC_CTLS2_VIRT_X2APIC_MODE))
+    {
+        /** @todo NSTVMX: perhaps IEMExecVmxVirtApicAccessMsr should be moved to
+         *        HMVMXAll.cpp? */
+        VBOXSTRICTRC rcStrict = IEMExecVmxVirtApicAccessMsr(pVCpu, idMsr, &uValue, true /* fWrite */);
+        Assert(rcStrict == VINF_VMX_MODIFIES_BEHAVIOR || rcStrict == VERR_OUT_OF_RANGE || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE);
+        if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
+        {
+            if (rcStrict == VERR_OUT_OF_RANGE)
+                return VERR_CPUM_RAISE_GP_0;
+            Assert(rcStrict == VINF_VMX_MODIFIES_BEHAVIOR);
+            return VINF_SUCCESS;
+        }
+    }
+#endif
     return APICWriteMsr(pVCpu, idMsr, uValue);
 }
Index: /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp	(revision 75386)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp	(revision 75387)
@@ -989,4 +989,6 @@
 IEM_STATIC VBOXSTRICTRC     iemVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
 IEM_STATIC VBOXSTRICTRC     iemVmxVmexitApicAccess(PVMCPU pVCpu, uint16_t offAccess, uint32_t fAccess);
+IEM_STATIC VBOXSTRICTRC     iemVmxVirtApicAccessMsrRead(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value);
+IEM_STATIC VBOXSTRICTRC     iemVmxVirtApicAccessMsrWrite(PVMCPU pVCpu, uint32_t idMsr, uint64_t u64Value);
 #endif
 
@@ -15696,4 +15698,36 @@
 
 /**
+ * Interface for HM and EM to virtualize x2APIC MSR accesses.
+ *
+ * @returns Strict VBox status code.
+ * @retval  VINF_VMX_MODIFIES_BEHAVIOR if the MSR access was virtualized.
+ * @retval  VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
+ *          the x2APIC device.
+ * @retval  VERR_OUT_OF_RANGE if the caller must raise \#GP(0).
+ * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
+ * @param   idMsr       The MSR being read.
+ * @param   pu64Value   Pointer to the value being written or where to store the
+ *                      value being read.
+ * @param   fWrite      Whether this is an MSR write or read access.
+ * @thread  EMT(pVCpu)
+ */
+VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
+{
+    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
+    Assert(pu64Value);
+
+    VBOXSTRICTRC rcStrict;
+    if (!fWrite)
+        rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
+    else
+        rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
+    if (pVCpu->iem.s.cActiveMappings)
+        iemMemRollback(pVCpu);
+    return iemExecStatusCodeFiddling(pVCpu, rcStrict);
+
+}
+
+
+/**
  * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
  *
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h	(revision 75386)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h	(revision 75387)
@@ -3976,4 +3976,21 @@
 
 /**
+ * Reads a 64-bit register from the virtual-APIC page at the given offset.
+ *
+ * @returns The register from the virtual-APIC page.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   offReg      The offset of the register being read.
+ */
+DECLINLINE(uint64_t) iemVmxVirtApicReadRaw64(PVMCPU pVCpu, uint16_t offReg)
+{
+    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint64_t));
+    uint8_t  const *pbVirtApic = (const uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
+    Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
+    uint64_t const uReg = *(const uint64_t *)(pbVirtApic + offReg);
+    return uReg;
+}
+
+
+/**
  * Writes a 32-bit register to the virtual-APIC page at the given offset.
  *
@@ -3988,4 +4005,20 @@
     Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
     *(uint32_t *)(pbVirtApic + offReg) = uReg;
+}
+
+
+/**
+ * Writes a 64-bit register to the virtual-APIC page at the given offset.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   offReg      The offset of the register being written.
+ * @param   uReg        The register value to write.
+ */
+DECLINLINE(void) iemVmxVirtApicWriteRaw64(PVMCPU pVCpu, uint16_t offReg, uint64_t uReg)
+{
+    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint64_t));
+    uint8_t *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
+    Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
+    *(uint64_t *)(pbVirtApic + offReg) = uReg;
 }
 
@@ -4230,5 +4263,6 @@
         /*
          * Record the currently updated APIC offset, as we need this later for figuring
-         * out what to do as well as the exit qualification when causing an APIC-write VM-exit.
+         * out whether to perform TPR, EOI or self-IPI virtualization as well
+         * as for supplying the exit qualification when causing an APIC-write VM-exit.
          */
         pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offAccess;
@@ -4273,4 +4307,121 @@
 
     return VINF_VMX_MODIFIES_BEHAVIOR;
+}
+
+
+/**
+ * Virtualizes an MSR-based APIC read access.
+ *
+ * @returns VBox strict status code.
+ * @retval  VINF_VMX_MODIFIES_BEHAVIOR if the MSR read was virtualized.
+ * @retval  VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR read access must be
+ *          handled by the x2APIC device.
+ * @retval  VERR_OUT_OF_RANGE if the MSR read was supposed to be virtualized but was
+ *          not within the range of valid MSRs, caller must raise \#GP(0).
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   idMsr       The x2APIC MSR being read.
+ * @param   pu64Value   Where to store the read x2APIC MSR value (only valid when
+ *                      VINF_VMX_MODIFIES_BEHAVIOR is returned).
+ */
+IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value)
+{
+    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    Assert(pVmcs);
+    Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE);
+    Assert(pu64Value);
+
+    if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
+    {
+        /*
+         * Intel has different ideas in the x2APIC spec. vs the VT-x spec. as to
+         * what the end of the valid x2APIC MSR range is. Hence the use of different
+         * macros here.
+         *
+         * See Intel spec. 10.12.1.2 "x2APIC Register Address Space".
+         * See Intel spec. 29.5 "Virtualizing MSR-based APIC Accesses".
+         */
+        if (   idMsr >= VMX_V_VIRT_APIC_MSR_START
+            && idMsr <= VMX_V_VIRT_APIC_MSR_END)
+        {
+            uint16_t const offReg   = (idMsr & 0xff) << 4;
+            uint64_t const u64Value = iemVmxVirtApicReadRaw64(pVCpu, offReg);
+            *pu64Value = u64Value;
+            return VINF_VMX_MODIFIES_BEHAVIOR;
+        }
+        return VERR_OUT_OF_RANGE;
+    }
+
+    if (idMsr == MSR_IA32_X2APIC_TPR)
+    {
+        uint16_t const offReg   = (idMsr & 0xff) << 4;
+        uint64_t const u64Value = iemVmxVirtApicReadRaw64(pVCpu, offReg);
+        *pu64Value = u64Value;
+        return VINF_VMX_MODIFIES_BEHAVIOR;
+    }
+
+    return VINF_VMX_INTERCEPT_NOT_ACTIVE;
+}
+
+
+/**
+ * Virtualizes an MSR-based APIC write access.
+ *
+ * @returns VBox strict status code.
+ * @retval  VINF_VMX_MODIFIES_BEHAVIOR if the MSR write was virtualized.
+ * @retval  VERR_OUT_OF_RANGE if the MSR write was supposed to be virtualized but was
+ *          not within the range of valid MSRs, caller must raise \#GP(0).
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   idMsr       The x2APIC MSR being written.
+ * @param   u64Value    The value of the x2APIC MSR being written.
+ */
+IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPU pVCpu, uint32_t idMsr, uint64_t u64Value)
+{
+    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    Assert(pVmcs);
+
+    /*
+     * Check if the access is to be virtualized.
+     * See Intel spec. 29.5 "Virtualizing MSR-based APIC Accesses".
+     */
+    if (   idMsr == MSR_IA32_X2APIC_TPR
+        || (   (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
+            && (   idMsr == MSR_IA32_X2APIC_EOI
+                || idMsr == MSR_IA32_X2APIC_SELF_IPI)))
+    {
+        /* Validate the MSR write depending on the register. */
+        switch (idMsr)
+        {
+            case MSR_IA32_X2APIC_TPR:
+            case MSR_IA32_X2APIC_SELF_IPI:
+            {
+                if (u64Value & UINT64_C(0xffffffffffffff00))
+                    return VERR_OUT_OF_RANGE;
+                break;
+            }
+            case MSR_IA32_X2APIC_EOI:
+            {
+                if (u64Value != 0)
+                    return VERR_OUT_OF_RANGE;
+                break;
+            }
+        }
+
+        /* Write the MSR to the virtual-APIC page. */
+        uint16_t const offReg = (idMsr & 0xff) << 4;
+        iemVmxVirtApicWriteRaw64(pVCpu, offReg, u64Value);
+
+        /*
+         * Record the currently updated APIC offset, as we need this later for figuring
+     * out whether to perform TPR, EOI or self-IPI virtualization as well
+         * as for supplying the exit qualification when causing an APIC-write VM-exit.
+         */
+        pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offReg;
+        VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_UPDATE_VAPIC);
+
+        return VINF_VMX_MODIFIES_BEHAVIOR;
+    }
+
+    return VINF_VMX_INTERCEPT_NOT_ACTIVE;
 }
 
