Index: /trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin.cpp	(revision 92452)
+++ /trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin.cpp	(revision 92453)
@@ -967,4 +967,6 @@
         APICSetTpr(pVCpu, u64Cr8);
     }
+    if (fWhat & CPUMCTX_EXTRN_XCRx)
+        READ_GREG(HV_X86_XCR0, pVCpu->cpum.GstCtx.aXcr[0]);
 
     /* Debug registers. */
@@ -1087,4 +1089,8 @@
         pVCpu->cpum.GstCtx.fExtrn = 0;
 
+#ifdef LOG_ENABLED
+    nemR3DarwinLogState(pVM, pVCpu);
+#endif
+
     /* Typical. */
     if (!fMaybeChangedMode && !fUpdateCr3)
@@ -1262,4 +1268,5 @@
 
 
+#if 0 /* unused */
 DECL_FORCE_INLINE(bool) nemR3DarwinIsVmxLbr(PCVMCC pVM)
 {
@@ -1267,4 +1274,5 @@
     return false;
 }
+#endif
 
 
@@ -1274,5 +1282,5 @@
 #define IN_NEM_DARWIN
 //#define HMVMX_ALWAYS_TRAP_ALL_XCPTS
-#define HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
+//#define HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
 #define VCPU_2_VMXSTATE(a_pVCpu)            (a_pVCpu)->nem.s
 #define VCPU_2_VMXSTATS(a_pVCpu)            (*(a_pVCpu)->nem.s.pVmxStats)
@@ -1539,4 +1547,8 @@
     RT_NOREF(pVM);
 
+#ifdef LOG_ENABLED
+    nemR3DarwinLogState(pVM, pVCpu);
+#endif
+
     uint64_t const fWhat = ~pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL;
     if (!fWhat)
@@ -1571,6 +1583,15 @@
     AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
 
+    if (fWhat & CPUMCTX_EXTRN_XCRx)
+    {
+        WRITE_GREG(HV_X86_XCR0, pVCpu->cpum.GstCtx.aXcr[0]);
+        ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_XCRx);
+    }
+
     if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
+    {
         WRITE_GREG(HV_X86_TPR, CPUMGetGuestCR8(pVCpu));
+        ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
+    }
 
     /* Debug registers. */
@@ -1581,11 +1602,18 @@
         WRITE_GREG(HV_X86_DR2, pVCpu->cpum.GstCtx.dr[2]); // CPUMGetHyperDR2(pVCpu));
         WRITE_GREG(HV_X86_DR3, pVCpu->cpum.GstCtx.dr[3]); // CPUMGetHyperDR3(pVCpu));
+        ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_DR0_DR3);
     }
     if (fWhat & CPUMCTX_EXTRN_DR6)
+    {
         WRITE_GREG(HV_X86_DR6, pVCpu->cpum.GstCtx.dr[6]); // CPUMGetHyperDR6(pVCpu));
+        ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_DR6);
+    }
     if (fWhat & CPUMCTX_EXTRN_DR7)
+    {
         WRITE_GREG(HV_X86_DR7, pVCpu->cpum.GstCtx.dr[7]); // CPUMGetHyperDR7(pVCpu));
-
-    if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
+        ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_DR7);
+    }
+
+    if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE))
     {
         hv_return_t hrc = hv_vcpu_write_fpstate(pVCpu->nem.s.hVCpuId, &pVCpu->cpum.GstCtx.XState, sizeof(pVCpu->cpum.GstCtx.XState));
@@ -1594,11 +1622,19 @@
         else
             return nemR3DarwinHvSts2Rc(hrc);
+
+        ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~(HM_CHANGED_GUEST_X87 | HM_CHANGED_GUEST_SSE_AVX | HM_CHANGED_GUEST_OTHER_XSAVE));
     }
 
     /* MSRs */
     if (fWhat & CPUMCTX_EXTRN_EFER)
+    {
         WRITE_VMCS_FIELD(VMX_VMCS64_GUEST_EFER_FULL, pVCpu->cpum.GstCtx.msrEFER);
+        ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_EFER_MSR);
+    }
     if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
+    {
         WRITE_MSR(MSR_K8_KERNEL_GS_BASE, pVCpu->cpum.GstCtx.msrKERNELGSBASE);
+        ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_KERNEL_GS_BASE);
+    }
     if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
     {
@@ -1606,4 +1642,5 @@
         WRITE_MSR(MSR_IA32_SYSENTER_EIP, pVCpu->cpum.GstCtx.SysEnter.eip);
         WRITE_MSR(MSR_IA32_SYSENTER_ESP, pVCpu->cpum.GstCtx.SysEnter.esp);
+        ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_MSR_MASK);
     }
     if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
@@ -1613,4 +1650,5 @@
         WRITE_MSR(MSR_K8_CSTAR, pVCpu->cpum.GstCtx.msrCSTAR);
         WRITE_MSR(MSR_K8_SF_MASK, pVCpu->cpum.GstCtx.msrSFMASK);
+        ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSCALL_MSRS);
     }
     if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
@@ -1619,4 +1657,6 @@
         if (RT_UNLIKELY(hrc != HV_SUCCESS))
             return nemR3DarwinHvSts2Rc(hrc);
+
+        ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_OTHER_MSRS);
 
 #if 0
@@ -1652,11 +1692,4 @@
     WRITE_VMCS_FIELD(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0 /*MSR_IA32_DEBUGCTL_LBR*/);
 
-#if 0 /** @todo */
-    WRITE_GREG(HV_X86_TSS_BASE, );
-    WRITE_GREG(HV_X86_TSS_LIMIT, );
-    WRITE_GREG(HV_X86_TSS_AR, );
-    WRITE_GREG(HV_X86_XCR0, );
-#endif
-
     hv_vcpu_invalidate_tlb(pVCpu->nem.s.hVCpuId);
     hv_vcpu_flush(pVCpu->nem.s.hVCpuId);
@@ -1665,15 +1698,9 @@
 
     /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. */
-    ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~(  (HM_CHANGED_GUEST_GPRS_MASK & ~HM_CHANGED_GUEST_RSP)
-                                                   |  HM_CHANGED_GUEST_CR2
-                                                   | (HM_CHANGED_GUEST_DR_MASK & ~HM_CHANGED_GUEST_DR7)
-                                                   |  HM_CHANGED_GUEST_X87
-                                                   |  HM_CHANGED_GUEST_SSE_AVX
-                                                   |  HM_CHANGED_GUEST_OTHER_XSAVE
-                                                   |  HM_CHANGED_GUEST_XCRx
-                                                   |  HM_CHANGED_GUEST_KERNEL_GS_BASE /* Part of lazy or auto load-store MSRs. */
-                                                   |  HM_CHANGED_GUEST_SYSCALL_MSRS   /* Part of lazy or auto load-store MSRs. */
-                                                   |  HM_CHANGED_GUEST_TSC_AUX
-                                                   |  HM_CHANGED_GUEST_OTHER_MSRS
+    ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~(
+                                                     HM_CHANGED_GUEST_TSC_AUX
+                                                   | HM_CHANGED_GUEST_HWVIRT
+                                                   | HM_CHANGED_VMX_GUEST_AUTO_MSRS
+                                                   | HM_CHANGED_VMX_GUEST_LAZY_MSRS
                                                    | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_VMX_MASK)));
 
@@ -1708,8 +1735,7 @@
                                     VERR_NEM_IPE_0);
 
-    /** @todo Only copy the state on demand (requires changing to adhere to fCtxChanged from th VMX code
-     * flags instead of the fExtrn one living in CPUM.
-     */
-    rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, UINT64_MAX);
+    /** @todo Only copy the state on demand (the R0 VT-x code saves some stuff unconditionally and the VMX template assumes that
+     * when handling exits). */
+    rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
     AssertRCReturn(rc, rc);
 
@@ -1723,5 +1749,5 @@
 
 /**
- * Worker for nemR3NativeInit that loads the Hypervisor.framwork shared library.
+ * Worker for nemR3NativeInit that loads the Hypervisor.framework shared library.
  *
  * @returns VBox status code.
@@ -2389,5 +2415,5 @@
     if (pVM->nem.s.fCreatedAsid)
     {
-        hv_return_t hrc = hv_vcpu_set_space(pVCpu->nem.s.hVCpuId, pVM->nem.s.uVmAsid);
+        hrc = hv_vcpu_set_space(pVCpu->nem.s.hVCpuId, pVM->nem.s.uVmAsid);
         AssertReturn(hrc == HV_SUCCESS, VERR_NEM_VM_CREATE_FAILED);
     }
@@ -2476,5 +2502,5 @@
          */
         hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpuId);
-        Assert(hrc == HV_SUCCESS);
+        Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
 
         if (pVCpu->nem.s.pVmxStats)
@@ -2490,5 +2516,5 @@
     {
         hv_return_t hrc = hv_vm_space_destroy(pVM->nem.s.uVmAsid);
-        Assert(hrc == HV_SUCCESS);
+        Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
         pVM->nem.s.fCreatedAsid = false;
     }
@@ -2624,4 +2650,5 @@
                 pVCpu->nem.s.Event.fPending = false;
 
+                Assert(!pVCpu->nem.s.fCtxChanged);
                 hv_return_t hrc;
                 if (hv_vcpu_run_until)
@@ -2645,4 +2672,5 @@
                         break;
                     }
+                    //Assert(!pVCpu->cpum.GstCtx.fExtrn);
                 }
                 else
@@ -2771,4 +2799,6 @@
     LogFlowFunc(("pVM=%p pVCpu=%p fFlags=%#x\n", pVM, pVCpu, fFlags));
 
+    RT_NOREF(pVM, fFlags);
+
     hv_return_t hrc = hv_vcpu_interrupt(&pVCpu->nem.s.hVCpuId, 1);
     if (hrc != HV_SUCCESS)
@@ -2813,5 +2843,5 @@
                                                   void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
 {
-    RT_NOREF(pVM, puNemRange);
+    RT_NOREF(pVM, puNemRange, pvRam, fFlags);
 
     Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d)\n",
@@ -3069,5 +3099,5 @@
     Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
           GCPhys, HCPhys, fPageProt, enmType, *pu2State));
-    RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);
+    RT_NOREF(HCPhys, fPageProt, enmType);
 
     return nemHCJustUnmapPage(pVM, GCPhys, pu2State);
@@ -3106,4 +3136,5 @@
 VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
 {
+    LogFlowFunc(("pVCpu=%p fWhat=%RX64\n", pVCpu, fWhat));
     STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
 
@@ -3163,5 +3194,8 @@
     hv_return_t hrc = hv_vm_sync_tsc(uPausedTscValue);
     if (RT_LIKELY(hrc == HV_SUCCESS))
+    {
+        ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_TSC_AUX);
         return VINF_SUCCESS;
+    }
 
     return nemR3DarwinHvSts2Rc(hrc);
@@ -3169,4 +3203,21 @@
 
 
+/**
+ * Returns features supported by the NEM backend.
+ *
+ * @returns Flags of features supported by the native NEM backend.
+ * @param   pVM             The cross context VM structure.
+ */
+VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
+{
+    RT_NOREF(pVM);
+    /*
+     * Apple's Hypervisor.framework requires CPU support for nested paging and unrestricted
+     * guest execution, so we can safely report these feature flags here unconditionally.
+     */
+    return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC | NEM_FEAT_F_XSAVE_XRSTOR;
+}
+
+
 /** @page pg_nem_darwin NEM/darwin - Native Execution Manager, macOS.
  *
