Index: /trunk/include/VBox/vmm/hm.h
===================================================================
--- /trunk/include/VBox/vmm/hm.h	(revision 73616)
+++ /trunk/include/VBox/vmm/hm.h	(revision 73617)
@@ -127,4 +127,5 @@
  * @{ */
 VMMDECL(bool)                   HMIsEnabledNotMacro(PVM pVM);
+VMMDECL(bool)                   HMCanExecuteGuest(PVMCPU pVCpu, PCCPUMCTX pCtx);
 VMM_INT_DECL(int)               HMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt);
 VMM_INT_DECL(bool)              HMHasPendingIrq(PVM pVM);
@@ -145,4 +146,5 @@
 VMM_INT_DECL(int)               HMVmxGetHostMsr(PVM pVM, uint32_t idMsr, uint64_t *puValue);
 VMM_INT_DECL(const char *)      HMVmxGetInstrDiagDesc(VMXVINSTRDIAG enmInstrDiag);
+VMM_INT_DECL(bool)              HMVmxCanExecuteGuest(PVMCPU pVCpu, PCCPUMCTX pCtx);
 /** @} */
 
@@ -248,5 +250,4 @@
 VMMR3_INT_DECL(void)            HMR3ResetCpu(PVMCPU pVCpu);
 VMMR3_INT_DECL(void)            HMR3CheckError(PVM pVM, int iStatusCode);
-VMMR3DECL(bool)                 HMR3CanExecuteGuest(PVM pVM, PCPUMCTX pCtx);
 VMMR3_INT_DECL(void)            HMR3NotifyDebugEventChanged(PVM pVM);
 VMMR3_INT_DECL(void)            HMR3NotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu);
Index: /trunk/src/VBox/VMM/VMMAll/HMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/HMAll.cpp	(revision 73616)
+++ /trunk/src/VBox/VMM/VMMAll/HMAll.cpp	(revision 73617)
@@ -56,4 +56,39 @@
 
 /**
+ * Checks if the guest is in a suitable state for hardware-assisted execution.
+ *
+ * @returns @c true if it is suitable, @c false otherwise.
+ * @param   pVCpu   The cross context virtual CPU structure.
+ * @param   pCtx    Pointer to the guest CPU context.
+ *
+ * @remarks @a pCtx can be a partial context created and not necessarily the same as
+ *          pVCpu->cpum.GstCtx.
+ */
+VMMDECL(bool) HMCanExecuteGuest(PVMCPU pVCpu, PCCPUMCTX pCtx)
+{
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+    Assert(HMIsEnabled(pVM));
+
+#ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
+    if (   CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
+        || CPUMIsGuestVmxEnabled(pCtx))
+    {
+        LogFunc(("In nested-guest mode - returning false"));
+        return false;
+    }
+#endif
+
+    /* AMD-V supports real & protected mode with or without paging. */
+    if (pVM->hm.s.svm.fEnabled)
+    {
+        pVCpu->hm.s.fActive = true;
+        return true;
+    }
+
+    return HMVmxCanExecuteGuest(pVCpu, pCtx);
+}
+
+
+/**
  * Queues a guest page for invalidation.
  *
Index: /trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp	(revision 73616)
+++ /trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp	(revision 73617)
@@ -24,4 +24,5 @@
 #include "HMInternal.h"
 #include <VBox/vmm/vm.h>
+#include <VBox/vmm/pdmapi.h>
 
 
@@ -166,2 +167,336 @@
 }
 
+
+/**
+ * Checks if a code selector (CS) is suitable for execution using hardware-assisted
+ * VMX when unrestricted execution isn't available.
+ *
+ * @returns true if selector is suitable for VMX, otherwise
+ *        false.
+ * @param   pSel        Pointer to the selector to check (CS).
+ * @param   uStackDpl   The CPL, aka the DPL of the stack segment.
+ */
+static bool hmVmxIsCodeSelectorOk(PCCPUMSELREG pSel, unsigned uStackDpl)
+{
+    /*
+     * Segment must be an accessed code segment, it must be present and it must
+     * be usable.
+     * Note! These are all standard requirements and if CS holds anything else
+     *       we've got buggy code somewhere!
+     */
+    AssertCompile(X86DESCATTR_TYPE == 0xf);
+    AssertMsgReturn(   (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P | X86DESCATTR_UNUSABLE))
+                    ==                 (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P),
+                    ("%#x\n", pSel->Attr.u),
+                    false);
+
+    /* For conforming segments, CS.DPL must be <= SS.DPL, while CS.DPL
+       must equal SS.DPL for non-conforming segments.
+       Note! This is also a hard requirement like above. */
+    AssertMsgReturn(  pSel->Attr.n.u4Type & X86_SEL_TYPE_CONF
+                    ? pSel->Attr.n.u2Dpl <= uStackDpl
+                    : pSel->Attr.n.u2Dpl == uStackDpl,
+                    ("u4Type=%#x u2Dpl=%u uStackDpl=%u\n", pSel->Attr.n.u4Type, pSel->Attr.n.u2Dpl, uStackDpl),
+                    false);
+
+    /*
+     * The following two requirements are VT-x specific:
+     *  - G bit must be set if any high limit bits are set.
+     *  - G bit must be clear if any low limit bits are clear.
+     */
+    if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
+        && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
+        return true;
+    return false;
+}
+
+
+/**
+ * Checks if a data selector (DS/ES/FS/GS) is suitable for execution using
+ * hardware-assisted VMX when unrestricted execution isn't available.
+ *
+ * @returns true if selector is suitable for VMX, otherwise
+ *        false.
+ * @param   pSel        Pointer to the selector to check
+ *                      (DS/ES/FS/GS).
+ */
+static bool hmVmxIsDataSelectorOk(PCCPUMSELREG pSel)
+{
+    /*
+     * Unusable segments are OK.  These days they should be marked as such, but
+     * as an alternative, for old saved states and AMD<->VT-x migration, we
+     * we also treat segments with all the attributes cleared as unusable.
+     */
+    if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
+        return true;
+
+    /** @todo tighten these checks. Will require CPUM load adjusting. */
+
+    /* Segment must be accessed. */
+    if (pSel->Attr.u & X86_SEL_TYPE_ACCESSED)
+    {
+        /* Code segments must also be readable. */
+        if (  !(pSel->Attr.u & X86_SEL_TYPE_CODE)
+            || (pSel->Attr.u & X86_SEL_TYPE_READ))
+        {
+            /* The S bit must be set. */
+            if (pSel->Attr.n.u1DescType)
+            {
+                /* Except for conforming segments, DPL >= RPL. */
+                if (   pSel->Attr.n.u2Dpl  >= (pSel->Sel & X86_SEL_RPL)
+                    || pSel->Attr.n.u4Type >= X86_SEL_TYPE_ER_ACC)
+                {
+                    /* Segment must be present. */
+                    if (pSel->Attr.n.u1Present)
+                    {
+                        /*
+                         * The following two requirements are VT-x specific:
+                         *   - G bit must be set if any high limit bits are set.
+                         *   - G bit must be clear if any low limit bits are clear.
+                         */
+                        if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
+                            && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
+                            return true;
+                    }
+                }
+            }
+        }
+    }
+
+    return false;
+}
+
+
+/**
+ * Checks if the stack selector (SS) is suitable for execution using
+ * hardware-assisted VMX when unrestricted execution isn't available.
+ *
+ * @returns true if selector is suitable for VMX, otherwise
+ *        false.
+ * @param   pSel        Pointer to the selector to check (SS).
+ */
+static bool hmVmxIsStackSelectorOk(PCCPUMSELREG pSel)
+{
+    /*
+     * Unusable segments are OK.  These days they should be marked as such, but
+     * as an alternative, for old saved states and AMD<->VT-x migration, we
+     * we also treat segments with all the attributes cleared as unusable.
+     */
+    /** @todo r=bird: actually all zeroes isn't gonna cut it... SS.DPL == CPL. */
+    if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
+        return true;
+
+    /*
+     * Segment must be an accessed writable segment, it must be present.
+     * Note! These are all standard requirements and if SS holds anything else
+     *       we've got buggy code somewhere!
+     */
+    AssertCompile(X86DESCATTR_TYPE == 0xf);
+    AssertMsgReturn(   (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P | X86_SEL_TYPE_CODE))
+                    ==                 (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P),
+                    ("%#x\n", pSel->Attr.u), false);
+
+    /* DPL must equal RPL.
+       Note! This is also a hard requirement like above. */
+    AssertMsgReturn(pSel->Attr.n.u2Dpl == (pSel->Sel & X86_SEL_RPL),
+                    ("u2Dpl=%u Sel=%#x\n", pSel->Attr.n.u2Dpl, pSel->Sel), false);
+
+    /*
+     * The following two requirements are VT-x specific:
+     *   - G bit must be set if any high limit bits are set.
+     *   - G bit must be clear if any low limit bits are clear.
+     */
+    if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
+        && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
+        return true;
+    return false;
+}
+
+
+/**
+ * Checks if the guest is in a suitable state for hardware-assisted VMX execution.
+ *
+ * @returns @c true if it is suitable, @c false otherwise.
+ * @param   pVCpu   The cross context virtual CPU structure.
+ * @param   pCtx    Pointer to the guest CPU context.
+ *
+ * @remarks @a pCtx can be a partial context and thus may not be necessarily the
+ *          same as pVCpu->cpum.GstCtx! Thus don't eliminate the @a pCtx parameter.
+ *          Secondly, if additional checks are added that require more of the CPU
+ *          state, make sure REM (which supplies a partial state) is updated.
+ */
+VMM_INT_DECL(bool) HMVmxCanExecuteGuest(PVMCPU pVCpu, PCCPUMCTX pCtx)
+{
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+    Assert(HMIsEnabled(pVM));
+    Assert(!CPUMIsGuestVmxEnabled(pCtx));
+    Assert(   ( pVM->hm.s.vmx.fUnrestrictedGuest && !pVM->hm.s.vmx.pRealModeTSS)
+           || (!pVM->hm.s.vmx.fUnrestrictedGuest && pVM->hm.s.vmx.pRealModeTSS));
+
+    pVCpu->hm.s.fActive = false;
+
+    bool const fSupportsRealMode = pVM->hm.s.vmx.fUnrestrictedGuest || PDMVmmDevHeapIsEnabled(pVM);
+    if (!pVM->hm.s.vmx.fUnrestrictedGuest)
+    {
+        /*
+         * The VMM device heap is a requirement for emulating real mode or protected mode without paging when the
+         * unrestricted guest execution feature is missing (VT-x only).
+         */
+        if (fSupportsRealMode)
+        {
+            if (CPUMIsGuestInRealModeEx(pCtx))
+            {
+                /*
+                 * In V86 mode (VT-x or not), the CPU enforces real-mode compatible selector
+                 * bases and limits, i.e. limit must be 64K and base must be selector * 16.
+                 * If this is not true, we cannot execute real mode as V86 and have to fall
+                 * back to emulation.
+                 */
+                if (   pCtx->cs.Sel != (pCtx->cs.u64Base >> 4)
+                    || pCtx->ds.Sel != (pCtx->ds.u64Base >> 4)
+                    || pCtx->es.Sel != (pCtx->es.u64Base >> 4)
+                    || pCtx->ss.Sel != (pCtx->ss.u64Base >> 4)
+                    || pCtx->fs.Sel != (pCtx->fs.u64Base >> 4)
+                    || pCtx->gs.Sel != (pCtx->gs.u64Base >> 4))
+                {
+                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelBase);
+                    return false;
+                }
+                if (   (pCtx->cs.u32Limit != 0xffff)
+                    || (pCtx->ds.u32Limit != 0xffff)
+                    || (pCtx->es.u32Limit != 0xffff)
+                    || (pCtx->ss.u32Limit != 0xffff)
+                    || (pCtx->fs.u32Limit != 0xffff)
+                    || (pCtx->gs.u32Limit != 0xffff))
+                {
+                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelLimit);
+                    return false;
+                }
+                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckRmOk);
+            }
+            else
+            {
+                /*
+                 * Verify the requirements for executing code in protected mode. VT-x can't
+                 * handle the CPU state right after a switch from real to protected mode
+                 * (all sorts of RPL & DPL assumptions).
+                 */
+                if (pVCpu->hm.s.vmx.fWasInRealMode)
+                {
+                    /** @todo If guest is in V86 mode, these checks should be different! */
+                    if ((pCtx->cs.Sel & X86_SEL_RPL) != (pCtx->ss.Sel & X86_SEL_RPL))
+                    {
+                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRpl);
+                        return false;
+                    }
+                    if (   !hmVmxIsCodeSelectorOk(&pCtx->cs, pCtx->ss.Attr.n.u2Dpl)
+                        || !hmVmxIsDataSelectorOk(&pCtx->ds)
+                        || !hmVmxIsDataSelectorOk(&pCtx->es)
+                        || !hmVmxIsDataSelectorOk(&pCtx->fs)
+                        || !hmVmxIsDataSelectorOk(&pCtx->gs)
+                        || !hmVmxIsStackSelectorOk(&pCtx->ss))
+                    {
+                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadSel);
+                        return false;
+                    }
+                }
+                /* VT-x also chokes on invalid TR or LDTR selectors (minix). */
+                if (pCtx->gdtr.cbGdt)
+                {
+                    if ((pCtx->tr.Sel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
+                    {
+                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadTr);
+                        return false;
+                    }
+                    else if ((pCtx->ldtr.Sel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
+                    {
+                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadLdt);
+                        return false;
+                    }
+                }
+                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckPmOk);
+            }
+        }
+        else
+        {
+            if (   !CPUMIsGuestInLongModeEx(pCtx)
+                && !pVM->hm.s.vmx.fUnrestrictedGuest)
+            {
+                if (   !pVM->hm.s.fNestedPaging        /* Requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap */
+                    ||  CPUMIsGuestInRealModeEx(pCtx)) /* Requires a fake TSS for real mode - stored in the VMM device heap */
+                    return false;
+
+                /* Too early for VT-x; Solaris guests will fail with a guru meditation otherwise; same for XP. */
+                if (pCtx->idtr.pIdt == 0 || pCtx->idtr.cbIdt == 0 || pCtx->tr.Sel == 0)
+                    return false;
+
+                /*
+                 * The guest is about to complete the switch to protected mode. Wait a bit longer.
+                 * Windows XP; switch to protected mode; all selectors are marked not present
+                 * in the hidden registers (possible recompiler bug; see load_seg_vm).
+                 */
+                /** @todo Is this supposed recompiler bug still relevant with IEM? */
+                if (pCtx->cs.Attr.n.u1Present == 0)
+                    return false;
+                if (pCtx->ss.Attr.n.u1Present == 0)
+                    return false;
+
+                /*
+                 * Windows XP: possible same as above, but new recompiler requires new
+                 * heuristics? VT-x doesn't seem to like something about the guest state and
+                 * this stuff avoids it.
+                 */
+                /** @todo This check is actually wrong, it doesn't take the direction of the
+                 *        stack segment into account. But, it does the job for now. */
+                if (pCtx->rsp >= pCtx->ss.u32Limit)
+                    return false;
+            }
+        }
+    }
+
+    if (pVM->hm.s.vmx.fEnabled)
+    {
+        uint32_t uCr0Mask;
+
+        /* If bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
+        uCr0Mask = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr0Fixed0;
+
+        /* We ignore the NE bit here on purpose; see HMR0.cpp for details. */
+        uCr0Mask &= ~X86_CR0_NE;
+
+        if (fSupportsRealMode)
+        {
+            /* We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
+            uCr0Mask &= ~(X86_CR0_PG|X86_CR0_PE);
+        }
+        else
+        {
+            /* We support protected mode without paging using identity mapping. */
+            uCr0Mask &= ~X86_CR0_PG;
+        }
+        if ((pCtx->cr0 & uCr0Mask) != uCr0Mask)
+            return false;
+
+        /* If bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
+        uCr0Mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr0Fixed1;
+        if ((pCtx->cr0 & uCr0Mask) != 0)
+            return false;
+
+        /* If bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
+        uCr0Mask  = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr4Fixed0;
+        uCr0Mask &= ~X86_CR4_VMXE;
+        if ((pCtx->cr4 & uCr0Mask) != uCr0Mask)
+            return false;
+
+        /* If bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
+        uCr0Mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr4Fixed1;
+        if ((pCtx->cr4 & uCr0Mask) != 0)
+            return false;
+
+        pVCpu->hm.s.fActive = true;
+        return true;
+    }
+
+    return false;
+}
+
Index: /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 73616)
+++ /trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp	(revision 73617)
@@ -4553,5 +4553,5 @@
         {
             Assert(pVM->hm.s.vmx.pRealModeTSS);
-            Assert(PDMVmmDevHeapIsEnabled(pVM));    /* Guaranteed by HMR3CanExecuteGuest() -XXX- what about inner loop changes? */
+            Assert(PDMVmmDevHeapIsEnabled(pVM));    /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
 
             /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
@@ -8272,7 +8272,5 @@
     if (   !pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest
         &&  CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx))
-    {
         pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
-    }
 
     /*
@@ -13171,5 +13169,4 @@
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
 
-    int rc;
     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
@@ -13181,8 +13178,8 @@
 #endif
         /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
-        rc  = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
-        rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
-        rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
-        rc |= hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
+        int rc  = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
+        rc     |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
+        rc     |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
+        rc     |= hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
         AssertRCReturn(rc, rc);
         Log4Func(("Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pCtx->cs.Sel, pCtx->rip,
@@ -13196,282 +13193,37 @@
     Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
 
-    /* EMInterpretDisasCurrent() requires a lot of the state, save the entire state. */
-    rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
+    int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
     AssertRCReturn(rc, rc);
 
-    PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
-    uint32_t cbOp     = 0;
-    PVM pVM           = pVCpu->CTX_SUFF(pVM);
-    bool fDbgStepping = pVCpu->hm.s.fSingleInstruction;
-    rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
-    if (RT_SUCCESS(rc))
-    {
-        rc = VINF_SUCCESS;
-        Assert(cbOp == pDis->cbInstr);
-        Log4Func(("Disas OpCode=%u CS:EIP %04x:%04RX64\n", pDis->pCurInstr->uOpcode, pCtx->cs.Sel, pCtx->rip));
-        switch (pDis->pCurInstr->uOpcode)
-        {
-            case OP_CLI:
+    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
+    if (rcStrict == VINF_SUCCESS)
+    {
+        if (!CPUMIsGuestInRealModeEx(pCtx))
+        {
+            /*
+             * The guest is no longer in real-mode, check if we can continue executing the
+             * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
+             */
+            if (HMVmxCanExecuteGuest(pVCpu, pCtx))
             {
-                pCtx->eflags.Bits.u1IF = 0;
-                pCtx->eflags.Bits.u1RF = 0;
-                pCtx->rip += pDis->cbInstr;
-                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
-                if (   !fDbgStepping
-                    && pCtx->eflags.Bits.u1TF)
-                {
-                    rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
-                    AssertRCReturn(rc, rc);
-                }
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
-                break;
+                Log4Func(("Mode changed but guest still suitable for executing using VT-x\n"));
+                pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
+                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
             }
-
-            case OP_STI:
+            else
             {
-                bool fOldIF = pCtx->eflags.Bits.u1IF;
-                pCtx->eflags.Bits.u1IF = 1;
-                pCtx->eflags.Bits.u1RF = 0;
-                pCtx->rip += pDis->cbInstr;
-                if (!fOldIF)
-                {
-                    EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
-                    Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
-                }
-                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
-                if (   !fDbgStepping
-                    && pCtx->eflags.Bits.u1TF)
-                {
-                    rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
-                    AssertRCReturn(rc, rc);
-                }
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
-                break;
+                Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
+                rcStrict = VINF_EM_RESCHEDULE;
             }
-
-            case OP_HLT:
-            {
-                rc = VINF_EM_HALT;
-                pCtx->rip += pDis->cbInstr;
-                pCtx->eflags.Bits.u1RF = 0;
-                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
-                break;
-            }
-
-            case OP_POPF:
-            {
-                Log4Func(("POPF CS:EIP %04x:%04RX64\n", pCtx->cs.Sel, pCtx->rip));
-                uint32_t cbParm;
-                uint32_t uMask;
-                bool     fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
-                if (pDis->fPrefix & DISPREFIX_OPSIZE)
-                {
-                    cbParm = 4;
-                    uMask  = 0xffffffff;
-                }
-                else
-                {
-                    cbParm = 2;
-                    uMask  = 0xffff;
-                }
-
-                /* Get the stack pointer & pop the contents of the stack onto Eflags. */
-                RTGCPTR   GCPtrStack = 0;
-                X86EFLAGS Eflags;
-                Eflags.u32 = 0;
-                rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pCtx), pCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
-                                  &GCPtrStack);
-                if (RT_SUCCESS(rc))
-                {
-                    Assert(sizeof(Eflags.u32) >= cbParm);
-                    rc = VBOXSTRICTRC_TODO(PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &Eflags.u32, cbParm, PGMACCESSORIGIN_HM));
-                    AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc)); /** @todo allow strict return codes here */
-                }
-                if (RT_FAILURE(rc))
-                {
-                    rc = VERR_EM_INTERPRETER;
-                    break;
-                }
-                Log4Func(("POPF %#x -> %#RX64 mask=%#x RIP=%#RX64\n", Eflags.u, pCtx->rsp, uMask, pCtx->rip));
-                pCtx->eflags.u32 = (pCtx->eflags.u32 & ~((X86_EFL_POPF_BITS & uMask) | X86_EFL_RF))
-                                 | (Eflags.u32 & X86_EFL_POPF_BITS & uMask);
-                pCtx->esp += cbParm;
-                pCtx->esp &= uMask;
-                pCtx->rip += pDis->cbInstr;
-                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
-                /* Generate a pending-debug exception when the guest stepping over POPF regardless of how
-                   POPF restores EFLAGS.TF. */
-                if (  !fDbgStepping
-                    && fGstStepping)
-                {
-                    rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
-                    AssertRCReturn(rc, rc);
-                }
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
-                break;
-            }
-
-            case OP_PUSHF:
-            {
-                uint32_t cbParm;
-                uint32_t uMask;
-                if (pDis->fPrefix & DISPREFIX_OPSIZE)
-                {
-                    cbParm = 4;
-                    uMask  = 0xffffffff;
-                }
-                else
-                {
-                    cbParm = 2;
-                    uMask  = 0xffff;
-                }
-
-                /* Get the stack pointer & push the contents of eflags onto the stack. */
-                RTGCPTR GCPtrStack = 0;
-                rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pCtx), (pCtx->esp - cbParm) & uMask,
-                                  SELMTOFLAT_FLAGS_CPL0, &GCPtrStack);
-                if (RT_FAILURE(rc))
-                {
-                    rc = VERR_EM_INTERPRETER;
-                    break;
-                }
-                X86EFLAGS Eflags = pCtx->eflags;
-                /* The RF & VM bits are cleared on image stored on stack; see Intel Instruction reference for PUSHF. */
-                Eflags.Bits.u1RF = 0;
-                Eflags.Bits.u1VM = 0;
-
-                rc = VBOXSTRICTRC_TODO(PGMPhysWrite(pVM, (RTGCPHYS)GCPtrStack, &Eflags.u, cbParm, PGMACCESSORIGIN_HM));
-                if (RT_UNLIKELY(rc != VINF_SUCCESS))
-                {
-                    AssertMsgFailed(("%Rrc\n", rc)); /** @todo allow strict return codes here */
-                    rc = VERR_EM_INTERPRETER;
-                    break;
-                }
-                Log4Func(("PUSHF %#x -> %#RGv\n", Eflags.u, GCPtrStack));
-                pCtx->esp -= cbParm;
-                pCtx->esp &= uMask;
-                pCtx->rip += pDis->cbInstr;
-                pCtx->eflags.Bits.u1RF = 0;
-                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
-                if (  !fDbgStepping
-                    && pCtx->eflags.Bits.u1TF)
-                {
-                    rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
-                    AssertRCReturn(rc, rc);
-                }
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
-                break;
-            }
-
-            case OP_IRET:
-            {
-                /** @todo Handle 32-bit operand sizes and check stack limits. See Intel
-                 *        instruction reference. */
-                RTGCPTR  GCPtrStack    = 0;
-                uint32_t uMask         = 0xffff;
-                bool     fGstStepping  = RT_BOOL(pCtx->eflags.Bits.u1TF);
-                uint16_t aIretFrame[3];
-                if (pDis->fPrefix & (DISPREFIX_OPSIZE | DISPREFIX_ADDRSIZE))
-                {
-                    rc = VERR_EM_INTERPRETER;
-                    break;
-                }
-                rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pCtx), pCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
-                                  &GCPtrStack);
-                if (RT_SUCCESS(rc))
-                {
-                    rc = VBOXSTRICTRC_TODO(PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &aIretFrame[0], sizeof(aIretFrame),
-                                                       PGMACCESSORIGIN_HM));
-                    AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc)); /** @todo allow strict return codes here */
-                }
-                if (RT_FAILURE(rc))
-                {
-                    rc = VERR_EM_INTERPRETER;
-                    break;
-                }
-                pCtx->eip                = 0;
-                pCtx->ip                 = aIretFrame[0];
-                pCtx->cs.Sel             = aIretFrame[1];
-                pCtx->cs.ValidSel        = aIretFrame[1];
-                pCtx->cs.u64Base         = (uint64_t)pCtx->cs.Sel << 4;
-                pCtx->eflags.u32         = (pCtx->eflags.u32 & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF))
-                                         | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
-                pCtx->sp                += sizeof(aIretFrame);
-                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
-                                                         | HM_CHANGED_GUEST_CS);
-                /* Generate a pending-debug exception when stepping over IRET regardless of how IRET modifies EFLAGS.TF. */
-                if (   !fDbgStepping
-                    && fGstStepping)
-                {
-                    rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
-                    AssertRCReturn(rc, rc);
-                }
-                Log4Func(("IRET %#RX32 to %04x:%04x\n", GCPtrStack, pCtx->cs.Sel, pCtx->ip));
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
-                break;
-            }
-
-            case OP_INT:
-            {
-                uint16_t uVector = pDis->Param1.uValue & 0xff;
-                hmR0VmxSetPendingIntN(pVCpu, uVector, pDis->cbInstr);
-                /* INT clears EFLAGS.TF, we must not set any pending debug exceptions here. */
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
-                break;
-            }
-
-            case OP_INTO:
-            {
-                if (pCtx->eflags.Bits.u1OF)
-                {
-                    hmR0VmxSetPendingXcptOF(pVCpu, pDis->cbInstr);
-                    /* INTO clears EFLAGS.TF, we must not set any pending debug exceptions here. */
-                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
-                }
-                else
-                {
-                    pCtx->eflags.Bits.u1RF = 0;
-                    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
-                }
-                break;
-            }
-
-            default:
-            {
-                pCtx->eflags.Bits.u1RF = 0; /* This is correct most of the time... */
-                VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pCtx), 0 /* pvFault */,
-                                                                    EMCODETYPE_SUPERVISOR);
-                rc = VBOXSTRICTRC_VAL(rc2);
-                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
-                /** @todo We have to set pending-debug exceptions here when the guest is
-                 *        single-stepping depending on the instruction that was interpreted. */
-
-                /*
-                 * HACK ALERT! Detect mode change and go to ring-3 to properly exit this
-                 *             real mode emulation stuff.
-                 */
-                if (   rc == VINF_SUCCESS
-                    && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
-                {
-                    Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
-                    /** @todo Exit fRealOnV86Active here w/o dropping back to ring-3. */
-                    rc = VINF_EM_RESCHEDULE;
-                }
-
-                Log4Func(("#GP rc=%Rrc\n", rc));
-                break;
-            }
-        }
-    }
-    else
-        rc = VERR_EM_INTERPRETER;
-
-    AssertMsg(   rc == VINF_SUCCESS
-              || rc == VERR_EM_INTERPRETER
-              || rc == VINF_EM_HALT
-              || rc == VINF_EM_RESCHEDULE
-              , ("#GP Unexpected rc=%Rrc\n", rc));
-    return rc;
+        }
+        else
+            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
+    }
+    else if (rcStrict == VINF_IEM_RAISED_XCPT)
+    {
+        rcStrict = VINF_SUCCESS;
+        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
+    }
+    return VBOXSTRICTRC_VAL(rcStrict);
 }
 
Index: /trunk/src/VBox/VMM/VMMR3/EM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/EM.cpp	(revision 73616)
+++ /trunk/src/VBox/VMM/VMMR3/EM.cpp	(revision 73617)
@@ -1451,5 +1451,5 @@
             if (VM_IS_HM_ENABLED(pVM))
             {
-                if (HMR3CanExecuteGuest(pVM, &pVCpu->cpum.GstCtx))
+                if (HMCanExecuteGuest(pVCpu, &pVCpu->cpum.GstCtx))
                     return EMSTATE_HM;
             }
Index: /trunk/src/VBox/VMM/VMMR3/EMHM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/EMHM.cpp	(revision 73616)
+++ /trunk/src/VBox/VMM/VMMR3/EMHM.cpp	(revision 73617)
@@ -86,5 +86,5 @@
     Assert(!(fFlags & ~EM_ONE_INS_FLAGS_MASK));
 
-    if (!HMR3CanExecuteGuest(pVM, &pVCpu->cpum.GstCtx))
+    if (!HMCanExecuteGuest(pVCpu, &pVCpu->cpum.GstCtx))
         return VINF_EM_RESCHEDULE;
 
Index: /trunk/src/VBox/VMM/VMMR3/HM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/HM.cpp	(revision 73616)
+++ /trunk/src/VBox/VMM/VMMR3/HM.cpp	(revision 73617)
@@ -2790,352 +2790,4 @@
 
 /**
- * Checks if a code selector (CS) is suitable for execution
- * within VMX when unrestricted execution isn't available.
- *
- * @returns true if selector is suitable for VMX, otherwise
- *        false.
- * @param   pSel        Pointer to the selector to check (CS).
- * @param   uStackDpl   The CPL, aka the DPL of the stack segment.
- */
-static bool hmR3IsCodeSelectorOkForVmx(PCPUMSELREG pSel, unsigned uStackDpl)
-{
-    /*
-     * Segment must be an accessed code segment, it must be present and it must
-     * be usable.
-     * Note! These are all standard requirements and if CS holds anything else
-     *       we've got buggy code somewhere!
-     */
-    AssertCompile(X86DESCATTR_TYPE == 0xf);
-    AssertMsgReturn(   (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P | X86DESCATTR_UNUSABLE))
-                    ==                 (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P),
-                    ("%#x\n", pSel->Attr.u),
-                    false);
-
-    /* For conforming segments, CS.DPL must be <= SS.DPL, while CS.DPL
-       must equal SS.DPL for non-confroming segments.
-       Note! This is also a hard requirement like above. */
-    AssertMsgReturn(  pSel->Attr.n.u4Type & X86_SEL_TYPE_CONF
-                    ? pSel->Attr.n.u2Dpl <= uStackDpl
-                    : pSel->Attr.n.u2Dpl == uStackDpl,
-                    ("u4Type=%#x u2Dpl=%u uStackDpl=%u\n", pSel->Attr.n.u4Type, pSel->Attr.n.u2Dpl, uStackDpl),
-                    false);
-
-    /*
-     * The following two requirements are VT-x specific:
-     *  - G bit must be set if any high limit bits are set.
-     *  - G bit must be clear if any low limit bits are clear.
-     */
-    if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
-        && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
-        return true;
-    return false;
-}
-
-
-/**
- * Checks if a data selector (DS/ES/FS/GS) is suitable for
- * execution within VMX when unrestricted execution isn't
- * available.
- *
- * @returns true if selector is suitable for VMX, otherwise
- *        false.
- * @param   pSel        Pointer to the selector to check
- *                      (DS/ES/FS/GS).
- */
-static bool hmR3IsDataSelectorOkForVmx(PCPUMSELREG pSel)
-{
-    /*
-     * Unusable segments are OK.  These days they should be marked as such, as
-     * but as an alternative we for old saved states and AMD<->VT-x migration
-     * we also treat segments with all the attributes cleared as unusable.
-     */
-    if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
-        return true;
-
-    /** @todo tighten these checks. Will require CPUM load adjusting. */
-
-    /* Segment must be accessed. */
-    if (pSel->Attr.u & X86_SEL_TYPE_ACCESSED)
-    {
-        /* Code segments must also be readable. */
-        if (  !(pSel->Attr.u & X86_SEL_TYPE_CODE)
-            || (pSel->Attr.u & X86_SEL_TYPE_READ))
-        {
-            /* The S bit must be set. */
-            if (pSel->Attr.n.u1DescType)
-            {
-                /* Except for conforming segments, DPL >= RPL. */
-                if (   pSel->Attr.n.u2Dpl  >= (pSel->Sel & X86_SEL_RPL)
-                    || pSel->Attr.n.u4Type >= X86_SEL_TYPE_ER_ACC)
-                {
-                    /* Segment must be present. */
-                    if (pSel->Attr.n.u1Present)
-                    {
-                        /*
-                         * The following two requirements are VT-x specific:
-                         *   - G bit must be set if any high limit bits are set.
-                         *   - G bit must be clear if any low limit bits are clear.
-                         */
-                        if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
-                            && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
-                            return true;
-                    }
-                }
-            }
-        }
-    }
-
-    return false;
-}
-
-
-/**
- * Checks if the stack selector (SS) is suitable for execution
- * within VMX when unrestricted execution isn't available.
- *
- * @returns true if selector is suitable for VMX, otherwise
- *        false.
- * @param   pSel        Pointer to the selector to check (SS).
- */
-static bool hmR3IsStackSelectorOkForVmx(PCPUMSELREG pSel)
-{
-    /*
-     * Unusable segments are OK.  These days they should be marked as such, as
-     * but as an alternative we for old saved states and AMD<->VT-x migration
-     * we also treat segments with all the attributes cleared as unusable.
-     */
-    /** @todo r=bird: actually all zeroes isn't gonna cut it... SS.DPL == CPL. */
-    if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
-        return true;
-
-    /*
-     * Segment must be an accessed writable segment, it must be present.
-     * Note! These are all standard requirements and if SS holds anything else
-     *       we've got buggy code somewhere!
-     */
-    AssertCompile(X86DESCATTR_TYPE == 0xf);
-    AssertMsgReturn(   (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P | X86_SEL_TYPE_CODE))
-                    ==                 (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P),
-                    ("%#x\n", pSel->Attr.u), false);
-
-    /* DPL must equal RPL.
-       Note! This is also a hard requirement like above. */
-    AssertMsgReturn(pSel->Attr.n.u2Dpl == (pSel->Sel & X86_SEL_RPL),
-                    ("u2Dpl=%u Sel=%#x\n", pSel->Attr.n.u2Dpl, pSel->Sel), false);
-
-    /*
-     * The following two requirements are VT-x specific:
-     *   - G bit must be set if any high limit bits are set.
-     *   - G bit must be clear if any low limit bits are clear.
-     */
-    if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
-        && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
-        return true;
-    return false;
-}
-
-
-/**
- * Checks if we can currently use hardware accelerated mode.
- *
- * @returns true if we can currently use hardware acceleration, otherwise false.
- * @param   pVM         The cross context VM structure.
- * @param   pCtx        Pointer to the guest CPU context.
- */
-VMMR3DECL(bool) HMR3CanExecuteGuest(PVM pVM, PCPUMCTX pCtx)
-{
-    PVMCPU pVCpu = VMMGetCpu(pVM);
-
-    Assert(HMIsEnabled(pVM));
-
-#ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
-    if (   CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
-        || CPUMIsGuestVmxEnabled(pCtx))
-    {
-        Log(("HMR3CanExecuteGuest: In nested-guest mode - returning false"));
-        return false;
-    }
-#endif
-
-    /* AMD-V supports real & protected mode with or without paging. */
-    if (pVM->hm.s.svm.fEnabled)
-    {
-        pVCpu->hm.s.fActive = true;
-        return true;
-    }
-
-    pVCpu->hm.s.fActive = false;
-
-    /* Note! The context supplied by REM is partial. If we add more checks here, be sure to verify that REM provides this info! */
-    Assert(   (pVM->hm.s.vmx.fUnrestrictedGuest && !pVM->hm.s.vmx.pRealModeTSS)
-           || (!pVM->hm.s.vmx.fUnrestrictedGuest && pVM->hm.s.vmx.pRealModeTSS));
-
-    bool fSupportsRealMode = pVM->hm.s.vmx.fUnrestrictedGuest || PDMVmmDevHeapIsEnabled(pVM);
-    if (!pVM->hm.s.vmx.fUnrestrictedGuest)
-    {
-        /*
-         * The VMM device heap is a requirement for emulating real mode or protected mode without paging with the unrestricted
-         * guest execution feature is missing (VT-x only).
-         */
-        if (fSupportsRealMode)
-        {
-            if (CPUMIsGuestInRealModeEx(pCtx))
-            {
-                /*
-                 * In V86 mode (VT-x or not), the CPU enforces real-mode compatible selector
-                 * bases and limits, i.e. limit must be 64K and base must be selector * 16.
-                 * If this is not true, we cannot execute real mode as V86 and have to fall
-                 * back to emulation.
-                 */
-                if (   pCtx->cs.Sel != (pCtx->cs.u64Base >> 4)
-                    || pCtx->ds.Sel != (pCtx->ds.u64Base >> 4)
-                    || pCtx->es.Sel != (pCtx->es.u64Base >> 4)
-                    || pCtx->ss.Sel != (pCtx->ss.u64Base >> 4)
-                    || pCtx->fs.Sel != (pCtx->fs.u64Base >> 4)
-                    || pCtx->gs.Sel != (pCtx->gs.u64Base >> 4))
-                {
-                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelBase);
-                    return false;
-                }
-                if (   (pCtx->cs.u32Limit != 0xffff)
-                    || (pCtx->ds.u32Limit != 0xffff)
-                    || (pCtx->es.u32Limit != 0xffff)
-                    || (pCtx->ss.u32Limit != 0xffff)
-                    || (pCtx->fs.u32Limit != 0xffff)
-                    || (pCtx->gs.u32Limit != 0xffff))
-                {
-                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelLimit);
-                    return false;
-                }
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckRmOk);
-            }
-            else
-            {
-                /*
-                 * Verify the requirements for executing code in protected mode. VT-x can't
-                 * handle the CPU state right after a switch from real to protected mode
-                 * (all sorts of RPL & DPL assumptions).
-                 */
-                if (pVCpu->hm.s.vmx.fWasInRealMode)
-                {
-                    /** @todo If guest is in V86 mode, these checks should be different! */
-                    if ((pCtx->cs.Sel & X86_SEL_RPL) != (pCtx->ss.Sel & X86_SEL_RPL))
-                    {
-                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRpl);
-                        return false;
-                    }
-                    if (   !hmR3IsCodeSelectorOkForVmx(&pCtx->cs, pCtx->ss.Attr.n.u2Dpl)
-                        || !hmR3IsDataSelectorOkForVmx(&pCtx->ds)
-                        || !hmR3IsDataSelectorOkForVmx(&pCtx->es)
-                        || !hmR3IsDataSelectorOkForVmx(&pCtx->fs)
-                        || !hmR3IsDataSelectorOkForVmx(&pCtx->gs)
-                        || !hmR3IsStackSelectorOkForVmx(&pCtx->ss))
-                    {
-                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadSel);
-                        return false;
-                    }
-                }
-                /* VT-x also chokes on invalid TR or LDTR selectors (minix). */
-                if (pCtx->gdtr.cbGdt)
-                {
-                    if ((pCtx->tr.Sel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
-                    {
-                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadTr);
-                        return false;
-                    }
-                    else if ((pCtx->ldtr.Sel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
-                    {
-                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadLdt);
-                        return false;
-                    }
-                }
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckPmOk);
-            }
-        }
-        else
-        {
-            if (   !CPUMIsGuestInLongModeEx(pCtx)
-                && !pVM->hm.s.vmx.fUnrestrictedGuest)
-            {
-                if (   !pVM->hm.s.fNestedPaging        /* Requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap */
-                    ||  CPUMIsGuestInRealModeEx(pCtx)) /* Requires a fake TSS for real mode - stored in the VMM device heap */
-                    return false;
-
-                /* Too early for VT-x; Solaris guests will fail with a guru meditation otherwise; same for XP. */
-                if (pCtx->idtr.pIdt == 0 || pCtx->idtr.cbIdt == 0 || pCtx->tr.Sel == 0)
-                    return false;
-
-                /*
-                 * The guest is about to complete the switch to protected mode. Wait a bit longer.
-                 * Windows XP; switch to protected mode; all selectors are marked not present
-                 * in the hidden registers (possible recompiler bug; see load_seg_vm).
-                 */
-                /** @todo Is this supposed recompiler bug still relevant with IEM? */
-                if (pCtx->cs.Attr.n.u1Present == 0)
-                    return false;
-                if (pCtx->ss.Attr.n.u1Present == 0)
-                    return false;
-
-                /*
-                 * Windows XP: possible same as above, but new recompiler requires new
-                 * heuristics? VT-x doesn't seem to like something about the guest state and
-                 * this stuff avoids it.
-                 */
-                /** @todo This check is actually wrong, it doesn't take the direction of the
-                 *        stack segment into account. But, it does the job for now. */
-                if (pCtx->rsp >= pCtx->ss.u32Limit)
-                    return false;
-            }
-        }
-    }
-
-    if (pVM->hm.s.vmx.fEnabled)
-    {
-        uint32_t uCr0Mask;
-
-        /* If bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
-        uCr0Mask = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr0Fixed0;
-
-        /* We ignore the NE bit here on purpose; see HMR0.cpp for details. */
-        uCr0Mask &= ~X86_CR0_NE;
-
-        if (fSupportsRealMode)
-        {
-            /* We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
-            uCr0Mask &= ~(X86_CR0_PG|X86_CR0_PE);
-        }
-        else
-        {
-            /* We support protected mode without paging using identity mapping. */
-            uCr0Mask &= ~X86_CR0_PG;
-        }
-        if ((pCtx->cr0 & uCr0Mask) != uCr0Mask)
-            return false;
-
-        /* If bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
-        uCr0Mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr0Fixed1;
-        if ((pCtx->cr0 & uCr0Mask) != 0)
-            return false;
-
-        /* If bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
-        uCr0Mask  = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr4Fixed0;
-        uCr0Mask &= ~X86_CR4_VMXE;
-        if ((pCtx->cr4 & uCr0Mask) != uCr0Mask)
-            return false;
-
-        /* If bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
-        uCr0Mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr4Fixed1;
-        if ((pCtx->cr4 & uCr0Mask) != 0)
-            return false;
-
-        pVCpu->hm.s.fActive = true;
-        return true;
-    }
-
-    return false;
-}
-
-
-/**
  * Checks if we need to reschedule due to VMM device heap changes.
  *
Index: /trunk/src/recompiler/VBoxREMWrapper.cpp
===================================================================
--- /trunk/src/recompiler/VBoxREMWrapper.cpp	(revision 73616)
+++ /trunk/src/recompiler/VBoxREMWrapper.cpp	(revision 73617)
@@ -688,10 +688,8 @@
     { REMPARMDESC_FLAGS_INT,        sizeof(RTGCPTR),            NULL }
 };
-static const REMPARMDESC g_aArgsHMR3CanExecuteGuest[] =
-{
-    { REMPARMDESC_FLAGS_INT,        sizeof(PVM),                NULL },
-    { REMPARMDESC_FLAGS_INT,        sizeof(uint32_t),           NULL },
-    { REMPARMDESC_FLAGS_INT,        sizeof(uint32_t),           NULL },
-    { REMPARMDESC_FLAGS_INT,        sizeof(uint32_t),           NULL }
+static const REMPARMDESC g_aArgsHMCanExecuteGuest[] =
+{
+    { REMPARMDESC_FLAGS_INT,        sizeof(PVMCPU),             NULL },
+    { REMPARMDESC_FLAGS_INT,        sizeof(PCPUMCTX),           NULL },
 };
 static const REMPARMDESC g_aArgsIOMIOPortRead[] =
@@ -1248,5 +1246,5 @@
     { "EMSetInhibitInterruptsPC",               VMM_FN(EMSetInhibitInterruptsPC),       &g_aArgsEMSetInhibitInterruptsPC[0],        RT_ELEMENTS(g_aArgsEMSetInhibitInterruptsPC),          REMFNDESC_FLAGS_RET_INT,    sizeof(int),        NULL },
     { "HMIsEnabledNotMacro",                    VMM_FN(HMIsEnabledNotMacro),            &g_aArgsVM[0],                              RT_ELEMENTS(g_aArgsVM),                                REMFNDESC_FLAGS_RET_INT,    sizeof(bool),       NULL },
-    { "HMR3CanExecuteGuest",                    VMM_FN(HMR3CanExecuteGuest),            &g_aArgsHMR3CanExecuteGuest[0],             RT_ELEMENTS(g_aArgsHMR3CanExecuteGuest),               REMFNDESC_FLAGS_RET_INT,    sizeof(bool),       NULL },
+    { "HMCanExecuteGuest",                      VMM_FN(HMCanExecuteGuest),              &g_aArgsHMCanExecuteGuest[0],               RT_ELEMENTS(g_aArgsHMCanExecuteGuest),                 REMFNDESC_FLAGS_RET_INT,    sizeof(bool),       NULL },
     { "IOMIOPortRead",                          VMM_FN(IOMIOPortRead),                  &g_aArgsIOMIOPortRead[0],                   RT_ELEMENTS(g_aArgsIOMIOPortRead),                     REMFNDESC_FLAGS_RET_INT,    sizeof(int),        NULL },
     { "IOMIOPortWrite",                         VMM_FN(IOMIOPortWrite),                 &g_aArgsIOMIOPortWrite[0],                  RT_ELEMENTS(g_aArgsIOMIOPortWrite),                    REMFNDESC_FLAGS_RET_INT,    sizeof(int),        NULL },
Index: /trunk/src/recompiler/VBoxRecompiler.c
===================================================================
--- /trunk/src/recompiler/VBoxRecompiler.c	(revision 73616)
+++ /trunk/src/recompiler/VBoxRecompiler.c	(revision 73617)
@@ -1438,5 +1438,5 @@
 
         /*
-         * Create partial context for HMR3CanExecuteGuest
+         * Create partial context for HMCanExecuteGuest.
          */
         pCtx->cr0            = env->cr[0];
@@ -1513,9 +1513,10 @@
         pCtx->msrEFER        = env->efer;
 
-        /* Hardware accelerated raw-mode:
-         *
+        /*
+         * Hardware accelerated mode:
          * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
          */
-        if (HMR3CanExecuteGuest(env->pVM, pCtx) == true)
+        PVMCPU pVCpu = &env->pVM->aCpus[0];
+        if (HMCanExecuteGuest(pVCpu, pCtx))
         {
             *piException = EXCP_EXECUTE_HM;
