Index: /trunk/src/VBox/Main/src-client/ConsoleImpl2.cpp
===================================================================
--- /trunk/src/VBox/Main/src-client/ConsoleImpl2.cpp	(revision 42436)
+++ /trunk/src/VBox/Main/src-client/ConsoleImpl2.cpp	(revision 42437)
@@ -4748,8 +4748,11 @@
         {
             VBOXHGCMSVCPARM Params[2];
-            pConsole->m_pVMMDev->hgcmHostCall("VBoxGuestPropSvc", guestProp::GET_DBGF_INFO_FN, 2, &Params[0]);
-            PFNDBGFHANDLEREXT pfnHandler = (PFNDBGFHANDLEREXT)(uintptr_t)Params[0].u.pointer.addr;
-            void *pService = (void*)Params[1].u.pointer.addr;
-            DBGFR3InfoRegisterExternal(pVM, "guestprops", "Display the guest properties", pfnHandler, pService);
+            int rc2 = pConsole->m_pVMMDev->hgcmHostCall("VBoxGuestPropSvc", guestProp::GET_DBGF_INFO_FN, 2, &Params[0]);
+            if (RT_SUCCESS(rc2))
+            {
+                PFNDBGFHANDLEREXT pfnHandler = (PFNDBGFHANDLEREXT)(uintptr_t)Params[0].u.pointer.addr;
+                void *pService = (void*)Params[1].u.pointer.addr;
+                DBGFR3InfoRegisterExternal(pVM, "guestprops", "Display the guest properties", pfnHandler, pService);
+            }
         }
 
Index: /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp	(revision 42436)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp	(revision 42437)
@@ -192,4 +192,36 @@
 
 /**
+ * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
+ * occasion.
+ */
+#ifdef LOG_ENABLED
+# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
+    do { \
+        Log(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
+        return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
+    } while (0)
+#else
+# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
+    return VERR_IEM_ASPECT_NOT_IMPLEMENTED
+#endif
+
+/**
+ * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
+ * occasion using the supplied logger statement.
+ *
+ * @param   a_LoggerArgs    What to log on failure.
+ */
+#ifdef LOG_ENABLED
+# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
+    do { \
+        LogFunc(a_LoggerArgs); \
+        return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
+    } while (0)
+#else
+# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
+    return VERR_IEM_ASPECT_NOT_IMPLEMENTED
+#endif
+
+/**
  * Call an opcode decoder function.
  *
@@ -1944,5 +1976,8 @@
         /* Check that there is sufficient space for the stack frame. */
         uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
-        AssertReturn(!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
+        if (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN)
+        {
+            IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n")); /** @todo Implement expand down segment support. */
+        }
 
         uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 24 : 20;
@@ -2093,6 +2128,6 @@
 {
     NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
-    AssertMsgFailed(("V8086 exception / interrupt dispatching\n"));
-    return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
+    /** @todo implement me. */
+    IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("V8086 exception / interrupt dispatching\n"));
 }
 
@@ -2121,6 +2156,6 @@
 {
     NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
-    AssertMsgFailed(("long mode exception / interrupt dispatching\n"));
-    return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
+    /** @todo implement me. */
+    IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("long mode exception / interrupt dispatching\n"));
 }
 
@@ -2164,5 +2199,6 @@
 
         /** @todo double and tripple faults. */
-        AssertReturn(pIemCpu->cXcptRecursions < 3, VERR_IEM_ASPECT_NOT_IMPLEMENTED);
+        if (pIemCpu->cXcptRecursions >= 3)
+            IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
 
         /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
@@ -2600,4 +2636,20 @@
 }
 
+/**
+ * Complains about a stub.
+ *
+ * Providing two versions of this macro, one for daily use and one for use when
+ * working on IEM.
+ */
+#if 0
+# define IEMOP_BITCH_ABOUT_STUB() \
+    do { \
+        RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
+        iemOpStubMsg2(pIemCpu); \
+        RTAssertPanic(); \
+    } while (0)
+#else
+# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
+#endif
 
 /** Stubs an opcode. */
@@ -2605,7 +2657,5 @@
     FNIEMOP_DEF(a_Name) \
     { \
-        RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
-        iemOpStubMsg2(pIemCpu); \
-        RTAssertPanic(); \
+        IEMOP_BITCH_ABOUT_STUB(); \
         return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
     } \
@@ -2616,7 +2666,5 @@
     FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
     { \
-        RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
-        iemOpStubMsg2(pIemCpu); \
-        RTAssertPanic(); \
+        IEMOP_BITCH_ABOUT_STUB(); \
         NOREF(a_Name0); \
         return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
@@ -4310,6 +4358,5 @@
                 {
                     /** @todo implement expand down segments. */
-                    AssertFailed(/** @todo implement this */);
-                    return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
+                    IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n"));
                 }
             }
@@ -7843,4 +7890,16 @@
     }
 
+    if (rcStrict != VINF_SUCCESS)
+    {
+        if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
+            pIemCpu->cRetAspectNotImplemented++;
+        else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
+            pIemCpu->cRetInstrNotImplemented++;
+        else if (RT_SUCCESS(rcStrict))
+            pIemCpu->cRetInfStatuses++;
+        else
+            pIemCpu->cRetErrStatuses++;
+    }
+
     return rcStrict;
 }
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h	(revision 42436)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h	(revision 42437)
@@ -40,5 +40,5 @@
     {
         NOREF(u16Port); NOREF(cbOperand); /** @todo I/O port permission bitmap check */
-        AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
+        IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement I/O permission bitmap\n"));
     }
     return VINF_SUCCESS;
@@ -778,5 +778,5 @@
 {
     /* Call various functions to do the work. */
-    AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
+    IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
 }
 
@@ -794,5 +794,5 @@
 {
     /* Call various functions to do the work. */
-    AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
+    IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
 }
 
@@ -810,5 +810,5 @@
 {
     /* Call various functions to do the work. */
-    AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
+    IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
 }
 
@@ -1899,4 +1899,31 @@
 
 /**
+ * Implements iret for protected mode returning to V8086 mode.
+ *
+ * @param   enmEffOpSize    The effective operand size.
+ * @param   uNewEip         The new EIP.
+ * @param   uNewCs          The new CS.
+ * @param   uNewFlags       The new EFLAGS.
+ * @param   uNewRsp         The RSP after the initial IRET frame.
+ */
+IEM_CIMPL_DEF_5(iemCImpl_iret_prot_v8086, IEMMODE, enmEffOpSize, uint32_t, uNewEip, uint16_t, uNewCs,
+                uint32_t, uNewFlags, uint64_t, uNewRsp)
+{
+    IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
+}
+
+
+/**
+ * Implements iret for protected mode returning via a nested task.
+ *
+ * @param   enmEffOpSize    The effective operand size.
+ */
+IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
+{
+    IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
+}
+
+
+/**
  * Implements iret for protected mode
  *
@@ -1908,302 +1935,289 @@
     NOREF(cbInstr);
 
-Log(("iemCImpl_iret_prot: rip=%#llx ds=%#x es=%#x\n", pCtx->rip, pCtx->ds.Sel, pCtx->es.Sel));
-
     /*
      * Nested task return.
      */
     if (pCtx->eflags.Bits.u1NT)
-    {
-        AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
-    }
+        return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
+
     /*
      * Normal return.
-     */
+     *
+     * Do the stack bits, but don't commit RSP before everything checks
+     * out right.
+     */
+    Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
+    VBOXSTRICTRC    rcStrict;
+    RTCPTRUNION     uFrame;
+    uint16_t        uNewCs;
+    uint32_t        uNewEip;
+    uint32_t        uNewFlags;
+    uint64_t        uNewRsp;
+    if (enmEffOpSize == IEMMODE_32BIT)
+    {
+        rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
+        if (rcStrict != VINF_SUCCESS)
+            return rcStrict;
+        uNewEip    = uFrame.pu32[0];
+        uNewCs     = (uint16_t)uFrame.pu32[1];
+        uNewFlags  = uFrame.pu32[2];
+    }
     else
     {
-        /*
-         * Do the stack bits, but don't commit RSP before everything checks
-         * out right.
-         */
-        Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
-        VBOXSTRICTRC    rcStrict;
-        RTCPTRUNION     uFrame;
-        uint16_t        uNewCs;
-        uint32_t        uNewEip;
-        uint32_t        uNewFlags;
-        uint64_t        uNewRsp;
+        rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
+        if (rcStrict != VINF_SUCCESS)
+            return rcStrict;
+        uNewEip    = uFrame.pu16[0];
+        uNewCs     = uFrame.pu16[1];
+        uNewFlags  = uFrame.pu16[2];
+    }
+    rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
+    if (rcStrict != VINF_SUCCESS)
+        return rcStrict;
+
+    /*
+     * We're hopefully not returning to V8086 mode...
+     */
+    if (   (uNewFlags & X86_EFL_VM)
+        && pIemCpu->uCpl == 0)
+        return IEM_CIMPL_CALL_5(iemCImpl_iret_prot_v8086, enmEffOpSize, uNewEip, uNewCs, uNewFlags, uNewRsp);
+
+    /*
+     * Protected mode.
+     */
+    /* Read the CS descriptor. */
+    if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
+    {
+        Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
+        return iemRaiseGeneralProtectionFault0(pIemCpu);
+    }
+
+    IEMSELDESC DescCS;
+    rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs);
+    if (rcStrict != VINF_SUCCESS)
+    {
+        Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
+        return rcStrict;
+    }
+
+    /* Must be a code descriptor. */
+    if (!DescCS.Legacy.Gen.u1DescType)
+    {
+        Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
+        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
+    }
+    if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
+    {
+        Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
+        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
+    }
+
+    /* Privilege checks. */
+    if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
+    {
+        Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pIemCpu->uCpl));
+        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
+    }
+    if (   (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
+        && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
+    {
+        Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
+        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
+    }
+
+    /* Present? */
+    if (!DescCS.Legacy.Gen.u1Present)
+    {
+        Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
+        return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
+    }
+
+    uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
+
+    /*
+     * Return to outer level?
+     */
+    if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
+    {
+        uint16_t    uNewSS;
+        uint32_t    uNewESP;
         if (enmEffOpSize == IEMMODE_32BIT)
         {
-            rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
+            rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
             if (rcStrict != VINF_SUCCESS)
                 return rcStrict;
-            uNewEip    = uFrame.pu32[0];
-            uNewCs     = (uint16_t)uFrame.pu32[1];
-            uNewFlags  = uFrame.pu32[2];
+            uNewESP = uFrame.pu32[0];
+            uNewSS  = (uint16_t)uFrame.pu32[1];
         }
         else
         {
-            rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
+            rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
             if (rcStrict != VINF_SUCCESS)
                 return rcStrict;
-            uNewEip    = uFrame.pu16[0];
-            uNewCs     = uFrame.pu16[1];
-            uNewFlags  = uFrame.pu16[2];
-        }
-        rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
+            uNewESP = uFrame.pu16[0];
+            uNewSS  = uFrame.pu16[1];
+        }
+        rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;
 
+        /* Read the SS descriptor. */
+        if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
+        {
+            Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
+            return iemRaiseGeneralProtectionFault0(pIemCpu);
+        }
+
+        IEMSELDESC DescSS;
+        rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS);
+        if (rcStrict != VINF_SUCCESS)
+        {
+            Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
+                 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
+            return rcStrict;
+        }
+
+        /* Privilege checks. */
+        if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
+        {
+            Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
+        }
+        if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
+        {
+            Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
+                 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
+        }
+
+        /* Must be a writeable data segment descriptor. */
+        if (!DescSS.Legacy.Gen.u1DescType)
+        {
+            Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
+                 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
+        }
+        if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
+        {
+            Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
+                 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
+        }
+
+        /* Present? */
+        if (!DescSS.Legacy.Gen.u1Present)
+        {
+            Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
+            return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
+        }
+
+        uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
+
+        /* Check EIP. */
+        if (uNewEip > cbLimitCS)
+        {
+            Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
+                 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
+            return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
+        }
+
         /*
-         * What are we returning to?
+         * Commit the changes, marking CS and SS accessed first since
+         * that may fail.
          */
-        if (   (uNewFlags & X86_EFL_VM)
-            && pIemCpu->uCpl == 0)
-        {
-            /* V8086 mode! */
-            AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
-        }
-        else
-        {
-            /*
-             * Protected mode.
-             */
-            /* Read the CS descriptor. */
-            if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
-            {
-                Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
-                return iemRaiseGeneralProtectionFault0(pIemCpu);
-            }
-
-            IEMSELDESC DescCS;
-            rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs);
+        if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
+        {
+            rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
             if (rcStrict != VINF_SUCCESS)
-            {
-                Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
                 return rcStrict;
-            }
-
-            /* Must be a code descriptor. */
-            if (!DescCS.Legacy.Gen.u1DescType)
-            {
-                Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
-                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
-            }
-            if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
-            {
-                Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
-                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
-            }
-
-            /* Privilege checks. */
-            if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
-            {
-                Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pIemCpu->uCpl));
-                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
-            }
-            if (   (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
-                && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
-            {
-                Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
-                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
-            }
-
-            /* Present? */
-            if (!DescCS.Legacy.Gen.u1Present)
-            {
-                Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
-                return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
-            }
-
-            uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
-
-            /*
-             * Return to outer level?
-             */
-            if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
-            {
-                uint16_t    uNewSS;
-                uint32_t    uNewESP;
-                if (enmEffOpSize == IEMMODE_32BIT)
-                {
-                    rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
-                    if (rcStrict != VINF_SUCCESS)
-                        return rcStrict;
-                    uNewESP = uFrame.pu32[0];
-                    uNewSS  = (uint16_t)uFrame.pu32[1];
-                }
-                else
-                {
-                    rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
-                    if (rcStrict != VINF_SUCCESS)
-                        return rcStrict;
-                    uNewESP = uFrame.pu16[0];
-                    uNewSS  = uFrame.pu16[1];
-                }
-                rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
-                if (rcStrict != VINF_SUCCESS)
-                    return rcStrict;
-
-                /* Read the SS descriptor. */
-                if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
-                {
-                    Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
-                    return iemRaiseGeneralProtectionFault0(pIemCpu);
-                }
-
-                IEMSELDESC DescSS;
-                rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS);
-                if (rcStrict != VINF_SUCCESS)
-                {
-                    Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
-                         uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
-                    return rcStrict;
-                }
-
-                /* Privilege checks. */
-                if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
-                {
-                    Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
-                    return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
-                }
-                if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
-                {
-                    Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
-                         uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
-                    return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
-                }
-
-                /* Must be a writeable data segment descriptor. */
-                if (!DescSS.Legacy.Gen.u1DescType)
-                {
-                    Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
-                         uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
-                    return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
-                }
-                if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
-                {
-                    Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
-                         uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
-                    return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
-                }
-
-                /* Present? */
-                if (!DescSS.Legacy.Gen.u1Present)
-                {
-                    Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
-                    return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
-                }
-
-                uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
-
-                /* Check EIP. */
-                if (uNewEip > cbLimitCS)
-                {
-                    Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
-                         uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
-                    return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
-                }
-
-                /*
-                 * Commit the changes, marking CS and SS accessed first since
-                 * that may fail.
-                 */
-                if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
-                {
-                    rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
-                    if (rcStrict != VINF_SUCCESS)
-                        return rcStrict;
-                    DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
-                }
-                if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
-                {
-                    rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
-                    if (rcStrict != VINF_SUCCESS)
-                        return rcStrict;
-                    DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
-                }
-
-                pCtx->rip           = uNewEip;
-                pCtx->cs.Sel        = uNewCs;
-                pCtx->cs.ValidSel   = uNewCs;
-                pCtx->cs.fFlags     = CPUMSELREG_FLAGS_VALID;
-                pCtx->cs.Attr.u     = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
-                pCtx->cs.u32Limit   = cbLimitCS;
-                pCtx->cs.u64Base    = X86DESC_BASE(&DescCS.Legacy);
-                pCtx->rsp           = uNewESP;
-                pCtx->ss.Sel        = uNewSS;
-                pCtx->ss.ValidSel   = uNewSS;
-                pCtx->ss.fFlags     = CPUMSELREG_FLAGS_VALID;
-                pCtx->ss.Attr.u     = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
-                pCtx->ss.u32Limit   = cbLimitSs;
-                pCtx->ss.u64Base    = X86DESC_BASE(&DescSS.Legacy);
-
-                uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF  | X86_EFL_SF
-                                     | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
-                if (enmEffOpSize != IEMMODE_16BIT)
-                    fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
-                if (pIemCpu->uCpl == 0)
-                    fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
-                else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
-                    fEFlagsMask |= X86_EFL_IF;
-                pCtx->eflags.u     &= ~fEFlagsMask;
-                pCtx->eflags.u     |= fEFlagsMask & uNewFlags;
-
-                pIemCpu->uCpl       = uNewCs & X86_SEL_RPL;
-                iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
-                iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
-                iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
-                iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
-
-                /* Done! */
-
-            }
-            /*
-             * Return to the same level.
-             */
-            else
-            {
-                /* Check EIP. */
-                if (uNewEip > cbLimitCS)
-                {
-                    Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
-                    return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
-                }
-
-                /*
-                 * Commit the changes, marking CS first since it may fail.
-                 */
-                if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
-                {
-                    rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
-                    if (rcStrict != VINF_SUCCESS)
-                        return rcStrict;
-                    DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
-                }
-
-                pCtx->rip           = uNewEip;
-                pCtx->cs.Sel        = uNewCs;
-                pCtx->cs.ValidSel   = uNewCs;
-                pCtx->cs.fFlags     = CPUMSELREG_FLAGS_VALID;
-                pCtx->cs.Attr.u     = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
-                pCtx->cs.u32Limit   = cbLimitCS;
-                pCtx->cs.u64Base    = X86DESC_BASE(&DescCS.Legacy);
-                pCtx->rsp           = uNewRsp;
-
-                uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF  | X86_EFL_SF
-                                     | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
-                if (enmEffOpSize != IEMMODE_16BIT)
-                    fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
-                if (pIemCpu->uCpl == 0)
-                    fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
-                else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
-                    fEFlagsMask |= X86_EFL_IF;
-                pCtx->eflags.u         &= ~fEFlagsMask;
-                pCtx->eflags.u         |= fEFlagsMask & uNewFlags;
-                /* Done! */
-            }
-        }
-    }
-
+            DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
+        }
+        if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
+        {
+            rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
+            if (rcStrict != VINF_SUCCESS)
+                return rcStrict;
+            DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
+        }
+
+        pCtx->rip           = uNewEip;
+        pCtx->cs.Sel        = uNewCs;
+        pCtx->cs.ValidSel   = uNewCs;
+        pCtx->cs.fFlags     = CPUMSELREG_FLAGS_VALID;
+        pCtx->cs.Attr.u     = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
+        pCtx->cs.u32Limit   = cbLimitCS;
+        pCtx->cs.u64Base    = X86DESC_BASE(&DescCS.Legacy);
+        pCtx->rsp           = uNewESP;
+        pCtx->ss.Sel        = uNewSS;
+        pCtx->ss.ValidSel   = uNewSS;
+        pCtx->ss.fFlags     = CPUMSELREG_FLAGS_VALID;
+        pCtx->ss.Attr.u     = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
+        pCtx->ss.u32Limit   = cbLimitSs;
+        pCtx->ss.u64Base    = X86DESC_BASE(&DescSS.Legacy);
+
+        uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF  | X86_EFL_SF
+                             | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
+        if (enmEffOpSize != IEMMODE_16BIT)
+            fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
+        if (pIemCpu->uCpl == 0)
+            fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
+        else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
+            fEFlagsMask |= X86_EFL_IF;
+        pCtx->eflags.u     &= ~fEFlagsMask;
+        pCtx->eflags.u     |= fEFlagsMask & uNewFlags;
+
+        pIemCpu->uCpl       = uNewCs & X86_SEL_RPL;
+        iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
+        iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
+        iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
+        iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
+
+        /* Done! */
+
+    }
+    /*
+     * Return to the same level.
+     */
+    else
+    {
+        /* Check EIP. */
+        if (uNewEip > cbLimitCS)
+        {
+            Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
+            return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
+        }
+
+        /*
+         * Commit the changes, marking CS first since it may fail.
+         */
+        if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
+        {
+            rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
+            if (rcStrict != VINF_SUCCESS)
+                return rcStrict;
+            DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
+        }
+
+        pCtx->rip           = uNewEip;
+        pCtx->cs.Sel        = uNewCs;
+        pCtx->cs.ValidSel   = uNewCs;
+        pCtx->cs.fFlags     = CPUMSELREG_FLAGS_VALID;
+        pCtx->cs.Attr.u     = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
+        pCtx->cs.u32Limit   = cbLimitCS;
+        pCtx->cs.u64Base    = X86DESC_BASE(&DescCS.Legacy);
+        pCtx->rsp           = uNewRsp;
+
+        uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF  | X86_EFL_SF
+                             | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
+        if (enmEffOpSize != IEMMODE_16BIT)
+            fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
+        if (pIemCpu->uCpl == 0)
+            fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
+        else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
+            fEFlagsMask |= X86_EFL_IF;
+        pCtx->eflags.u         &= ~fEFlagsMask;
+        pCtx->eflags.u         |= fEFlagsMask & uNewFlags;
+        /* Done! */
+    }
     return VINF_SUCCESS;
 }
@@ -2222,5 +2236,5 @@
 
     NOREF(pIemCpu); NOREF(cbInstr); NOREF(enmEffOpSize);
-    return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
+    IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
 }
 
@@ -2894,5 +2908,5 @@
         case 8:
             if (!IEM_VERIFICATION_ENABLED(pIemCpu))
-                AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
+                IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement CR8/TPR read\n")); /** @todo implement CR8 reading and writing. */
             else
                 crX = 0xff;
@@ -3177,5 +3191,5 @@
         case 8:
             if (!IEM_VERIFICATION_ENABLED(pIemCpu))
-                AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
+                IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement CR8/TPR read\n")); /** @todo implement CR8 reading and writing. */
             else
                 rcStrict = VINF_SUCCESS;
@@ -3584,5 +3598,5 @@
     {
         /** @todo I/O port permission bitmap check */
-        AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
+        IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement I/O permission bitmap checks.\n"));
     }
 
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h	(revision 42436)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h	(revision 42437)
@@ -625,5 +625,6 @@
 {
     NOREF(pIemCpu); NOREF(bRm);
-    AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
+    IEMOP_BITCH_ABOUT_STUB();
+    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
 }
 
@@ -665,5 +666,6 @@
 {
     NOREF(pIemCpu); NOREF(bRm);
-    AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
+    IEMOP_BITCH_ABOUT_STUB();
+    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
 }
 
@@ -673,5 +675,6 @@
 {
     NOREF(pIemCpu);
-    AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
+    IEMOP_BITCH_ABOUT_STUB();
+    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
 }
 
@@ -681,5 +684,6 @@
 {
     NOREF(pIemCpu);
-    AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
+    IEMOP_BITCH_ABOUT_STUB();
+    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
 }
 
@@ -862,5 +866,6 @@
 {
     NOREF(pIemCpu);
-    AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
+    IEMOP_BITCH_ABOUT_STUB();
+    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
 }
 
@@ -870,5 +875,6 @@
 {
     NOREF(pIemCpu);
-    AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
+    IEMOP_BITCH_ABOUT_STUB();
+    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
 }
 
@@ -973,6 +979,19 @@
 /** Opcode 0x0f 0x08. */
 FNIEMOP_STUB(iemOp_invd);
+
+
 /** Opcode 0x0f 0x09. */
-FNIEMOP_STUB(iemOp_wbinvd);
+FNIEMOP_DEF(iemOp_wbinvd)
+{
+    IEMOP_MNEMONIC("wbinvd");
+    IEMOP_HLP_NO_LOCK_PREFIX();
+    IEM_MC_BEGIN(0, 0);
+    IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO();
+    IEM_MC_ADVANCE_RIP();
+    IEM_MC_END();
+    return VINF_SUCCESS; /* ignore for now */
+}
+
+
 /** Opcode 0x0f 0x0b. */
 FNIEMOP_STUB(iemOp_ud2);
Index: /trunk/src/VBox/VMM/VMMR3/IEMR3.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/IEMR3.cpp	(revision 42436)
+++ /trunk/src/VBox/VMM/VMMR3/IEMR3.cpp	(revision 42437)
@@ -38,4 +38,19 @@
         pVCpu->iem.s.pCtxR0   = VM_R0_ADDR(pVM, pVCpu->iem.s.pCtxR3);
         pVCpu->iem.s.pCtxRC   = VM_RC_ADDR(pVM, pVCpu->iem.s.pCtxR3);
+
+        STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions,               STAMTYPE_U32,       STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                        "Instructions interpreted",          "/IEM/CPU%u/cInstructions", idCpu);
+        STAMR3RegisterF(pVM, &pVCpu->iem.s.cPotentialExits,             STAMTYPE_U32,       STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                        "Potential exits",                   "/IEM/CPU%u/cPotentialExits", idCpu);
+        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetAspectNotImplemented,    STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                        "VERR_IEM_ASPECT_NOT_IMPLEMENTED",   "/IEM/CPU%u/cRetAspectNotImplemented", idCpu);
+        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInstrNotImplemented,     STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                        "VERR_IEM_INSTR_NOT_IMPLEMENTED",    "/IEM/CPU%u/cRetInstrNotImplemented", idCpu);
+        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInfStatuses,             STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                        "Informational statuses returned",   "/IEM/CPU%u/cRetInfStatuses", idCpu);
+        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetErrStatuses,             STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                        "Error statuses returned",           "/IEM/CPU%u/cRetErrStatuses", idCpu);
+        STAMR3RegisterF(pVM, &pVCpu->iem.s.cbWritten,                   STAMTYPE_U32,       STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
+                        "Approx bytes written",              "/IEM/CPU%u/cbWritten", idCpu);
     }
     return VINF_SUCCESS;
Index: /trunk/src/VBox/VMM/include/IEMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/include/IEMInternal.h	(revision 42436)
+++ /trunk/src/VBox/VMM/include/IEMInternal.h	(revision 42437)
@@ -231,4 +231,12 @@
      * This may contain uncommitted writes.  */
     uint32_t                cbWritten;
+    /** Counts the VERR_IEM_INSTR_NOT_IMPLEMENTED returns. */
+    uint32_t                cRetInstrNotImplemented;
+    /** Counts the VERR_IEM_ASPECT_NOT_IMPLEMENTED returns. */
+    uint32_t                cRetAspectNotImplemented;
+    /** Counts informational statuses returned (other than VINF_SUCCESS). */
+    uint32_t                cRetInfStatuses;
+    /** Counts other error statuses returned. */
+    uint32_t                cRetErrStatuses;
 #ifdef IEM_VERIFICATION_MODE
     /** The Number of I/O port reads that has been performed. */
Index: /trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp
===================================================================
--- /trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp	(revision 42436)
+++ /trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp	(revision 42437)
@@ -77,4 +77,7 @@
 
 #define IEM_NOT_REACHED_DEFAULT_CASE_RET()                  default: return VERR_IPE_NOT_REACHED_DEFAULT_CASE
+#define IEM_RETURN_ASPECT_NOT_IMPLEMENTED()                 return IEM_RETURN_ASPECT_NOT_IMPLEMENTED
+#define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) return IEM_RETURN_ASPECT_NOT_IMPLEMENTED
+
 
 #define IEM_OPCODE_GET_NEXT_U8(a_pu8)                       do { *(a_pu8)  = g_bRandom; CHK_PTYPE(uint8_t  *, a_pu8);  } while (0)
@@ -99,4 +102,5 @@
 #define IEMOP_MNEMONIC(a_szMnemonic)                        do { } while (0)
 #define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps)              do { } while (0)
+#define IEMOP_BITCH_ABOUT_STUB()                            do { } while (0)
 #define FNIEMOP_STUB(a_Name) \
     FNIEMOP_DEF(a_Name) { return VERR_NOT_IMPLEMENTED; } \
