Index: /trunk/include/VBox/vmm/cpum.h
===================================================================
--- /trunk/include/VBox/vmm/cpum.h	(revision 45275)
+++ /trunk/include/VBox/vmm/cpum.h	(revision 45276)
@@ -485,4 +485,28 @@
 DECLASM(void)           CPUMGCCallV86Code(PCPUMCTXCORE pRegFrame);
 
+
+/**
+ * Get the current privilege level of the guest.
+ *
+ * @returns CPL
+ * @param   pVCpu       The current virtual CPU.
+ * @param   pRegFrame   Pointer to the register frame.
+ */
+VMMDECL(uint32_t)       CPUMRCGetGuestCPL(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame);
+
+#ifdef VBOX_WITH_RAW_RING1
+/**
+ * Transforms the guest CPU state to raw-ring mode.
+ *
+ * This function will change any of the CS and SS registers with DPL=0 to DPL=1.
+ *
+ * (No return value; the register context is adjusted in place.)
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtxCore    The context core (for trap usage).
+ * @see     @ref pg_raw
+ */
+VMMDECL(void)         CPUMRCRecheckRawState(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore);
+#endif
+
 /** @} */
 #endif /* IN_RC */
Index: /trunk/include/VBox/vmm/em.h
===================================================================
--- /trunk/include/VBox/vmm/em.h	(revision 45275)
+++ /trunk/include/VBox/vmm/em.h	(revision 45276)
@@ -133,4 +133,17 @@
 #define EMIsRawRing0Enabled(pVM) (!(pVM)->fRecompileSupervisor)
 
+#ifdef VBOX_WITH_RAW_RING1
+/**
+ * Checks if raw ring-1 execute mode is enabled.
+ *
+ * @returns true if enabled.
+ * @returns false if disabled.
+ * @param   pVM         The VM to operate on.
+ */
+#define EMIsRawRing1Enabled(pVM) ((pVM)->fRawRing1Enabled)
+#else
+#define EMIsRawRing1Enabled(pVM) false
+#endif
+
 /**
  * Checks if execution with hardware assisted virtualization is enabled.
Index: /trunk/include/VBox/vmm/selm.h
===================================================================
--- /trunk/include/VBox/vmm/selm.h	(revision 45275)
+++ /trunk/include/VBox/vmm/selm.h	(revision 45276)
@@ -107,4 +107,8 @@
 VMMR3DECL(bool)         SELMR3CheckTSS(PVM pVM);
 VMMR3DECL(int)          SELMR3DebugCheck(PVM pVM);
+#ifdef VBOX_WITH_SAFE_STR
+VMMR3DECL(bool)         SELMR3CheckShadowTR(PVM pVM);
+#endif
+
 /** @def SELMR3_DEBUG_CHECK
  * Invokes SELMR3DebugCheck in stricts builds. */
Index: /trunk/include/VBox/vmm/vm.h
===================================================================
--- /trunk/include/VBox/vmm/vm.h	(revision 45275)
+++ /trunk/include/VBox/vmm/vm.h	(revision 45276)
@@ -843,4 +843,6 @@
     /** Whether to recompile supervisor mode code or run it raw/hm. */
     bool                        fRecompileSupervisor;
+    /** Whether raw mode supports ring-1 code or not. */
+    bool                        fRawRing1Enabled;
     /** PATM enabled flag.
      * This is placed here for performance reasons. */
@@ -862,5 +864,5 @@
 
     /** Alignment padding.. */
-    uint32_t                    uPadding1;
+    uint8_t                     uPadding1[3];
 
     /** @name Debugging
Index: /trunk/include/VBox/vmm/vm.mac
===================================================================
--- /trunk/include/VBox/vmm/vm.mac	(revision 45275)
+++ /trunk/include/VBox/vmm/vm.mac	(revision 45276)
@@ -59,4 +59,5 @@
     .fRecompileUser         resb 1
     .fRecompileSupervisor   resb 1
+    .fRawRing1Enabled       resb 1
     .fPATMEnabled           resb 1
     .fCSAMEnabled           resb 1
@@ -66,5 +67,5 @@
     .fUseLargePages         resb 1
 
-    .uPadding1              resd 1
+    .uPadding1              resb 3
 
     .hTraceBufRC            RTRCPTR_RES 1
Index: /trunk/src/VBox/Main/Makefile.kmk
===================================================================
--- /trunk/src/VBox/Main/Makefile.kmk	(revision 45275)
+++ /trunk/src/VBox/Main/Makefile.kmk	(revision 45276)
@@ -56,4 +56,5 @@
 VBOX_MAIN_DEFS += \
 	$(if $(VBOX_WITH_RAW_MODE),VBOX_WITH_RAW_MODE,) \
+	$(if $(VBOX_WITH_RAW_RING1),VBOX_WITH_RAW_RING1,) \
 	$(if $(VBOX_WITH_NETFLT),VBOX_WITH_NETFLT,) \
 	$(if $(VBOX_WITH_COPYTOGUEST),VBOX_WITH_COPYTOGUEST,) \
Index: /trunk/src/VBox/Main/src-all/Global.cpp
===================================================================
--- /trunk/src/VBox/Main/src-all/Global.cpp	(revision 45275)
+++ /trunk/src/VBox/Main/src-all/Global.cpp	(revision 45276)
@@ -313,5 +313,9 @@
         StorageControllerType_PIIX4, StorageBus_IDE, ChipsetType_PIIX3, AudioControllerType_AC97  },
     { "Other",   "Other",             "QNX",                "QNX",
+#ifdef VBOX_WITH_RAW_RING1
+      VBOXOSTYPE_QNX,             VBOXOSHINT_NONE,
+#else
       VBOXOSTYPE_QNX,             VBOXOSHINT_HWVIRTEX,
+#endif
        512,   4,  4 * _1G64, NetworkAdapterType_Am79C973, 0, StorageControllerType_PIIX4, StorageBus_IDE,
       StorageControllerType_PIIX4, StorageBus_IDE, ChipsetType_PIIX3, AudioControllerType_AC97  },
Index: /trunk/src/VBox/Main/src-client/ConsoleImpl2.cpp
===================================================================
--- /trunk/src/VBox/Main/src-client/ConsoleImpl2.cpp	(revision 45275)
+++ /trunk/src/VBox/Main/src-client/ConsoleImpl2.cpp	(revision 45276)
@@ -810,4 +810,13 @@
         InsertConfigInteger(pRoot, "CSAMEnabled",          1);     /* boolean */
 #endif
+
+#ifdef VBOX_WITH_RAW_RING1
+        if (osTypeId == "QNX")
+        {
+            /* QNX needs special treatment in raw mode due to its use of ring-1. */
+            InsertConfigInteger(pRoot, "RawR1Enabled",     1);     /* boolean */
+        }
+#endif
+
         /* Not necessary, but to make sure these two settings end up in the release log. */
         BOOL fPageFusion = FALSE;
Index: /trunk/src/VBox/VMM/Makefile.kmk
===================================================================
--- /trunk/src/VBox/VMM/Makefile.kmk	(revision 45275)
+++ /trunk/src/VBox/VMM/Makefile.kmk	(revision 45276)
@@ -57,4 +57,11 @@
  VMM_COMMON_DEFS += VBOX_WITH_OLD_VTX_CODE
 endif
+ifdef VBOX_WITH_SAFE_STR
+ VMM_COMMON_DEFS += VBOX_WITH_SAFE_STR
+endif
+ifdef VBOX_WITH_RAW_RING1
+ VMM_COMMON_DEFS += VBOX_WITH_RAW_RING1
+endif
+
 # VMM_COMMON_DEFS += VBOX_WITH_NS_ACCOUNTING_STATS
 
Index: /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp	(revision 45275)
+++ /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp	(revision 45276)
@@ -27,4 +27,5 @@
 #include <VBox/vmm/pgm.h>
 #include <VBox/vmm/mm.h>
+#include <VBox/vmm/em.h>
 #if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
 # include <VBox/vmm/selm.h>
@@ -2656,6 +2657,19 @@
                 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
 #ifdef VBOX_WITH_RAW_MODE_NOT_R0
+# ifdef VBOX_WITH_RAW_RING1
+                if (pVCpu->cpum.s.fRawEntered)
+                {
+                    if (    EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM))
+                        &&  uCpl == 2)
+                        uCpl = 1;
+                    else
+                    if (uCpl == 1)
+                        uCpl = 0;
+                }
+                Assert(uCpl != 2);  /* ring 2 support not allowed anymore. */
+# else
                 if (uCpl == 1)
                     uCpl = 0;
+# endif
 #endif
             }
Index: /trunk/src/VBox/VMM/VMMAll/EMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/EMAll.cpp	(revision 45275)
+++ /trunk/src/VBox/VMM/VMMAll/EMAll.cpp	(revision 45276)
@@ -54,4 +54,8 @@
 //# define VBOX_SAME_AS_EM
 //# define VBOX_COMPARE_IEM_LAST
+#endif
+
+#ifdef VBOX_WITH_RAW_RING1
+#define EM_EMULATE_SMSW
 #endif
 
@@ -1029,5 +1033,5 @@
 }
 
-#if defined(IN_RC) /*&& defined(VBOX_WITH_PATM)*/
+#ifdef IN_RC
 
 DECLINLINE(int) emRCStackRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCPTR GCPtrSrc, uint32_t cb)
@@ -1095,5 +1099,72 @@
 }
 
-#endif /* IN_RC && VBOX_WITH_PATM */
+/**
+ * IRET Emulation.
+ */
+static int emInterpretIret(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
+{
+#ifdef VBOX_WITH_RAW_RING1
+    NOREF(pvFault); NOREF(pcbSize);
+    if (EMIsRawRing1Enabled(pVM))
+    {
+        RTGCUINTPTR pIretStack = (RTGCUINTPTR)pRegFrame->esp;
+        RTGCUINTPTR eip, cs, esp, ss, eflags, uMask;
+        int         rc;
+        uint32_t    cpl, rpl;
+
+        /* We only execute 32-bits protected mode code in raw mode, so no need to bother to check for 16-bits code here. */
+        /** @todo We don't verify all the edge cases that generate \#GP faults. */
+
+        Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
+        Assert(!CPUMIsGuestIn64BitCode(pVCpu));
+        /** @todo Rainy day: Test what happens when VERR_EM_INTERPRETER is returned by
+         *        this function.  Fear that it may guru on us, thus not converted to
+         *        IEM. */
+
+        rc  = emRCStackRead(pVM, pVCpu, pRegFrame, &eip,      (RTGCPTR)pIretStack      , 4);
+        rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &cs,       (RTGCPTR)(pIretStack + 4), 4);
+        rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &eflags,   (RTGCPTR)(pIretStack + 8), 4);
+        AssertRCReturn(rc, VERR_EM_INTERPRETER);
+        AssertReturn(eflags & X86_EFL_VM, VERR_EM_INTERPRETER);
+
+        /* Deal with V86 mode. NOTE(review): the assert above requires X86_EFL_VM, which would make this the only reachable path and the ring-transition code below dead -- verify whether the condition is inverted. */
+        if (eflags & X86_EFL_VM)
+            return EMInterpretIretV86ForPatm(pVM, pVCpu, pRegFrame);
+
+        cpl = CPUMRCGetGuestCPL(pVCpu, pRegFrame);
+        rpl = cs & X86_SEL_RPL;
+
+        Log(("emInterpretIret: iret to CS:EIP=%04X:%08X eflags=%x\n", cs, eip, eflags));
+        if (rpl != cpl)
+        {
+            rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &esp,      (RTGCPTR)(pIretStack + 12), 4);
+            rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ss,       (RTGCPTR)(pIretStack + 16), 4);
+            AssertRCReturn(rc, VERR_EM_INTERPRETER);
+            Log(("emInterpretIret: return to different privilege level (rpl=%d cpl=%d)\n", rpl, cpl));
+            Log(("emInterpretIret: SS:ESP=%04X:%08X\n", ss, esp));
+            pRegFrame->ss.Sel = ss;
+            pRegFrame->esp    = esp;
+        }
+        pRegFrame->cs.Sel = cs;
+        pRegFrame->eip    = eip;
+
+        /* Adjust CS & SS as required. */
+        CPUMRCRecheckRawState(pVCpu, pRegFrame);
+
+        /* Mask away all reserved bits */
+        uMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM | X86_EFL_AC | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_ID;
+        eflags &= uMask;
+
+        CPUMRawSetEFlags(pVCpu, eflags);
+        Assert((pRegFrame->eflags.u32 & (X86_EFL_IF|X86_EFL_IOPL)) == X86_EFL_IF);
+        return VINF_SUCCESS;
+    }
+#else
+    NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
+#endif
+    return VERR_EM_INTERPRETER;
+}
+
+#endif /* IN_RC */
 
 
@@ -2508,119 +2579,132 @@
         return VERR_EM_INTERPRETER;
 
-#ifdef IN_RC
-    if (TRPMHasTrap(pVCpu))
-    {
-        if (TRPMGetErrorCode(pVCpu) & X86_TRAP_PF_RW)
+    if (param1.type == DISQPV_TYPE_ADDRESS)
+    {
+        RTGCPTR pDest;
+        uint64_t val64;
+
+        switch(param1.type)
         {
-#else
-        /** @todo Make this the default and don't rely on TRPM information. */
-        if (param1.type == DISQPV_TYPE_ADDRESS)
+        case DISQPV_TYPE_IMMEDIATE:
+            if(!(param1.flags  & (DISQPV_FLAG_32|DISQPV_FLAG_64)))
+                return VERR_EM_INTERPRETER;
+            /* fallthru */
+
+        case DISQPV_TYPE_ADDRESS:
+            pDest = (RTGCPTR)param1.val.val64;
+            pDest = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pDest);
+            break;
+
+        default:
+            AssertFailed();
+            return VERR_EM_INTERPRETER;
+        }
+
+        switch(param2.type)
         {
-#endif
-            RTGCPTR pDest;
-            uint64_t val64;
-
-            switch(param1.type)
+        case DISQPV_TYPE_IMMEDIATE: /* register type is translated to this one too */
+            val64 = param2.val.val64;
+            break;
+
+        default:
+            Log(("emInterpretMov: unexpected type=%d rip=%RGv\n", param2.type, (RTGCPTR)pRegFrame->rip));
+            return VERR_EM_INTERPRETER;
+        }
+#ifdef LOG_ENABLED
+        if (pDis->uCpuMode == DISCPUMODE_64BIT)
+            LogFlow(("EMInterpretInstruction at %RGv: OP_MOV %RGv <- %RX64 (%d) &val64=%RHv\n", (RTGCPTR)pRegFrame->rip, pDest, val64, param2.size, &val64));
+        else
+            LogFlow(("EMInterpretInstruction at %08RX64: OP_MOV %RGv <- %08X  (%d) &val64=%RHv\n", pRegFrame->rip, pDest, (uint32_t)val64, param2.size, &val64));
+#endif
+
+        Assert(param2.size <= 8 && param2.size > 0);
+        EM_ASSERT_FAULT_RETURN(pDest == pvFault, VERR_EM_INTERPRETER);
+        rc = emRamWrite(pVM, pVCpu, pRegFrame, pDest, &val64, param2.size);
+        if (RT_FAILURE(rc))
+            return VERR_EM_INTERPRETER;
+
+        *pcbSize = param2.size;
+    }
+#if defined(IN_RC) && defined(VBOX_WITH_RAW_RING1)
+    /* mov xx, cs instruction is dangerous in raw mode and replaced by an 'int3' by csam/patm. */
+    else if (   param1.type == DISQPV_TYPE_REGISTER
+             && param2.type == DISQPV_TYPE_REGISTER)
+    {
+        AssertReturn((pDis->Param1.fUse & (DISUSE_REG_GEN8|DISUSE_REG_GEN16|DISUSE_REG_GEN32)), VERR_EM_INTERPRETER);
+        AssertReturn(pDis->Param2.fUse == DISUSE_REG_SEG, VERR_EM_INTERPRETER);
+        AssertReturn(pDis->Param2.Base.idxSegReg == DISSELREG_CS, VERR_EM_INTERPRETER);
+
+        uint32_t u32Cpl = CPUMRCGetGuestCPL(pVCpu, pRegFrame);
+        uint32_t uValCS = (pRegFrame->cs.Sel & ~X86_SEL_RPL) | u32Cpl;
+
+        Log(("EMInterpretInstruction: OP_MOV cs=%x->%x\n", pRegFrame->cs.Sel, uValCS));
+        switch (param1.size)
+        {
+        case 1: rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg,  (uint8_t) uValCS); break;
+        case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)uValCS); break;
+        case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)uValCS); break;
+        default:
+            AssertFailed();
+            return VERR_EM_INTERPRETER;
+        }
+        AssertRCReturn(rc, rc);
+    }    
+#endif
+    else
+    { /* read fault */
+        RTGCPTR pSrc;
+        uint64_t val64;
+
+        /* Source */
+        switch(param2.type)
+        {
+        case DISQPV_TYPE_IMMEDIATE:
+            if(!(param2.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64)))
+                return VERR_EM_INTERPRETER;
+            /* fallthru */
+
+        case DISQPV_TYPE_ADDRESS:
+            pSrc = (RTGCPTR)param2.val.val64;
+            pSrc = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param2, pSrc);
+            break;
+
+        default:
+            return VERR_EM_INTERPRETER;
+        }
+
+        Assert(param1.size <= 8 && param1.size > 0);
+        EM_ASSERT_FAULT_RETURN(pSrc == pvFault, VERR_EM_INTERPRETER);
+        rc = emRamRead(pVM, pVCpu, pRegFrame, &val64, pSrc, param1.size);
+        if (RT_FAILURE(rc))
+            return VERR_EM_INTERPRETER;
+
+        /* Destination */
+        switch(param1.type)
+        {
+        case DISQPV_TYPE_REGISTER:
+            switch(param1.size)
             {
-            case DISQPV_TYPE_IMMEDIATE:
-                if(!(param1.flags  & (DISQPV_FLAG_32|DISQPV_FLAG_64)))
-                    return VERR_EM_INTERPRETER;
-                /* fallthru */
-
-            case DISQPV_TYPE_ADDRESS:
-                pDest = (RTGCPTR)param1.val.val64;
-                pDest = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param1, pDest);
-                break;
-
-            default:
-                AssertFailed();
-                return VERR_EM_INTERPRETER;
-            }
-
-            switch(param2.type)
-            {
-            case DISQPV_TYPE_IMMEDIATE: /* register type is translated to this one too */
-                val64 = param2.val.val64;
-                break;
-
-            default:
-                Log(("emInterpretMov: unexpected type=%d rip=%RGv\n", param2.type, (RTGCPTR)pRegFrame->rip));
-                return VERR_EM_INTERPRETER;
-            }
-#ifdef LOG_ENABLED
-            if (pDis->uCpuMode == DISCPUMODE_64BIT)
-                LogFlow(("EMInterpretInstruction at %RGv: OP_MOV %RGv <- %RX64 (%d) &val64=%RHv\n", (RTGCPTR)pRegFrame->rip, pDest, val64, param2.size, &val64));
-            else
-                LogFlow(("EMInterpretInstruction at %08RX64: OP_MOV %RGv <- %08X  (%d) &val64=%RHv\n", pRegFrame->rip, pDest, (uint32_t)val64, param2.size, &val64));
-#endif
-
-            Assert(param2.size <= 8 && param2.size > 0);
-            EM_ASSERT_FAULT_RETURN(pDest == pvFault, VERR_EM_INTERPRETER);
-            rc = emRamWrite(pVM, pVCpu, pRegFrame, pDest, &val64, param2.size);
-            if (RT_FAILURE(rc))
-                return VERR_EM_INTERPRETER;
-
-            *pcbSize = param2.size;
-        }
-        else
-        { /* read fault */
-            RTGCPTR pSrc;
-            uint64_t val64;
-
-            /* Source */
-            switch(param2.type)
-            {
-            case DISQPV_TYPE_IMMEDIATE:
-                if(!(param2.flags & (DISQPV_FLAG_32|DISQPV_FLAG_64)))
-                    return VERR_EM_INTERPRETER;
-                /* fallthru */
-
-            case DISQPV_TYPE_ADDRESS:
-                pSrc = (RTGCPTR)param2.val.val64;
-                pSrc = emConvertToFlatAddr(pVM, pRegFrame, pDis, &pDis->Param2, pSrc);
-                break;
-
+            case 1: rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg,  (uint8_t) val64); break;
+            case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)val64); break;
+            case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)val64); break;
+            case 8: rc = DISWriteReg64(pRegFrame, pDis->Param1.Base.idxGenReg, val64); break;
             default:
                 return VERR_EM_INTERPRETER;
             }
-
-            Assert(param1.size <= 8 && param1.size > 0);
-            EM_ASSERT_FAULT_RETURN(pSrc == pvFault, VERR_EM_INTERPRETER);
-            rc = emRamRead(pVM, pVCpu, pRegFrame, &val64, pSrc, param1.size);
             if (RT_FAILURE(rc))
-                return VERR_EM_INTERPRETER;
-
-            /* Destination */
-            switch(param1.type)
-            {
-            case DISQPV_TYPE_REGISTER:
-                switch(param1.size)
-                {
-                case 1: rc = DISWriteReg8(pRegFrame, pDis->Param1.Base.idxGenReg,  (uint8_t) val64); break;
-                case 2: rc = DISWriteReg16(pRegFrame, pDis->Param1.Base.idxGenReg, (uint16_t)val64); break;
-                case 4: rc = DISWriteReg32(pRegFrame, pDis->Param1.Base.idxGenReg, (uint32_t)val64); break;
-                case 8: rc = DISWriteReg64(pRegFrame, pDis->Param1.Base.idxGenReg, val64); break;
-                default:
-                    return VERR_EM_INTERPRETER;
-                }
-                if (RT_FAILURE(rc))
-                    return rc;
-                break;
-
-            default:
-                return VERR_EM_INTERPRETER;
-            }
+                return rc;
+            break;
+
+        default:
+            return VERR_EM_INTERPRETER;
+        }
 #ifdef LOG_ENABLED
-            if (pDis->uCpuMode == DISCPUMODE_64BIT)
-                LogFlow(("EMInterpretInstruction: OP_MOV %RGv -> %RX64 (%d)\n", pSrc, val64, param1.size));
-            else
-                LogFlow(("EMInterpretInstruction: OP_MOV %RGv -> %08X (%d)\n", pSrc, (uint32_t)val64, param1.size));
-#endif
-        }
-        return VINF_SUCCESS;
-#ifdef IN_RC
-    }
-    return VERR_EM_INTERPRETER;
-#endif
+        if (pDis->uCpuMode == DISCPUMODE_64BIT)
+            LogFlow(("EMInterpretInstruction: OP_MOV %RGv -> %RX64 (%d)\n", pSrc, val64, param1.size));
+        else
+            LogFlow(("EMInterpretInstruction: OP_MOV %RGv -> %08X (%d)\n", pSrc, (uint32_t)val64, param1.size));
+#endif
+    }
+    return VINF_SUCCESS;
 }
 
@@ -3015,14 +3099,4 @@
 #endif /* IN_RC */
 
-
-/**
- * IRET Emulation.
- */
-static int emInterpretIret(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
-{
-    /* only allow direct calls to EMInterpretIret for now */
-    NOREF(pVM); NOREF(pVCpu); NOREF(pDis); NOREF(pRegFrame); NOREF(pvFault); NOREF(pcbSize);
-    return VERR_EM_INTERPRETER;
-}
 
 /**
@@ -3597,11 +3671,21 @@
         /* Get the current privilege level. */
         uint32_t cpl = CPUMGetGuestCPL(pVCpu);
-        if (    cpl != 0
-            &&  pDis->pCurInstr->uOpcode != OP_RDTSC)    /* rdtsc requires emulation in ring 3 as well */
+#ifdef VBOX_WITH_RAW_RING1
+        if (   !EMIsRawRing1Enabled(pVM)
+            || cpl > 1
+            || pRegFrame->eflags.Bits.u2IOPL > cpl
+           )
         {
-            Log(("WARNING: refusing instruction emulation for user-mode code!!\n"));
-            STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedUserMode));
-            return VERR_EM_INTERPRETER;
+#endif
+            if (    cpl != 0
+                &&  pDis->pCurInstr->uOpcode != OP_RDTSC)    /* rdtsc requires emulation in ring 3 as well */
+            {
+                Log(("WARNING: refusing instruction emulation for user-mode code!!\n"));
+                STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FailedUserMode));
+                return VERR_EM_INTERPRETER;
+            }
+#ifdef VBOX_WITH_RAW_RING1
         }
+#endif
     }
     else
@@ -3835,8 +3919,8 @@
         INTERPRET_CASE(OP_STI,Sti);
         INTERPRET_CASE(OP_XADD, XAdd);
+        INTERPRET_CASE(OP_IRET,Iret);
 #endif
         INTERPRET_CASE(OP_CMPXCHG8B, CmpXchg8b);
         INTERPRET_CASE(OP_HLT,Hlt);
-        INTERPRET_CASE(OP_IRET,Iret);
         INTERPRET_CASE(OP_WBINVD,WbInvd);
 #ifdef VBOX_WITH_STATISTICS
Index: /trunk/src/VBox/VMM/VMMAll/PATMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PATMAll.cpp	(revision 45275)
+++ /trunk/src/VBox/VMM/VMMAll/PATMAll.cpp	(revision 45276)
@@ -627,7 +627,7 @@
     case OP_MOV:
         if (fPatchFlags & PATMFL_IDTHANDLER)
-        {
             pszInstr = "mov (Int/Trap Handler)";
-        }
+        else
+            pszInstr = "mov (cs)";
         break;
     case OP_SYSENTER:
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h	(revision 45275)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllBth.h	(revision 45276)
@@ -352,4 +352,5 @@
             }
             /* Unhandled part of a monitored page */
+            Log(("Unhandled part of monitored page %RGv\n", pvFault));
         }
         else
Index: /trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp	(revision 45275)
+++ /trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp	(revision 45276)
@@ -756,5 +756,5 @@
 
     /* Non-supervisor mode write means it's used for something else. */
-    if (CPUMGetGuestCPL(pVCpu) != 0)
+    if (CPUMGetGuestCPL(pVCpu) == 3)
         return true;
 
Index: /trunk/src/VBox/VMM/VMMAll/SELMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/SELMAll.cpp	(revision 45275)
+++ /trunk/src/VBox/VMM/VMMAll/SELMAll.cpp	(revision 45276)
@@ -838,4 +838,19 @@
 }
 
+#ifdef VBOX_WITH_RAW_RING1
+/**
+ * Sets ss:esp for ring1 in main Hypervisor's TSS.
+ *
+ * @param   pVM     Pointer to the VM.
+ * @param   ss      Ring2 SS register value. Pass 0 if invalid.
+ * @param   esp     Ring2 ESP register value.
+ */
+void selmSetRing2Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp)
+{
+    Assert((ss & 3) == 2 || esp == 0);
+    pVM->selm.s.Tss.ss2  = ss;
+    pVM->selm.s.Tss.esp2 = (uint32_t)esp;
+}
+#endif
 
 #ifdef VBOX_WITH_RAW_MODE_NOT_R0
@@ -855,6 +870,8 @@
     PVMCPU pVCpu = &pVM->aCpus[0];
 
+#ifdef SELM_TRACK_GUEST_TSS_CHANGES
     if (pVM->selm.s.fSyncTSSRing0Stack)
     {
+#endif
         RTGCPTR GCPtrTss = pVM->selm.s.GCPtrGuestTss;
         int     rc;
@@ -913,5 +930,7 @@
         selmSetRing1Stack(pVM, tss.ss0 | 1, (RTGCPTR32)tss.esp0);
         pVM->selm.s.fSyncTSSRing0Stack = false;
-    }
+#ifdef SELM_TRACK_GUEST_TSS_CHANGES
+    }
+#endif
 
     *pSS  = pVM->selm.s.Tss.ss1;
Index: /trunk/src/VBox/VMM/VMMAll/TRPMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/TRPMAll.cpp	(revision 45275)
+++ /trunk/src/VBox/VMM/VMMAll/TRPMAll.cpp	(revision 45276)
@@ -27,4 +27,5 @@
 #include <VBox/vmm/selm.h>
 #include <VBox/vmm/stam.h>
+#include <VBox/vmm/dbgf.h>
 #include "TRPMInternal.h"
 #include <VBox/vmm/vm.h>
@@ -593,5 +594,5 @@
                 pTrapStack = (uint32_t *)(uintptr_t)pTrapStackGC;
 #else
-                Assert(eflags.Bits.u1VM || (pRegFrame->ss.Sel & X86_SEL_RPL) == 0 || (pRegFrame->ss.Sel & X86_SEL_RPL) == 3);
+                Assert(eflags.Bits.u1VM || (pRegFrame->ss.Sel & X86_SEL_RPL) == 0 || (pRegFrame->ss.Sel & X86_SEL_RPL) == 3 || (EMIsRawRing1Enabled(pVM) && (pRegFrame->ss.Sel & X86_SEL_RPL) == 1));
                 /* Check maximum amount we need (10 when executing in V86 mode) */
                 if ((pTrapStackGC >> PAGE_SHIFT) != ((pTrapStackGC - 10*sizeof(uint32_t)) >> PAGE_SHIFT)) /* fail if we cross a page boundary */
@@ -624,7 +625,16 @@
                     if (!fConforming && dpl < cpl)
                     {
-                        if ((pRegFrame->ss.Sel & X86_SEL_RPL) == 1 && !eflags.Bits.u1VM)
-                            pTrapStack[--idx] = pRegFrame->ss.Sel & ~1;    /* Mask away traces of raw ring execution (ring 1). */
+#ifdef IN_RC /* Only in GC mode we still see tracing of our ring modifications */
+                        if (    (pRegFrame->ss.Sel & X86_SEL_RPL) == 1 
+                            &&  !eflags.Bits.u1VM)
+                            pTrapStack[--idx] = pRegFrame->ss.Sel & ~1;         /* Mask away traces of raw ring 0 execution (ring 1). */
+# ifdef VBOX_WITH_RAW_RING1
                         else
+                        if (    EMIsRawRing1Enabled(pVM)
+                            &&  (pRegFrame->ss.Sel & X86_SEL_RPL) == 2)
+                            pTrapStack[--idx] = (pRegFrame->ss.Sel & ~2) | 1;   /* Mask away traces of raw ring 1 execution (ring 2). */
+# endif
+                        else
+#endif  /* IN_RC */
                             pTrapStack[--idx] = pRegFrame->ss.Sel;
 
@@ -635,8 +645,16 @@
                     /* Note: Not really necessary as we grab include those bits in the trap/irq handler trampoline */
                     pTrapStack[--idx] = eflags.u32;
-
-                    if ((pRegFrame->cs.Sel & X86_SEL_RPL) == 1 && !eflags.Bits.u1VM)
-                        pTrapStack[--idx] = pRegFrame->cs.Sel & ~1;    /* Mask away traces of raw ring execution (ring 1). */
+#ifdef IN_RC /* Only in GC mode we still see tracing of our ring modifications */
+                    if (    (pRegFrame->cs.Sel & X86_SEL_RPL) == 1 
+                        &&  !eflags.Bits.u1VM)
+                        pTrapStack[--idx] = pRegFrame->cs.Sel & ~1;         /* Mask away traces of raw ring execution (ring 1). */
+# ifdef VBOX_WITH_RAW_RING1
                     else
+                    if (    EMIsRawRing1Enabled(pVM)
+                        &&  (pRegFrame->cs.Sel & X86_SEL_RPL) == 2)
+                        pTrapStack[--idx] = (pRegFrame->cs.Sel & ~2) | 1;   /* Mask away traces of raw ring 1 execution (ring 2). */
+# endif
+                    else
+#endif  /* IN_RC */
                         pTrapStack[--idx] = pRegFrame->cs.Sel;
 
@@ -660,4 +678,8 @@
                     /* Mask away dangerous flags for the trap/interrupt handler. */
                     eflags.u32 &= ~(X86_EFL_TF | X86_EFL_VM | X86_EFL_RF | X86_EFL_NT);
+#ifdef DEBUG
+                    if (DBGFIsStepping(pVCpu))
+                        eflags.u32 |= X86_EFL_TF;
+#endif
 
                     /* Turn off interrupts for interrupt gates. */
@@ -669,5 +691,5 @@
 #ifdef DEBUG
                     for (int j = idx; j < 0; j++)
-                        Log4(("Stack %RRv pos %02d: %08x\n", &pTrapStack[j], j, pTrapStack[j]));
+                        LogFlow(("Stack %RRv pos %02d: %08x\n", &pTrapStack[j], j, pTrapStack[j]));
 
                     Log4(("eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
Index: /trunk/src/VBox/VMM/VMMR3/CPUM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/CPUM.cpp	(revision 45275)
+++ /trunk/src/VBox/VMM/VMMR3/CPUM.cpp	(revision 45276)
@@ -42,4 +42,5 @@
 #include <VBox/vmm/pdmapi.h>
 #include <VBox/vmm/mm.h>
+#include <VBox/vmm/em.h>
 #include <VBox/vmm/selm.h>
 #include <VBox/vmm/dbgf.h>
@@ -4192,5 +4193,6 @@
      * Are we in Ring-0?
      */
-    if (    pCtxCore->ss.Sel && (pCtxCore->ss.Sel & X86_SEL_RPL) == 0
+    if (    pCtxCore->ss.Sel 
+        &&  (pCtxCore->ss.Sel & X86_SEL_RPL) == 0
         &&  !pCtxCore->eflags.Bits.u1VM)
     {
@@ -4204,11 +4206,24 @@
          */
         pCtxCore->ss.Sel |= 1;
-        if (pCtxCore->cs.Sel && (pCtxCore->cs.Sel & X86_SEL_RPL) == 0)
+        if (    pCtxCore->cs.Sel 
+            &&  (pCtxCore->cs.Sel & X86_SEL_RPL) == 0)
             pCtxCore->cs.Sel |= 1;
     }
     else
     {
+#ifdef VBOX_WITH_RAW_RING1
+        if (    EMIsRawRing1Enabled(pVM)
+            &&  !pCtxCore->eflags.Bits.u1VM
+            &&  (pCtxCore->ss.Sel & X86_SEL_RPL) == 1)
+        {
+            /* Set CPL to Ring-2. */
+            pCtxCore->ss.Sel = (pCtxCore->ss.Sel & ~X86_SEL_RPL) | 2;
+            if (pCtxCore->cs.Sel && (pCtxCore->cs.Sel & X86_SEL_RPL) == 1)
+                pCtxCore->cs.Sel = (pCtxCore->cs.Sel & ~X86_SEL_RPL) | 2;
+        }
+#else
         AssertMsg((pCtxCore->ss.Sel & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM,
                   ("ring-1 code not supported\n"));
+#endif
         /*
          * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
@@ -4221,6 +4236,5 @@
      */
     AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
-    AssertReleaseMsg(   pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss.Sel & X86_SEL_RPL)
-                     || pCtxCore->eflags.Bits.u1VM,
+    AssertReleaseMsg(pCtxCore->eflags.Bits.u2IOPL == 0, 
                      ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL));
     Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
@@ -4231,4 +4245,5 @@
     return VINF_SUCCESS;
 }
+
 
 
@@ -4300,13 +4315,41 @@
         if (!pCtxCore->eflags.Bits.u1VM)
         {
-            /** @todo See what happens if we remove this. */
-            if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 1)
-                pCtxCore->ds.Sel &= ~X86_SEL_RPL;
-            if ((pCtxCore->es.Sel & X86_SEL_RPL) == 1)
-                pCtxCore->es.Sel &= ~X86_SEL_RPL;
-            if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 1)
-                pCtxCore->fs.Sel &= ~X86_SEL_RPL;
-            if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 1)
-                pCtxCore->gs.Sel &= ~X86_SEL_RPL;
+#ifdef VBOX_WITH_RAW_RING1
+            if (    EMIsRawRing1Enabled(pVM)
+                &&  (pCtxCore->ss.Sel & X86_SEL_RPL) == 2)
+            {
+                /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
+                /** @todo See what happens if we remove this. */
+                if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 2)
+                    pCtxCore->ds.Sel = (pCtxCore->ds.Sel & ~X86_SEL_RPL) | 1;
+                if ((pCtxCore->es.Sel & X86_SEL_RPL) == 2)
+                    pCtxCore->es.Sel = (pCtxCore->es.Sel & ~X86_SEL_RPL) | 1;
+                if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 2)
+                    pCtxCore->fs.Sel = (pCtxCore->fs.Sel & ~X86_SEL_RPL) | 1;
+                if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 2)
+                    pCtxCore->gs.Sel = (pCtxCore->gs.Sel & ~X86_SEL_RPL) | 1;
+
+                /*
+                 * Ring-2 selector => Ring-1.
+                 */
+                pCtxCore->ss.Sel = (pCtxCore->ss.Sel & ~X86_SEL_RPL) | 1;
+                if ((pCtxCore->cs.Sel & X86_SEL_RPL) == 2)
+                    pCtxCore->cs.Sel = (pCtxCore->cs.Sel & ~X86_SEL_RPL) | 1;
+            }
+            else
+            {
+#endif
+                /** @todo See what happens if we remove this. */
+                if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 1)
+                    pCtxCore->ds.Sel &= ~X86_SEL_RPL;
+                if ((pCtxCore->es.Sel & X86_SEL_RPL) == 1)
+                    pCtxCore->es.Sel &= ~X86_SEL_RPL;
+                if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 1)
+                    pCtxCore->fs.Sel &= ~X86_SEL_RPL;
+                if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 1)
+                    pCtxCore->gs.Sel &= ~X86_SEL_RPL;
+#ifdef VBOX_WITH_RAW_RING1
+            }
+#endif
         }
     }
Index: /trunk/src/VBox/VMM/VMMR3/CSAM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/CSAM.cpp	(revision 45275)
+++ /trunk/src/VBox/VMM/VMMR3/CSAM.cpp	(revision 45276)
@@ -847,4 +847,12 @@
         break;
 
+    /* removing breaks win2k guests? */
+    case OP_IRET:
+#ifdef VBOX_WITH_RAW_RING1
+        if (EMIsRawRing1Enabled(pVM))
+            break;
+#endif
+        /* no break */
+
     case OP_ILLUD2:
         /* This appears to be some kind of kernel panic in Linux 2.4; no point to continue. */
@@ -852,8 +860,4 @@
     case OP_INT3:
     case OP_INVALID:
-#if 1
-    /* removing breaks win2k guests? */
-    case OP_IRET:
-#endif
         return VINF_SUCCESS;
     }
@@ -919,10 +923,35 @@
     }
 
+#ifdef VBOX_WITH_RAW_RING1
+    case OP_MOV:
+        /* mov xx, CS is a dangerous instruction as our raw ring usage leaks through. */
+        if (    EMIsRawRing1Enabled(pVM)
+            &&  (pCpu->Param2.fUse & DISUSE_REG_SEG) 
+            &&  (pCpu->Param2.Base.idxSegReg == DISSELREG_CS))
+        {
+            Log(("CSAM: Patching dangerous 'mov xx, cs' instruction at %RGv with an int3\n", pCurInstrGC));
+            if (PATMR3HasBeenPatched(pVM, pCurInstrGC) == false)
+            {
+                rc = PATMR3InstallPatch(pVM, pCurInstrGC, (pPage->fCode32) ? PATMFL_CODE32 : 0);
+                if (RT_FAILURE(rc))
+                {
+                    Log(("PATMR3InstallPatch failed with %d\n", rc));
+                    return VWRN_CONTINUE_ANALYSIS;
+                }
+            }
+            return VWRN_CONTINUE_ANALYSIS;
+        }
+        break;
+#endif
+
     case OP_PUSH:
+        /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) &&  (pCpu->Param1.Base.idxSegReg == DISSELREG_CS)) */
         if (pCpu->pCurInstr->fParam1 != OP_PARM_REG_CS)
             break;
 
         /* no break */
+#ifndef VBOX_WITH_SAFE_STR
     case OP_STR:
+#endif
     case OP_LSL:
     case OP_LAR:
@@ -2642,5 +2671,6 @@
 
             rc = PATMR3InstallPatch(pVM, pHandler, fPatchFlags);
-            if (RT_SUCCESS(rc) || rc == VERR_PATM_ALREADY_PATCHED)
+            if (    RT_SUCCESS(rc) 
+                ||  rc == VERR_PATM_ALREADY_PATCHED)
             {
                 Log(("Gate handler 0x%X is SAFE!\n", iGate));
Index: /trunk/src/VBox/VMM/VMMR3/DBGFDisas.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/DBGFDisas.cpp	(revision 45275)
+++ /trunk/src/VBox/VMM/VMMR3/DBGFDisas.cpp	(revision 45276)
@@ -661,5 +661,10 @@
         RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrCurrentLog failed with rc=%Rrc\n", rc);
     if (pszPrefix && *pszPrefix)
-        RTLogPrintf("%s-CPU%u: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
+    {
+        if (pVCpu->CTX_SUFF(pVM)->cCpus > 1)
+            RTLogPrintf("%s-CPU%u: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
+        else
+            RTLogPrintf("%s: %s\n", pszPrefix, szBuf);
+    }
     else
         RTLogPrintf("%s\n", szBuf);
@@ -692,5 +697,10 @@
         RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrLog(, %RTsel, %RGv) failed with rc=%Rrc\n", Sel, GCPtr, rc);
     if (pszPrefix && *pszPrefix)
-        RTLogPrintf("%s-CPU%u: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
+    {
+        if (pVCpu->CTX_SUFF(pVM)->cCpus > 1)
+            RTLogPrintf("%s-CPU%u: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
+        else
+            RTLogPrintf("%s: %s\n", pszPrefix, szBuf);
+    }
     else
         RTLogPrintf("%s\n", szBuf);
Index: /trunk/src/VBox/VMM/VMMR3/EM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/EM.cpp	(revision 45275)
+++ /trunk/src/VBox/VMM/VMMR3/EM.cpp	(revision 45276)
@@ -124,4 +124,12 @@
     pVM->fRecompileSupervisor = RT_SUCCESS(rc) ? !fEnabled : false;
     Log(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool\n", pVM->fRecompileUser, pVM->fRecompileSupervisor));
+
+#ifdef VBOX_WITH_RAW_RING1
+    rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR1Enabled", &fEnabled);
+    pVM->fRawRing1Enabled = RT_SUCCESS(rc) ? fEnabled : false;
+    Log(("EMR3Init: fRawRing1Enabled=%RTbool\n", pVM->fRawRing1Enabled));
+#else
+    pVM->fRawRing1Enabled = false;      /* disabled by default. */
+#endif
 
 #ifdef VBOX_WITH_REM
@@ -268,4 +276,6 @@
         EM_REG_COUNTER_USED(&pStats->StatRZLmsw,                 "/EM/CPU%d/RZ/Interpret/Success/Lmsw",       "The number of times LMSW was successfully interpreted.");
         EM_REG_COUNTER_USED(&pStats->StatR3Lmsw,                 "/EM/CPU%d/R3/Interpret/Success/Lmsw",       "The number of times LMSW was successfully interpreted.");
+        EM_REG_COUNTER_USED(&pStats->StatRZSmsw,                 "/EM/CPU%d/RZ/Interpret/Success/Smsw",       "The number of times SMSW was successfully interpreted.");
+        EM_REG_COUNTER_USED(&pStats->StatR3Smsw,                 "/EM/CPU%d/R3/Interpret/Success/Smsw",       "The number of times SMSW was successfully interpreted.");
 
         EM_REG_COUNTER(&pStats->StatRZInterpretFailed,           "/EM/CPU%d/RZ/Interpret/Failed",            "The number of times an instruction was not interpreted.");
@@ -322,4 +332,6 @@
         EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw,           "/EM/CPU%d/RZ/Interpret/Failed/Lmsw",       "The number of times LMSW was not interpreted.");
         EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw,           "/EM/CPU%d/R3/Interpret/Failed/Lmsw",       "The number of times LMSW was not interpreted.");
+        EM_REG_COUNTER_USED(&pStats->StatRZFailedSmsw,           "/EM/CPU%d/RZ/Interpret/Failed/Smsw",       "The number of times SMSW was not interpreted.");
+        EM_REG_COUNTER_USED(&pStats->StatR3FailedSmsw,           "/EM/CPU%d/R3/Interpret/Failed/Smsw",       "The number of times SMSW was not interpreted.");
 
         EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc,           "/EM/CPU%d/RZ/Interpret/Failed/Misc",       "The number of times some misc instruction was encountered.");
@@ -938,5 +950,5 @@
 static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
 {
-    LogFlow(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu),  CPUMGetGuestEIP(pVCpu)));
+    Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu),  CPUMGetGuestEIP(pVCpu)));
 
 #ifdef VBOX_WITH_REM
@@ -958,5 +970,5 @@
 #endif
 
-    LogFlow(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu),  CPUMGetGuestEIP(pVCpu)));
+    Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu),  CPUMGetGuestEIP(pVCpu)));
     return rc;
 }
@@ -1182,5 +1194,5 @@
     {
         DBGFR3PrgStep(pVCpu);
-        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS: ");
+        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
         emR3RemStep(pVM, pVCpu);
         if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
@@ -1308,4 +1320,16 @@
             return EMSTATE_REM;
 
+# ifdef VBOX_WITH_RAW_RING1
+        /* Only ring 0 and 1 supervisor code. */
+        if (EMIsRawRing1Enabled(pVM))
+        {
+            if ((uSS & X86_SEL_RPL) == 2)   /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
+            {
+                Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
+                return EMSTATE_REM;
+            }
+        }
+        else
+# endif
         /* Only ring 0 supervisor code. */
         if ((uSS & X86_SEL_RPL) != 0)
@@ -1334,4 +1358,7 @@
         {
             Log2(("raw r0 mode forced: patch code\n"));
+# ifdef VBOX_WITH_SAFE_STR
+            Assert(pCtx->tr.Sel);
+# endif
             return EMSTATE_RAW;
         }
@@ -1346,4 +1373,5 @@
 # endif
 
+# ifndef VBOX_WITH_RAW_RING1
         /** @todo still necessary??? */
         if (EFlags.Bits.u2IOPL != 0)
@@ -1352,4 +1380,5 @@
             return EMSTATE_REM;
         }
+# endif
     }
 
@@ -1387,4 +1416,12 @@
         return EMSTATE_REM;
     }
+
+# ifdef VBOX_WITH_SAFE_STR
+    if (pCtx->tr.Sel == 0)
+    {
+        Log(("Raw mode refused -> TR=0\n"));
+        return EMSTATE_REM;
+    }
+# endif
 
     /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
Index: /trunk/src/VBox/VMM/VMMR3/EMHM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/EMHM.cpp	(revision 45275)
+++ /trunk/src/VBox/VMM/VMMR3/EMHM.cpp	(revision 45276)
@@ -137,5 +137,5 @@
     {
         DBGFR3PrgStep(pVCpu);
-        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS: ");
+        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
         rc = emR3HmStep(pVM, pVCpu);
         if (    rc != VINF_SUCCESS
Index: /trunk/src/VBox/VMM/VMMR3/EMRaw.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/EMRaw.cpp	(revision 45275)
+++ /trunk/src/VBox/VMM/VMMR3/EMRaw.cpp	(revision 45276)
@@ -159,5 +159,5 @@
     PCPUMCTX    pCtx   = pVCpu->em.s.pCtx;
     bool        fGuest = pVCpu->em.s.enmState != EMSTATE_DEBUG_HYPER;
-#ifndef DEBUG_sandervl
+#ifndef DEBUG_sander
     Log(("emR3RawStep: cs:eip=%RTsel:%RGr efl=%RGr\n", fGuest ? CPUMGetGuestCS(pVCpu) : CPUMGetHyperCS(pVCpu),
          fGuest ? CPUMGetGuestEIP(pVCpu) : CPUMGetHyperEIP(pVCpu), fGuest ? CPUMGetGuestEFlags(pVCpu) : CPUMGetHyperEFlags(pVCpu)));
@@ -196,5 +196,5 @@
         else
             rc = VMMR3RawRunGC(pVM, pVCpu);
-#ifndef DEBUG_sandervl
+#ifndef DEBUG_sander
         Log(("emR3RawStep: cs:eip=%RTsel:%RGr efl=%RGr - GC rc %Rrc\n", fGuest ? CPUMGetGuestCS(pVCpu) : CPUMGetHyperCS(pVCpu),
              fGuest ? CPUMGetGuestEIP(pVCpu) : CPUMGetHyperEIP(pVCpu), fGuest ? CPUMGetGuestEFlags(pVCpu) : CPUMGetHyperEFlags(pVCpu), rc));
@@ -237,7 +237,8 @@
     {
         DBGFR3PrgStep(pVCpu);
-        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS: ");
+        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
         rc = emR3RawStep(pVM, pVCpu);
-        if (rc != VINF_SUCCESS)
+        if (   rc != VINF_SUCCESS
+            && rc != VINF_EM_DBG_STEPPED)
             break;
     }
@@ -950,5 +951,5 @@
     {
         DBGFR3_INFO_LOG(pVM, "cpumguest", "PRIV");
-        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Privileged instr: ");
+        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Privileged instr");
     }
 #endif
@@ -1090,5 +1091,5 @@
                     {
                         DBGFR3_INFO_LOG(pVM, "cpumguest", "PRIV");
-                        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Privileged instr: ");
+                        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Privileged instr");
                     }
 #endif
@@ -1361,5 +1362,9 @@
         Assert(REMR3QueryPendingInterrupt(pVM, pVCpu) == REM_NO_PENDING_IRQ);
 # endif
+# ifdef VBOX_WITH_RAW_RING1
+        Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss.Sel & X86_SEL_RPL) == 3 || (pCtx->ss.Sel & X86_SEL_RPL) == 0 || (EMIsRawRing1Enabled(pVM) && (pCtx->ss.Sel & X86_SEL_RPL) == 1));
+# else
         Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss.Sel & X86_SEL_RPL) == 3 || (pCtx->ss.Sel & X86_SEL_RPL) == 0);
+# endif
         AssertMsg(   (pCtx->eflags.u32 & X86_EFL_IF)
                   || PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip),
@@ -1429,11 +1434,14 @@
             Log(("RV86: %04x:%08x IF=%d VMFlags=%x\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
         else if ((pCtx->ss.Sel & X86_SEL_RPL) == 1)
-            Log(("RR0: %08x ESP=%08x EFL=%x IF=%d/%d VMFlags=%x PIF=%d CPL=%d (Scanned=%d)\n",
-                 pCtx->eip, pCtx->esp, CPUMRawGetEFlags(pVCpu), !!(pGCState->uVMFlags & X86_EFL_IF), pCtx->eflags.Bits.u1IF,
+            Log(("RR0: %x:%08x ESP=%x:%08x EFL=%x IF=%d/%d VMFlags=%x PIF=%d CPL=%d (Scanned=%d)\n",
+                 pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, CPUMRawGetEFlags(pVCpu), !!(pGCState->uVMFlags & X86_EFL_IF), pCtx->eflags.Bits.u1IF,
                  pGCState->uVMFlags, pGCState->fPIF, (pCtx->ss.Sel & X86_SEL_RPL), CSAMIsPageScanned(pVM, (RTGCPTR)pCtx->eip)));
+# ifdef VBOX_WITH_RAW_RING1
+        else if ((pCtx->ss.Sel & X86_SEL_RPL) == 2)
+            Log(("RR1: %x:%08x ESP=%x:%08x IF=%d VMFlags=%x CPL=%x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, (pCtx->ss.Sel & X86_SEL_RPL)));
+# endif
         else if ((pCtx->ss.Sel & X86_SEL_RPL) == 3)
-            Log(("RR3: %08x ESP=%08x IF=%d VMFlags=%x\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
+            Log(("RR3: %x:%08x ESP=%x:%08x IF=%d VMFlags=%x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
 #endif /* LOG_ENABLED */
-
 
 
@@ -1543,5 +1551,5 @@
             ||  VMCPU_FF_ISPENDING(pVCpu, ~VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
         {
-            Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss.Sel & X86_SEL_RPL) != 1);
+            Assert(pCtx->eflags.Bits.u1VM || (EMIsRawRing1Enabled(pVM) ? ((pCtx->ss.Sel & X86_SEL_RPL) != 2) : ((pCtx->ss.Sel & X86_SEL_RPL) != 1)));
 
             STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatRAWTotal, a);
Index: /trunk/src/VBox/VMM/VMMR3/PATM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/PATM.cpp	(revision 45275)
+++ /trunk/src/VBox/VMM/VMMR3/PATM.cpp	(revision 45276)
@@ -1535,4 +1535,9 @@
         break;
 
+#ifdef VBOX_WITH_SAFE_STR   /* @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
+    case OP_STR:
+        break;
+#endif
+
     default:
         if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
@@ -1645,4 +1650,9 @@
     case OP_RETN:
         return VINF_SUCCESS;
+
+#ifdef VBOX_WITH_SAFE_STR   /* @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
+    case OP_STR:
+        break;
+#endif
 
     case OP_POPF:
@@ -1806,4 +1816,5 @@
 
     case OP_POP:
+        /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) &&  (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
         if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_SS)
         {
@@ -1913,4 +1924,5 @@
 
     case OP_PUSH:
+        /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) &&  (pCpu->Param1.Base.idxSegReg == DISSELREG_CS)) */
         if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_CS)
         {
@@ -1947,4 +1959,8 @@
 
     case OP_STR:
+#ifdef VBOX_WITH_SAFE_STR   /* @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table and move OP_STR into #ifndef */
+        /* Now safe because our shadow TR entry is identical to the guest's. */
+        goto duplicate_instr;
+#endif
     case OP_SLDT:
         rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
@@ -4442,5 +4458,7 @@
             break;
 
+#ifndef VBOX_WITH_SAFE_STR
         case OP_STR:
+#endif
         case OP_SGDT:
         case OP_SLDT:
@@ -4453,4 +4471,7 @@
         case OP_VERR:
         case OP_IRET:
+#ifdef VBOX_WITH_RAW_RING1
+        case OP_MOV:
+#endif
             rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
             break;
Index: /trunk/src/VBox/VMM/VMMR3/PATMA.asm
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/PATMA.asm	(revision 45275)
+++ /trunk/src/VBox/VMM/VMMR3/PATMA.asm	(revision 45276)
@@ -1262,5 +1262,5 @@
 
     ; force ring 1 CS RPL
-    or      dword [esp+8], 1
+    or      dword [esp+8], 1        ;-> @todo we leave traces of raw mode if we jump back to the host context to handle pending interrupts! (below)
 iret_notring0:
 
@@ -1443,4 +1443,302 @@
     DD      PATM_FIXUP
     DD      PATMIretTable - PATMIretStart
+    DD      PATM_IRET_FUNCTION
+    DD      0
+    DD      PATM_VMFLAGS
+    DD      0
+    DD      PATM_VMFLAGS
+    DD      0
+    DD      PATM_VMFLAGS
+    DD      0
+    DD      PATM_TEMP_EAX
+    DD      0
+    DD      PATM_TEMP_ECX
+    DD      0
+    DD      PATM_TEMP_RESTORE_FLAGS
+    DD      0
+    DD      PATM_PENDINGACTION
+    DD      0
+    DD      0ffffffffh
+SECTION .text
+
+;;****************************************************
+;; Abstract:
+;;
+;; if eflags.NT==0 && iretstack.eflags.VM==0 && iretstack.eflags.IOPL==0
+;; then
+;;     if return to ring 0 (iretstack.new_cs & 3 == 0)
+;;     then
+;;          if iretstack.new_eflags.IF == 1 && iretstack.new_eflags.IOPL == 0
+;;          then
+;;              iretstack.new_cs |= 1
+;;          else
+;;              int 3
+;;     endif
+;;     uVMFlags &= ~X86_EFL_IF
+;;     iret
+;; else
+;;     int 3
+;;****************************************************
+;;
+; Stack:
+;
+; esp + 32 - GS         (V86 only)
+; esp + 28 - FS         (V86 only)
+; esp + 24 - DS         (V86 only)
+; esp + 20 - ES         (V86 only)
+; esp + 16 - SS         (if transfer to outer ring)
+; esp + 12 - ESP        (if transfer to outer ring)
+; esp + 8  - EFLAGS
+; esp + 4  - CS
+; esp      - EIP
+;;
+BEGINPROC   PATMIretRing1Replacement
+PATMIretRing1Start:
+    mov     dword [ss:PATM_INTERRUPTFLAG], 0
+    pushfd
+
+%ifdef PATM_LOG_PATCHIRET
+    push    eax
+    push    ecx
+    push    edx
+    lea     edx, dword [ss:esp+12+4]        ;3 dwords + pushed flags -> iret eip
+    mov     eax, PATM_ACTION_LOG_IRET
+    lock    or dword [ss:PATM_PENDINGACTION], eax
+    mov     ecx, PATM_ACTION_MAGIC
+    db      0fh, 0bh        ; illegal instr (hardcoded assumption in PATMHandleIllegalInstrTrap)
+    pop     edx
+    pop     ecx
+    pop     eax
+%endif
+
+    test    dword [esp], X86_EFL_NT
+    jnz     near iretring1_fault1
+
+    ; we can't do an iret to v86 code, as we run with CPL=1. The iret would attempt a protected mode iret and (most likely) fault.
+    test    dword [esp+12], X86_EFL_VM
+    jnz     near iretring1_return_to_v86
+
+    ;;!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+    ;;@todo: not correct for iret back to ring 2!!!!!
+    ;;!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+    test    dword [esp+8], 2
+    jnz     iretring1_checkpendingirq
+
+    test    dword [esp+12], X86_EFL_IF
+    jz      near iretring1_clearIF
+
+iretring1_checkpendingirq:
+
+; if interrupts are pending, then we must go back to the host context to handle them!
+; Note: This is very important as pending pic interrupts can be overridden by apic interrupts if we don't check early enough (Fedora 5 boot)
+; @todo fix this properly, so we can dispatch pending interrupts in GC
+    test    dword [ss:PATM_VM_FORCEDACTIONS], VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
+    jz      iretring1_continue
+
+; Go to our hypervisor trap handler to dispatch the pending irq
+    mov     dword [ss:PATM_TEMP_EAX], eax
+    mov     dword [ss:PATM_TEMP_ECX], ecx
+    mov     dword [ss:PATM_TEMP_EDI], edi
+    mov     dword [ss:PATM_TEMP_RESTORE_FLAGS], PATM_RESTORE_EAX | PATM_RESTORE_ECX | PATM_RESTORE_EDI
+    mov     eax, PATM_ACTION_PENDING_IRQ_AFTER_IRET
+    lock    or dword [ss:PATM_PENDINGACTION], eax
+    mov     ecx, PATM_ACTION_MAGIC
+    mov     edi, PATM_CURINSTRADDR
+
+    popfd
+    db      0fh, 0bh        ; illegal instr (hardcoded assumption in PATMHandleIllegalInstrTrap)
+    ; does not return
+
+iretring1_continue:
+
+    test    dword [esp+8], 2
+    jnz     iretring1_notring01
+
+    test    dword [esp+8], 1
+    jz      iretring1_ring0
+
+    ; returning to ring 1: change CS & SS RPL from 1 to 2
+    and     dword [esp+8], ~1       ; CS
+    or      dword [esp+8], 2
+
+    and     dword [esp+20], ~1      ; SS
+    or      dword [esp+20], 2
+
+    jmp     short iretring1_notring01
+iretring1_ring0:
+    ; force ring 1 CS RPL
+    or      dword [esp+8], 1
+
+iretring1_notring01:
+    ; This section must *always* be executed (!!)
+    ; Extract the IOPL from the return flags, save them to our virtual flags and
+    ; put them back to zero
+    ; @note we assume iretd doesn't fault!!!
+    push    eax
+    mov     eax, dword [esp+16]
+    and     eax, X86_EFL_IOPL
+    and     dword [ss:PATM_VMFLAGS], ~X86_EFL_IOPL
+    or      dword [ss:PATM_VMFLAGS], eax
+    pop     eax
+    and     dword [esp+12], ~X86_EFL_IOPL
+
+    ; Set IF again; below we make sure this won't cause problems.
+    or      dword [ss:PATM_VMFLAGS], X86_EFL_IF
+
+    ; make sure iret is executed fully (including the iret below; cli ... iret can otherwise be interrupted)
+    mov     dword [ss:PATM_INHIBITIRQADDR], PATM_CURINSTRADDR
+
+    popfd
+    mov     dword [ss:PATM_INTERRUPTFLAG], 1
+    iretd
+    PATM_INT3
+
+iretring1_fault:
+    popfd
+    mov     dword [ss:PATM_INTERRUPTFLAG], 1
+    PATM_INT3
+
+iretring1_fault1:
+    nop
+    popfd
+    mov     dword [ss:PATM_INTERRUPTFLAG], 1
+    PATM_INT3
+
+iretring1_clearIF:
+    push    dword [esp+4]           ; eip to return to
+    pushfd
+    push    eax
+    push    PATM_FIXUP
+    DB      0E8h                    ; call
+    DD      PATM_IRET_FUNCTION
+    add     esp, 4                  ; pushed address of jump table
+
+    cmp     eax, 0
+    je      near iretring1_fault3
+
+    mov     dword [esp+12+4], eax   ; stored eip in iret frame
+    pop     eax
+    popfd
+    add     esp, 4                  ; pushed eip
+
+    ; This section must *always* be executed (!!)
+    ; Extract the IOPL from the return flags, save them to our virtual flags and
+    ; put them back to zero
+    push    eax
+    mov     eax, dword [esp+16]
+    and     eax, X86_EFL_IOPL
+    and     dword [ss:PATM_VMFLAGS], ~X86_EFL_IOPL
+    or      dword [ss:PATM_VMFLAGS], eax
+    pop     eax
+    and     dword [esp+12], ~X86_EFL_IOPL
+
+    ; Clear IF
+    and     dword [ss:PATM_VMFLAGS], ~X86_EFL_IF
+    popfd
+
+    test    dword [esp+8], 1
+    jz      iretring1_clearIF_ring0
+
+    ; returning to ring 1: change CS & SS RPL from 1 to 2
+    and     dword [esp+8], ~1       ; CS
+    or      dword [esp+8], 2
+
+    and     dword [esp+20], ~1      ; SS
+    or      dword [esp+20], 2
+                                                ; the patched destination code will set PATM_INTERRUPTFLAG after the return!
+    iretd
+
+iretring1_clearIF_ring0:
+    ; force ring 1 CS RPL
+    or      dword [esp+8], 1
+                                                ; the patched destination code will set PATM_INTERRUPTFLAG after the return!
+    iretd
+
+iretring1_return_to_v86:
+    test    dword [esp+12], X86_EFL_IF
+    jz      iretring1_fault
+
+    ; Go to our hypervisor trap handler to perform the iret to v86 code
+    mov     dword [ss:PATM_TEMP_EAX], eax
+    mov     dword [ss:PATM_TEMP_ECX], ecx
+    mov     dword [ss:PATM_TEMP_RESTORE_FLAGS], PATM_RESTORE_EAX | PATM_RESTORE_ECX
+    mov     eax, PATM_ACTION_DO_V86_IRET
+    lock    or dword [ss:PATM_PENDINGACTION], eax
+    mov     ecx, PATM_ACTION_MAGIC
+
+    popfd
+
+    db      0fh, 0bh        ; illegal instr (hardcoded assumption in PATMHandleIllegalInstrTrap)
+    ; does not return
+
+
+iretring1_fault3:
+    pop     eax
+    popfd
+    add     esp, 4                  ; pushed eip
+    jmp     iretring1_fault
+
+align   4
+PATMIretRing1Table:
+    DW      PATM_MAX_JUMPTABLE_ENTRIES          ; nrSlots
+    DW      0                                   ; ulInsertPos
+    DD      0                                   ; cAddresses
+    TIMES PATCHJUMPTABLE_SIZE DB 0              ; lookup slots
+
+PATMIretRing1End:
+ENDPROC     PATMIretRing1Replacement
+
+SECTION .data
+; Patch record for 'iretd'
+GLOBALNAME PATMIretRing1Record
+    RTCCPTR_DEF PATMIretRing1Start
+    DD      0
+    DD      0
+    DD      0
+    DD      PATMIretRing1End- PATMIretRing1Start
+%ifdef PATM_LOG_PATCHIRET
+    DD      26
+%else
+    DD      25
+%endif
+    DD      PATM_INTERRUPTFLAG
+    DD      0
+%ifdef PATM_LOG_PATCHIRET
+    DD      PATM_PENDINGACTION
+    DD      0
+%endif
+    DD      PATM_VM_FORCEDACTIONS
+    DD      0
+    DD      PATM_TEMP_EAX
+    DD      0
+    DD      PATM_TEMP_ECX
+    DD      0
+    DD      PATM_TEMP_EDI
+    DD      0
+    DD      PATM_TEMP_RESTORE_FLAGS
+    DD      0
+    DD      PATM_PENDINGACTION
+    DD      0
+    DD      PATM_CURINSTRADDR
+    DD      0
+    DD      PATM_VMFLAGS
+    DD      0
+    DD      PATM_VMFLAGS
+    DD      0
+    DD      PATM_VMFLAGS
+    DD      0
+    DD      PATM_INHIBITIRQADDR
+    DD      0
+    DD      PATM_CURINSTRADDR
+    DD      0
+    DD      PATM_INTERRUPTFLAG
+    DD      0
+    DD      PATM_INTERRUPTFLAG
+    DD      0
+    DD      PATM_INTERRUPTFLAG
+    DD      0
+    DD      PATM_FIXUP
+    DD      PATMIretRing1Table - PATMIretRing1Start
     DD      PATM_IRET_FUNCTION
     DD      0
Index: /trunk/src/VBox/VMM/VMMR3/PATMPatch.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/PATMPatch.cpp	(revision 45275)
+++ /trunk/src/VBox/VMM/VMMR3/PATMPatch.cpp	(revision 45276)
@@ -27,4 +27,5 @@
 #include <VBox/vmm/cpum.h>
 #include <VBox/vmm/mm.h>
+#include <VBox/vmm/em.h>
 #include <VBox/vmm/trpm.h>
 #include <VBox/param.h>
@@ -436,8 +437,14 @@
 
     AssertMsg(fSizeOverride == false, ("operand size override!!\n"));
-
     callInfo.pCurInstrGC = pCurInstrGC;
 
-    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMIretRecord, 0, false, &callInfo);
+#ifdef VBOX_WITH_RAW_RING1
+    if (EMIsRawRing1Enabled(pVM))
+    {
+        size = patmPatchGenCode(pVM, pPatch, pPB, &PATMIretRing1Record, 0, false, &callInfo);
+    }
+    else
+#endif
+        size = patmPatchGenCode(pVM, pPatch, pPB, &PATMIretRecord, 0, false, &callInfo);
 
     PATCHGEN_EPILOG(pPatch, size);
@@ -1074,18 +1081,25 @@
 int patmPatchGenIntEntry(PVM pVM, PPATCHINFO pPatch, RTRCPTR pIntHandlerGC)
 {
-    uint32_t size;
     int rc = VINF_SUCCESS;
 
-    PATCHGEN_PROLOG(pVM, pPatch);
-
-    /* Add lookup record for patch to guest address translation */
-    patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pIntHandlerGC, PATM_LOOKUP_PATCH2GUEST);
-
-    /* Generate entrypoint for the interrupt handler (correcting CS in the interrupt stack frame) */
-    size = patmPatchGenCode(pVM, pPatch, pPB,
-                            (pPatch->flags & PATMFL_INTHANDLER_WITH_ERRORCODE) ? &PATMIntEntryRecordErrorCode : &PATMIntEntryRecord,
-                            0, false);
-
-    PATCHGEN_EPILOG(pPatch, size);
+#ifdef VBOX_WITH_RAW_RING1
+    if (!EMIsRawRing1Enabled(pVM))    /* direct passthru of interrupts is not allowed in the ring-1 support case as we can't deal with the ring-1/2 ambiguity in the patm asm code and we don't need it either as TRPMForwardTrap takes care of the details. */
+    {
+#endif
+        uint32_t size;
+        PATCHGEN_PROLOG(pVM, pPatch);
+
+        /* Add lookup record for patch to guest address translation */
+        patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pIntHandlerGC, PATM_LOOKUP_PATCH2GUEST);
+
+        /* Generate entrypoint for the interrupt handler (correcting CS in the interrupt stack frame) */
+        size = patmPatchGenCode(pVM, pPatch, pPB,
+                                (pPatch->flags & PATMFL_INTHANDLER_WITH_ERRORCODE) ? &PATMIntEntryRecordErrorCode : &PATMIntEntryRecord,
+                                0, false);
+
+        PATCHGEN_EPILOG(pPatch, size);
+#ifdef VBOX_WITH_RAW_RING1
+    }
+#endif
 
     // Interrupt gates set IF to 0
@@ -1107,4 +1121,6 @@
 {
     uint32_t size;
+
+    Assert(!EMIsRawRing1Enabled(pVM));
 
     PATCHGEN_PROLOG(pVM, pPatch);
Index: /trunk/src/VBox/VMM/VMMR3/PATMSSM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/PATMSSM.cpp	(revision 45275)
+++ /trunk/src/VBox/VMM/VMMR3/PATMSSM.cpp	(revision 45276)
@@ -810,5 +810,5 @@
         patmR3PatchConvertSSM2Mem(pPatchRec, &patch);
 
-        Log(("Restoring patch %RRv -> %RRv\n", pPatchRec->patch.pPrivInstrGC, patmInfo.pPatchMemGC + pPatchRec->patch.pPatchBlockOffset));
+        Log(("Restoring patch %RRv -> %RRv state %x\n", pPatchRec->patch.pPrivInstrGC, patmInfo.pPatchMemGC + pPatchRec->patch.pPatchBlockOffset, pPatchRec->patch.uState));
         bool ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
         Assert(ret);
Index: /trunk/src/VBox/VMM/VMMR3/PGMHandler.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/PGMHandler.cpp	(revision 45275)
+++ /trunk/src/VBox/VMM/VMMR3/PGMHandler.cpp	(revision 45276)
@@ -494,5 +494,7 @@
         {
             pgmUnlock(pVM);
+#ifndef DEBUG_sander
             AssertMsgFailed(("Range %#x not found!\n", GCPtr));
+#endif
             return VERR_INVALID_PARAMETER;
         }
Index: /trunk/src/VBox/VMM/VMMR3/SELM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/SELM.cpp	(revision 45275)
+++ /trunk/src/VBox/VMM/VMMR3/SELM.cpp	(revision 45276)
@@ -80,13 +80,4 @@
 #include <iprt/string.h>
 
-
-/**
- * Enable or disable tracking of Shadow GDT/LDT/TSS.
- * @{
- */
-#define SELM_TRACK_SHADOW_GDT_CHANGES
-#define SELM_TRACK_SHADOW_LDT_CHANGES
-#define SELM_TRACK_SHADOW_TSS_CHANGES
-/** @} */
 
 
@@ -565,9 +556,11 @@
      * Uninstall guest GDT/LDT/TSS write access handlers.
      */
-    int rc;
+    int rc = VINF_SUCCESS;
     if (pVM->selm.s.GuestGdtr.pGdt != RTRCPTR_MAX && pVM->selm.s.fGDTRangeRegistered)
     {
+#ifdef SELM_TRACK_GUEST_GDT_CHANGES
         rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GuestGdtr.pGdt);
         AssertRC(rc);
+#endif
         pVM->selm.s.GuestGdtr.pGdt = RTRCPTR_MAX;
         pVM->selm.s.GuestGdtr.cbGdt = 0;
@@ -576,12 +569,16 @@
     if (pVM->selm.s.GCPtrGuestLdt != RTRCPTR_MAX)
     {
+#ifdef SELM_TRACK_GUEST_LDT_CHANGES
         rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
         AssertRC(rc);
+#endif
         pVM->selm.s.GCPtrGuestLdt = RTRCPTR_MAX;
     }
     if (pVM->selm.s.GCPtrGuestTss != RTRCPTR_MAX)
     {
+#ifdef SELM_TRACK_GUEST_TSS_CHANGES
         rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestTss);
         AssertRC(rc);
+#endif
         pVM->selm.s.GCPtrGuestTss = RTRCPTR_MAX;
         pVM->selm.s.GCSelTss      = RTSEL_MAX;
@@ -619,6 +616,8 @@
     if (pVM->selm.s.GuestGdtr.pGdt != RTRCPTR_MAX && pVM->selm.s.fGDTRangeRegistered)
     {
+#ifdef SELM_TRACK_GUEST_GDT_CHANGES
         rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GuestGdtr.pGdt);
         AssertRC(rc);
+#endif
         pVM->selm.s.GuestGdtr.pGdt = RTRCPTR_MAX;
         pVM->selm.s.GuestGdtr.cbGdt = 0;
@@ -627,12 +626,16 @@
     if (pVM->selm.s.GCPtrGuestLdt != RTRCPTR_MAX)
     {
+#ifdef SELM_TRACK_GUEST_LDT_CHANGES
         rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
         AssertRC(rc);
+#endif
         pVM->selm.s.GCPtrGuestLdt = RTRCPTR_MAX;
     }
     if (pVM->selm.s.GCPtrGuestTss != RTRCPTR_MAX)
     {
+#ifdef SELM_TRACK_GUEST_TSS_CHANGES
         rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestTss);
         AssertRC(rc);
+#endif
         pVM->selm.s.GCPtrGuestTss = RTRCPTR_MAX;
         pVM->selm.s.GCSelTss      = RTSEL_MAX;
@@ -953,4 +956,13 @@
     }
 
+#ifdef VBOX_WITH_SAFE_STR
+    /** Use the guest's TR selector to plug the str virtualization hole. */
+    if (CPUMGetGuestTR(pVCpu, NULL) != 0)
+    {
+        Log(("SELM: Use guest TSS selector %x\n", CPUMGetGuestTR(pVCpu, NULL)));
+        aHyperSel[SELM_HYPER_SEL_TSS] = CPUMGetGuestTR(pVCpu, NULL);
+    }
+#endif
+
     /*
      * Work thru the copied GDT entries adjusting them for correct virtualization.
@@ -960,5 +972,5 @@
     {
         if (pGDTE->Gen.u1Present)
-            selmGuestToShadowDesc(pGDTE);
+            selmGuestToShadowDesc(pVM, pGDTE);
 
         /* Next GDT entry. */
@@ -990,5 +1002,11 @@
         VMR3Relocate(pVM, 0);
     }
-    else if (cbEffLimit >= SELM_HYPER_DEFAULT_BASE)
+    else 
+#ifdef VBOX_WITH_SAFE_STR
+    if (    cbEffLimit >= SELM_HYPER_DEFAULT_BASE
+        ||  CPUMGetGuestTR(pVCpu, NULL) != 0)       /* Our shadow TR entry was overwritten when we synced the guest's GDT. */
+#else
+    if (cbEffLimit >= SELM_HYPER_DEFAULT_BASE)
+#endif
         /* We overwrote all entries above, so we have to save them again. */
         selmR3SetupHyperGDTSelectors(pVM);
@@ -1011,5 +1029,5 @@
     {
         Log(("SELMR3UpdateFromCPUM: Guest's GDT is changed to pGdt=%016RX64 cbGdt=%08X\n", GDTR.pGdt, GDTR.cbGdt));
-
+#ifdef SELM_TRACK_GUEST_GDT_CHANGES
         /*
          * [Re]Register write virtual handler for guest's GDT.
@@ -1025,7 +1043,24 @@
                                          0, selmR3GuestGDTWriteHandler, "selmRCGuestGDTWriteHandler", 0,
                                          "Guest GDT write access handler");
+# ifdef VBOX_WITH_RAW_RING1
+        /* Some guest OSes (QNX) share code and the GDT on the same page; PGMR3HandlerVirtualRegister doesn't support more than one handler, so we kick out the 
+         * PATM handler as this one is more important. 
+         * @todo fix this properly in PGMR3HandlerVirtualRegister
+         */
+        if (rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT)
+        {
+            LogRel(("selmR3UpdateShadowGdt: Virtual handler conflict %RGv -> kick out PATM handler for the higher priority GDT page monitor\n", GDTR.pGdt));
+            rc = PGMHandlerVirtualDeregister(pVM, GDTR.pGdt & PAGE_BASE_GC_MASK);
+            AssertRC(rc);
+
+            rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE,
+                                             GDTR.pGdt, GDTR.pGdt + GDTR.cbGdt /* already inclusive */,
+                                             0, selmR3GuestGDTWriteHandler, "selmRCGuestGDTWriteHandler", 0,
+                                             "Guest GDT write access handler");
+        }
+# endif
         if (RT_FAILURE(rc))
             return rc;
-
+#endif
         /* Update saved Guest GDTR. */
         pVM->selm.s.GuestGdtr = GDTR;
@@ -1137,4 +1172,5 @@
                  pVM->selm.s.GCPtrGuestLdt, pVM->selm.s.cbLdtLimit, GCPtrLdt, cbLdt, pVM->selm.s.GuestGdtr.pGdt, pVM->selm.s.GuestGdtr.cbGdt));
 
+#ifdef SELM_TRACK_GUEST_LDT_CHANGES
             /*
              * [Re]Register write virtual handler for guest's GDT.
@@ -1146,8 +1182,8 @@
                 AssertRC(rc);
             }
-#ifdef DEBUG
+# ifdef DEBUG
             if (pDesc->Gen.u1Present)
                 Log(("LDT selector marked not present!!\n"));
-#endif
+# endif
             rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrLdt, GCPtrLdt + cbLdt /* already inclusive */,
                                              0, selmR3GuestLDTWriteHandler, "selmRCGuestLDTWriteHandler", 0, "Guest LDT write access handler");
@@ -1166,5 +1202,7 @@
                 return rc;
             }
-
+#else
+            pVM->selm.s.GCPtrGuestLdt = GCPtrLdt;
+#endif
             pVM->selm.s.cbLdtLimit = cbLdt;
         }
@@ -1205,5 +1243,5 @@
     /** @todo investigate how intel handle various operations on half present cross page entries. */
     off = GCPtrLdt & (sizeof(X86DESC) - 1);
-    AssertMsg(!off, ("LDT is not aligned on entry size! GCPtrLdt=%08x\n", GCPtrLdt));
+////    AssertMsg(!off, ("LDT is not aligned on entry size! GCPtrLdt=%08x\n", GCPtrLdt));
 
     /* Note: Do not skip the first selector; unlike the GDT, a zero LDT selector is perfectly valid. */
@@ -1239,5 +1277,5 @@
             {
                 if (pLDTE->Gen.u1Present)
-                    selmGuestToShadowDesc(pLDTE);
+                    selmGuestToShadowDesc(pVM, pLDTE);
 
                 /* Next LDT entry. */
@@ -1438,5 +1476,5 @@
 }
 
-
+#ifdef SELM_TRACK_GUEST_GDT_CHANGES
 /**
  * \#PF Handler callback for virtual access handler ranges.
@@ -1465,6 +1503,7 @@
     return VINF_PGM_HANDLER_DO_DEFAULT;
 }
-
-
+#endif
+
+#ifdef SELM_TRACK_GUEST_LDT_CHANGES
 /**
  * \#PF Handler callback for virtual access handler ranges.
@@ -1493,6 +1532,8 @@
     return VINF_PGM_HANDLER_DO_DEFAULT;
 }
-
-
+#endif
+
+
+#ifdef SELM_TRACK_GUEST_TSS_CHANGES
 /**
  * \#PF Handler callback for virtual access handler ranges.
@@ -1526,5 +1567,5 @@
     return VINF_PGM_HANDLER_DO_DEFAULT;
 }
-
+#endif
 
 /**
@@ -1675,4 +1716,15 @@
             selmSetRing1Stack(pVM, Tss.ss0 | 1, Tss.esp0);
             pVM->selm.s.fSyncTSSRing0Stack = fNoRing1Stack = false;
+
+#ifdef VBOX_WITH_RAW_RING1
+            /* Update our TSS structure for the guest's ring 2 stack */
+            selmSetRing2Stack(pVM, (Tss.ss1 & ~1) | 2, Tss.esp1);
+
+            if (    (pVM->selm.s.Tss.ss2 != ((Tss.ss1 & ~2) | 1))
+                ||  pVM->selm.s.Tss.esp2 != Tss.esp1)
+            {
+                Log(("SELMR3SyncTSS: Updating TSS ring 1 stack to %04X:%08X from %04X:%08X\n", Tss.ss1, Tss.esp1, (pVM->selm.s.Tss.ss2 & ~2) | 1, pVM->selm.s.Tss.esp2));
+            }
+#endif
         }
     }
@@ -1711,4 +1763,5 @@
         if (cbMonitoredTss != 0)
         {
+#ifdef SELM_TRACK_GUEST_TSS_CHANGES
             rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrTss, GCPtrTss + cbMonitoredTss - 1,
                                              0, selmR3GuestTSSWriteHandler,
@@ -1716,8 +1769,30 @@
             if (RT_FAILURE(rc))
             {
+# ifdef VBOX_WITH_RAW_RING1
+                /* Some guest OSes (QNX) share code and the TSS on the same page; PGMR3HandlerVirtualRegister doesn't support more than one handler, so we kick out the 
+                 * PATM handler as this one is more important. 
+                 * @todo fix this properly in PGMR3HandlerVirtualRegister
+                 */
+                if (rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT)
+                {
+                    LogRel(("SELMR3SyncTSS: Virtual handler conflict %RGv -> kick out PATM handler for the higher priority TSS page monitor\n", GCPtrTss));
+                    rc = PGMHandlerVirtualDeregister(pVM, GCPtrTss & PAGE_BASE_GC_MASK);
+                    AssertRC(rc);
+
+                    rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrTss, GCPtrTss + cbMonitoredTss - 1,
+                                                     0, selmR3GuestTSSWriteHandler,
+                                                     "selmRCGuestTSSWriteHandler", 0, "Guest TSS write access handler");
+                    if (RT_FAILURE(rc))
+                    {
+                        STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
+                        return rc;
+                    }
+                }
+# else
                 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
                 return rc;
-            }
-
+# endif
+           }
+#endif
             /* Update saved Guest TSS info. */
             pVM->selm.s.GCPtrGuestTss       = GCPtrTss;
@@ -1888,5 +1963,5 @@
 VMMR3DECL(bool) SELMR3CheckTSS(PVM pVM)
 {
-#ifdef VBOX_STRICT
+#if defined(VBOX_STRICT) && defined(SELM_TRACK_GUEST_TSS_CHANGES)
     PVMCPU pVCpu = VMMGetCpu(pVM);
 
@@ -2019,4 +2094,46 @@
 #endif /* !VBOX_STRICT */
 }
+
+# ifdef VBOX_WITH_SAFE_STR
+/**
+ * Validates the RawR0 TR shadow GDT entry
+ *
+ * @returns true if it matches.
+ * @returns false and assertions on mismatch.
+ * @param   pVM     Pointer to the VM.
+ */
+VMMR3DECL(bool) SELMR3CheckShadowTR(PVM pVM)
+{
+#  ifdef VBOX_STRICT
+    PX86DESC paGdt = pVM->selm.s.paGdtR3;
+
+    /*
+     * TSS descriptor
+     */
+    PX86DESC pDesc = &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] >> 3];
+    RTRCPTR RCPtrTSS = VM_RC_ADDR(pVM, &pVM->selm.s.Tss);
+
+    if (    pDesc->Gen.u16BaseLow      != RT_LOWORD(RCPtrTSS)
+        ||  pDesc->Gen.u8BaseHigh1     != RT_BYTE3(RCPtrTSS)
+        ||  pDesc->Gen.u8BaseHigh2     != RT_BYTE4(RCPtrTSS)
+        ||  pDesc->Gen.u16LimitLow     != sizeof(VBOXTSS) - 1
+        ||  pDesc->Gen.u4LimitHigh     != 0
+        ||  (pDesc->Gen.u4Type         != X86_SEL_TYPE_SYS_386_TSS_AVAIL && pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
+        ||  pDesc->Gen.u1DescType      != 0 /* system */
+        ||  pDesc->Gen.u2Dpl           != 0 /* supervisor */
+        ||  pDesc->Gen.u1Present       != 1
+        ||  pDesc->Gen.u1Available     != 0
+        ||  pDesc->Gen.u1Long          != 0
+        ||  pDesc->Gen.u1DefBig        != 0
+        ||  pDesc->Gen.u1Granularity   != 0 /* byte limit */
+        )
+    {
+        AssertFailed();
+        return false;
+    }
+#  endif
+    return true;
+}
+# endif 
 
 #endif /* VBOX_WITH_RAW_MODE */
Index: /trunk/src/VBox/VMM/VMMR3/TRPM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/TRPM.cpp	(revision 45275)
+++ /trunk/src/VBox/VMM/VMMR3/TRPM.cpp	(revision 45276)
@@ -1330,5 +1330,9 @@
     }
 
-    if (EMIsRawRing0Enabled(pVM))
+    if (    EMIsRawRing0Enabled(pVM)
+#ifdef VBOX_WITH_RAW_RING1
+        && !EMIsRawRing1Enabled(pVM)    /* can't deal with the ambiguity of ring 1 & 2 in the patch code. */
+#endif
+       )
     {
         /*
Index: /trunk/src/VBox/VMM/VMMR3/VMM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/VMM.cpp	(revision 45275)
+++ /trunk/src/VBox/VMM/VMMR3/VMM.cpp	(revision 45276)
@@ -1257,4 +1257,7 @@
             EMR3FatalError(pVCpu, VERR_VMM_HYPER_CR3_MISMATCH);
         PGMMapCheck(pVM);
+# ifdef VBOX_WITH_SAFE_STR
+        SELMR3CheckShadowTR(pVM);
+# endif
 #endif
         int rc;
Index: /trunk/src/VBox/VMM/VMMRC/CPUMRC.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMRC/CPUMRC.cpp	(revision 45275)
+++ /trunk/src/VBox/VMM/VMMRC/CPUMRC.cpp	(revision 45276)
@@ -25,4 +25,5 @@
 #include <VBox/vmm/patm.h>
 #include <VBox/vmm/trpm.h>
+#include <VBox/vmm/em.h>
 #include "CPUMInternal.h"
 #include <VBox/vmm/vm.h>
@@ -106,2 +107,108 @@
     //Log2(("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
 }
+
+
+/**
+ * Get the current privilege level of the guest.
+ *
+ * @returns CPL
+ * @param   pVCpu       The current virtual CPU.
+ * @param   pRegFrame   Pointer to the register frame.
+ */
+VMMDECL(uint32_t) CPUMRCGetGuestCPL(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
+{
+    /*
+     * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
+     *
+     * Note! We used to check CS.DPL here, assuming it was always equal to
+     * CPL even if a conforming segment was loaded.  But this truned out to
+     * only apply to older AMD-V.  With VT-x we had an ACP2 regression
+     * during install after a far call to ring 2 with VT-x.  Then on newer
+     * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
+     * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
+     *
+     * So, forget CS.DPL, always use SS.DPL.
+     *
+     * Note! The SS RPL is always equal to the CPL, while the CS RPL
+     * isn't necessarily equal if the segment is conforming.
+     * See section 4.11.1 in the AMD manual.
+     */
+    uint32_t uCpl;
+    if (!pRegFrame->eflags.Bits.u1VM)
+    {
+        uCpl = (pRegFrame->ss.Sel & X86_SEL_RPL);
+#ifdef VBOX_WITH_RAW_MODE_NOT_R0
+# ifdef VBOX_WITH_RAW_RING1
+        if (pVCpu->cpum.s.fRawEntered)
+        {
+            if (    EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM))
+                &&  uCpl == 2)
+                uCpl = 1;
+            else
+            if (uCpl == 1)
+                uCpl = 0;
+        }
+        Assert(uCpl != 2);  /* ring 2 support not allowed anymore. */
+# else
+        if (uCpl == 1)
+            uCpl = 0;
+# endif
+#endif
+    }
+    else
+        uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
+
+    return uCpl;
+}
+
+#ifdef VBOX_WITH_RAW_RING1
+/**
+ * Transforms the guest CPU state to raw-ring mode.
+ *
+ * This function will change any of the cs and ss registers with DPL=0 to DPL=1.
+ *
+ * @returns nothing (void); failures surface via the recompiler.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtxCore    The context core (for trap usage).
+ * @see     @ref pg_raw
+ */
+VMMDECL(void) CPUMRCRecheckRawState(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
+{
+    /*
+     * Are we in Ring-0?
+     */
+    if (    pCtxCore->ss.Sel 
+        &&  (pCtxCore->ss.Sel & X86_SEL_RPL) == 0
+        &&  !pCtxCore->eflags.Bits.u1VM)
+    {
+        /*
+         * Set CPL to Ring-1.
+         */
+        pCtxCore->ss.Sel |= 1;
+        if (    pCtxCore->cs.Sel 
+            &&  (pCtxCore->cs.Sel & X86_SEL_RPL) == 0)
+            pCtxCore->cs.Sel |= 1;
+    }
+    else
+    {
+        if (    EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM))
+            &&  !pCtxCore->eflags.Bits.u1VM
+            &&  (pCtxCore->ss.Sel & X86_SEL_RPL) == 1)
+        {
+            /* Set CPL to Ring-2. */
+            pCtxCore->ss.Sel = (pCtxCore->ss.Sel & ~X86_SEL_RPL) | 2;
+            if (pCtxCore->cs.Sel && (pCtxCore->cs.Sel & X86_SEL_RPL) == 1)
+                pCtxCore->cs.Sel = (pCtxCore->cs.Sel & ~X86_SEL_RPL) | 2;
+        }
+    }
+
+    /*
+     * Assert sanity.
+     */
+    AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
+    AssertReleaseMsg(pCtxCore->eflags.Bits.u2IOPL == 0, 
+                     ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL));
+
+    pCtxCore->eflags.u32        |= X86_EFL_IF; /* paranoia */
+}
+#endif /* VBOX_WITH_RAW_RING1 */
Index: /trunk/src/VBox/VMM/VMMRC/PATMRC.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMRC/PATMRC.cpp	(revision 45275)
+++ /trunk/src/VBox/VMM/VMMRC/PATMRC.cpp	(revision 45276)
@@ -155,5 +155,9 @@
 
     /* Very important check -> otherwise we have a security leak. */
+#ifdef VBOX_WITH_RAW_RING1
+    AssertReturn(!pRegFrame->eflags.Bits.u1VM && (pRegFrame->ss.Sel & X86_SEL_RPL) <= (unsigned) (EMIsRawRing1Enabled(pVM) ? 2 : 1), VERR_ACCESS_DENIED);
+#else
     AssertReturn(!pRegFrame->eflags.Bits.u1VM && (pRegFrame->ss.Sel & X86_SEL_RPL) == 1, VERR_ACCESS_DENIED);
+#endif
     Assert(PATMIsPatchGCAddr(pVM, pRegFrame->eip));
 
@@ -455,5 +459,9 @@
     int rc;
 
+#ifdef VBOX_WITH_RAW_RING1
+    AssertReturn(!pRegFrame->eflags.Bits.u1VM && ((pRegFrame->ss.Sel & X86_SEL_RPL) == 1 || (EMIsRawRing1Enabled(pVM) && (pRegFrame->ss.Sel & X86_SEL_RPL) == 2)), VERR_ACCESS_DENIED);
+#else
     AssertReturn(!pRegFrame->eflags.Bits.u1VM && (pRegFrame->ss.Sel & X86_SEL_RPL) == 1, VERR_ACCESS_DENIED);
+#endif
 
     /* Int 3 in PATM generated code? (most common case) */
@@ -490,4 +498,8 @@
             case OP_CPUID:
             case OP_IRET:
+#ifdef VBOX_WITH_RAW_RING1
+            case OP_SMSW:
+            case OP_MOV:     /* mov xx, CS  */
+#endif
                 break;
 
@@ -498,5 +510,7 @@
             case OP_LSL:
             case OP_LAR:
+#ifndef VBOX_WITH_RAW_RING1
             case OP_SMSW:
+#endif
             case OP_VERW:
             case OP_VERR:
Index: /trunk/src/VBox/VMM/VMMRC/SELMRC.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMRC/SELMRC.cpp	(revision 45275)
+++ /trunk/src/VBox/VMM/VMMRC/SELMRC.cpp	(revision 45276)
@@ -44,5 +44,5 @@
 #endif
 
-
+#ifdef SELM_TRACK_GUEST_GDT_CHANGES
 /**
  * Synchronizes one GDT entry (guest -> shadow).
@@ -123,5 +123,5 @@
      * Convert the guest selector to a shadow selector and update the shadow GDT.
      */
-    selmGuestToShadowDesc(&Desc);
+    selmGuestToShadowDesc(pVM, &Desc);
     PX86DESC pShwDescr = &pVM->selm.s.paGdtRC[iGDTEntry];
     //Log(("O: base=%08X limit=%08X attr=%04X\n", X86DESC_BASE(*pShwDescr)), X86DESC_LIMIT(*pShwDescr), (pShwDescr->au32[1] >> 8) & 0xFFFF ));
@@ -306,6 +306,7 @@
     return rc;
 }
-
-
+#endif /* SELM_TRACK_GUEST_GDT_CHANGES */
+
+#ifdef SELM_TRACK_GUEST_LDT_CHANGES
 /**
  * \#PF Virtual Handler callback for Guest write access to the Guest's own LDT.
@@ -330,6 +331,7 @@
     return VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT;
 }
-
-
+#endif
+
+#ifdef SELM_TRACK_GUEST_TSS_CHANGES
 /**
  * Read wrapper used by selmRCGuestTSSWriteHandler.
@@ -382,5 +384,6 @@
     uint32_t cb;
     int rc = EMInterpretInstructionEx(pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb);
-    if (RT_SUCCESS(rc) && cb)
+    if (    RT_SUCCESS(rc) 
+        &&  cb)
     {
         rc = VINF_SUCCESS;
@@ -403,4 +406,20 @@
             STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
         }
+#ifdef VBOX_WITH_RAW_RING1
+        else
+        if (    EMIsRawRing1Enabled(pVM)
+            &&  PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS(&pGuestTss->padding_ss1)
+            &&  PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS((uint8_t *)pGuestTss + offRange)
+            &&  (    pGuestTss->esp1 !=  pVM->selm.s.Tss.esp2
+                 ||  pGuestTss->ss1  != ((pVM->selm.s.Tss.ss2 & ~2) | 1)) /* undo raw-r1 */
+           )
+        {
+            Log(("selmRCGuestTSSWriteHandler: R1 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
+                 (RTSEL)((pVM->selm.s.Tss.ss2 & ~2) | 1), (RTGCPTR)pVM->selm.s.Tss.esp2, (RTSEL)pGuestTss->ss1, (RTGCPTR)pGuestTss->esp1));
+            pVM->selm.s.Tss.esp2 = pGuestTss->esp1;
+            pVM->selm.s.Tss.ss2  = (pGuestTss->ss1 & ~1) | 2;
+            STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
+        }
+#endif
         /* Handle misaligned TSS in a safe manner (just in case). */
         else if (   offRange >= RT_UOFFSETOF(VBOXTSS, esp0)
@@ -492,6 +511,7 @@
     return rc;
 }
-
-
+#endif /* SELM_TRACK_GUEST_TSS_CHANGES */
+
+#ifdef SELM_TRACK_SHADOW_GDT_CHANGES
 /**
  * \#PF Virtual Handler callback for Guest write access to the VBox shadow GDT.
@@ -512,6 +532,7 @@
     return VERR_SELM_SHADOW_GDT_WRITE;
 }
-
-
+#endif
+
+#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
 /**
  * \#PF Virtual Handler callback for Guest write access to the VBox shadow LDT.
@@ -533,6 +554,7 @@
     return VERR_SELM_SHADOW_LDT_WRITE;
 }
-
-
+#endif
+
+#ifdef SELM_TRACK_SHADOW_TSS_CHANGES
 /**
  * \#PF Virtual Handler callback for Guest write access to the VBox shadow TSS.
@@ -553,3 +575,4 @@
     return VERR_SELM_SHADOW_TSS_WRITE;
 }
-
+#endif
+
Index: /trunk/src/VBox/VMM/VMMRC/TRPMRCHandlers.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMRC/TRPMRCHandlers.cpp	(revision 45275)
+++ /trunk/src/VBox/VMM/VMMRC/TRPMRCHandlers.cpp	(revision 45276)
@@ -304,5 +304,5 @@
     PVM         pVM   = TRPMCPU_2_VM(pTrpmCpu);
     PVMCPU      pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);
-    LogFlow(("TRPMGC01: cs:eip=%04x:%08x uDr6=%RTreg EFL=%x\n", pRegFrame->cs.Sel, pRegFrame->eip, uDr6, CPUMRawGetEFlags(pVCpu)));
+    //LogFlow(("TRPMGC01: cs:eip=%04x:%08x uDr6=%RTreg EFL=%x\n", pRegFrame->cs.Sel, pRegFrame->eip, uDr6, CPUMRawGetEFlags(pVCpu)));
     TRPM_ENTER_DBG_HOOK(1);
 
@@ -445,5 +445,10 @@
      * PATM is using INT3s, let them have a go first.
      */
+#ifdef VBOX_WITH_RAW_RING1
+    if (    (   (pRegFrame->ss.Sel & X86_SEL_RPL) == 1
+             || (EMIsRawRing1Enabled(pVM) && (pRegFrame->ss.Sel & X86_SEL_RPL) == 2))
+#else
     if (    (pRegFrame->ss.Sel & X86_SEL_RPL) == 1
+#endif
         &&  !pRegFrame->eflags.Bits.u1VM)
     {
@@ -523,5 +528,9 @@
     PGMRZDynMapStartAutoSet(pVCpu);
 
+#ifdef VBOX_WITH_RAW_RING1
+    if (CPUMGetGuestCPL(pVCpu) <= (unsigned)(EMIsRawRing1Enabled(pVM) ? 1 : 0))
+#else
     if (CPUMGetGuestCPL(pVCpu) == 0)
+#endif
     {
         /*
@@ -949,5 +958,6 @@
         {
             uint32_t efl = CPUMRawGetEFlags(pVCpu);
-            if (X86_EFL_GET_IOPL(efl) >= (unsigned)(pRegFrame->ss.Sel & X86_SEL_RPL))
+            uint32_t cpl = CPUMRCGetGuestCPL(pVCpu, pRegFrame);
+            if (X86_EFL_GET_IOPL(efl) >= cpl)
             {
                 LogFlow(("trpmGCTrap0dHandlerRing3: CLI/STI -> REM\n"));
@@ -955,5 +965,5 @@
                 return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RESCHEDULE_REM, pRegFrame);
             }
-            LogFlow(("trpmGCTrap0dHandlerRing3: CLI/STI -> #GP(0)\n"));
+            LogFlow(("trpmGCTrap0dHandlerRing3: CLI/STI -> #GP(0) iopl=%x, cpl=%x\n", X86_EFL_GET_IOPL(efl), cpl));
             break;
         }
@@ -1096,5 +1106,5 @@
     if (eflags.Bits.u2IOPL != 3)
     {
-        Assert(eflags.Bits.u2IOPL == 0);
+        Assert(EMIsRawRing1Enabled(pVM) || eflags.Bits.u2IOPL == 0);
 
         rc = TRPMForwardTrap(pVCpu, pRegFrame, 0xD, 0, TRPM_TRAP_HAS_ERRORCODE, TRPM_TRAP, 0xd);
Index: /trunk/src/VBox/VMM/include/PATMA.h
===================================================================
--- /trunk/src/VBox/VMM/include/PATMA.h	(revision 45275)
+++ /trunk/src/VBox/VMM/include/PATMA.h	(revision 45276)
@@ -146,4 +146,5 @@
 extern PATCHASMRECORD PATMPushf16Record;
 extern PATCHASMRECORD PATMIretRecord;
+extern PATCHASMRECORD PATMIretRing1Record;
 extern PATCHASMRECORD PATMCpuidRecord;
 extern PATCHASMRECORD PATMLoopRecord;
Index: /trunk/src/VBox/VMM/include/SELMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/include/SELMInternal.h	(revision 45275)
+++ /trunk/src/VBox/VMM/include/SELMInternal.h	(revision 45276)
@@ -25,4 +25,5 @@
 #include <VBox/log.h>
 #include <iprt/x86.h>
+#include <VBox/vmm/em.h>
 
 
@@ -33,4 +34,23 @@
  * @{
  */
+
+/**
+ * Enable or disable tracking of Shadow GDT/LDT/TSS.
+ * @{
+ */
+#define SELM_TRACK_SHADOW_GDT_CHANGES
+#define SELM_TRACK_SHADOW_LDT_CHANGES
+#define SELM_TRACK_SHADOW_TSS_CHANGES
+/** @} */
+
+/**
+ * Enable or disable tracking of Guest GDT/LDT/TSS.
+ * @{
+ */
+#define SELM_TRACK_GUEST_GDT_CHANGES
+#define SELM_TRACK_GUEST_LDT_CHANGES
+#define SELM_TRACK_GUEST_TSS_CHANGES
+/** @} */
+
 
 /** The number of GDTS allocated for our GDT. (full size) */
@@ -203,4 +223,7 @@
 
 void           selmSetRing1Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp);
+#ifdef VBOX_WITH_RAW_RING1
+void           selmSetRing2Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp);
+#endif
 
 RT_C_DECLS_END
@@ -362,7 +385,8 @@
  * Converts a guest GDT or LDT entry to a shadow table entry.
  *
+ * @param   pVM                 The VM handle.
  * @param   pDesc       Guest entry on input, shadow entry on return.
  */
-DECL_FORCE_INLINE(void) selmGuestToShadowDesc(PX86DESC pDesc)
+DECL_FORCE_INLINE(void) selmGuestToShadowDesc(PVM pVM, PX86DESC pDesc)
 {
     /*
@@ -391,4 +415,15 @@
             pDesc->Gen.u1Available = 1;
         }
+# ifdef VBOX_WITH_RAW_RING1
+        else
+        if (    pDesc->Gen.u2Dpl == 1
+//            &&  EMIsRawRing1Enabled(pVM)
+            &&      (pDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
+                !=  (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
+        {
+            pDesc->Gen.u2Dpl       = 2;
+            pDesc->Gen.u1Available = 1;
+        }
+# endif
         else
             pDesc->Gen.u1Available = 0;
Index: /trunk/src/recompiler/Makefile.kmk
===================================================================
--- /trunk/src/recompiler/Makefile.kmk	(revision 45275)
+++ /trunk/src/recompiler/Makefile.kmk	(revision 45276)
@@ -70,4 +70,7 @@
 ifdef IEM_VERIFICATION_MODE
  VBoxRemPrimary_DEFS          += IEM_VERIFICATION_MODE
+endif
+ifdef VBOX_WITH_RAW_RING1
+ VBoxRemPrimary_DEFS          += VBOX_WITH_RAW_RING1
 endif
 VBoxRemPrimary_DEFS.linux      = _GNU_SOURCE
Index: /trunk/src/recompiler/VBoxRecompiler.c
===================================================================
--- /trunk/src/recompiler/VBoxRecompiler.c	(revision 45275)
+++ /trunk/src/recompiler/VBoxRecompiler.c	(revision 45276)
@@ -1371,5 +1371,5 @@
          */
         case EXCP_EXECUTE_RAW:
-            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
+            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW pc=%RGv\n", pVM->rem.s.Env.eip));
             rc = VINF_EM_RESCHEDULE_RAW;
             break;
@@ -1633,4 +1633,16 @@
         }
 
+# ifdef VBOX_WITH_RAW_RING1
+        /* Only ring 0 and 1 supervisor code. */
+        if (EMIsRawRing1Enabled(env->pVM))
+        {
+            if (((fFlags >> HF_CPL_SHIFT) & 3) == 2)   /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
+            {
+                Log2(("raw r0 mode refused: CPL %d\n", (fFlags >> HF_CPL_SHIFT) & 3));
+                return false;
+            }
+        }
+        else
+# endif
         // Only R0
         if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
@@ -1665,4 +1677,11 @@
 #endif
 
+#ifndef VBOX_WITH_RAW_RING1
+        if (((env->eflags >> IOPL_SHIFT) & 3) != 0)
+        {
+            Log2(("raw r0 mode refused: IOPL %d\n", ((env->eflags >> IOPL_SHIFT) & 3)));
+            return false;
+        }
+#endif
         env->state |= CPU_RAW_RING0;
     }
@@ -1764,5 +1783,5 @@
     if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
         return;
-    Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
+    LogFlow(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
     Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
 
Index: /trunk/src/recompiler/target-i386/op_helper.c
===================================================================
--- /trunk/src/recompiler/target-i386/op_helper.c	(revision 45275)
+++ /trunk/src/recompiler/target-i386/op_helper.c	(revision 45276)
@@ -232,5 +232,7 @@
 #ifdef VBOX
     /* Trying to load a selector with CPL=1? */
-    if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
+    /* @todo this is a hack to correct the incorrect checking order for pending interrupts in the patm iret replacement code (corrected in the ring-1 version) */
+    /* @todo in theory the iret could fault and we'd still need this. */
+    if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0) && !EMIsRawRing1Enabled(env->pVM))
     {
         Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
@@ -238,5 +240,5 @@
     }
 #endif /* VBOX */
-
+        
     if (selector & 0x4)
         dt = &env->ldt;
@@ -345,5 +347,5 @@
     if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
     {
-        Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
+        Log(("RPL 1 -> sel %04X -> %04X (tss_load_seg)\n", selector, selector & 0xfffc));
         selector = selector & 0xfffc;
     }
@@ -2556,7 +2558,9 @@
 
 #ifdef VBOX
-    Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
-         (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
+    Log(("helper_ltr: pc=%RGv old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
+         (RTGCPTR)env->eip, (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
          env->tr.flags, (RTSEL)(selector & 0xffff)));
+    ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request,
+                    CPU_INTERRUPT_EXTERNAL_EXIT);
 #endif
     selector &= 0xffff;
@@ -2637,5 +2641,5 @@
     if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
     {
-        Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
+        Log(("RPL 1 -> sel %04X -> %04X (helper_load_seg)\n", selector, selector & 0xfffc));
         selector = selector & 0xfffc;
     }
@@ -3169,9 +3173,10 @@
         if (is_iret) {
             POPL(ssp, sp, sp_mask, new_eflags);
+#define LOG_GROUP LOG_GROUP_REM
 #if defined(VBOX) && defined(DEBUG)
-            printf("iret: new CS     %04X\n", new_cs);
-            printf("iret: new EIP    %08X\n", (uint32_t)new_eip);
-            printf("iret: new EFLAGS %08X\n", new_eflags);
-            printf("iret: EAX=%08x\n", (uint32_t)EAX);
+            Log(("iret: new CS     %04X (old=%x)\n", new_cs, env->segs[R_CS].selector));
+            Log(("iret: new EIP    %08X\n", (uint32_t)new_eip));
+            Log(("iret: new EFLAGS %08X\n", new_eflags));
+            Log(("iret: EAX=%08x\n", (uint32_t)EAX));
 #endif
             if (new_eflags & VM_MASK)
@@ -3181,9 +3186,29 @@
         if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
         {
-# ifdef DEBUG
-            printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
+# ifdef VBOX_WITH_RAW_RING1
+            if (   !EMIsRawRing1Enabled(env->pVM)
+                ||  env->segs[R_CS].selector == (new_cs & 0xfffc))
+            {
+                Log(("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc));
+                new_cs = new_cs & 0xfffc;
+            }
+            else
+            {
+                /* Ugly assumption: assume a genuine switch to ring-1. */
+                Log(("Genuine switch to ring-1 (iret)\n"));
+            }
+# else
+            Log(("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc));
+            new_cs = new_cs & 0xfffc;
 # endif
-            new_cs = new_cs & 0xfffc;
-        }
+        }
+# ifdef VBOX_WITH_RAW_RING1
+        else
+        if ((new_cs & 0x3) == 2 && (env->state & CPU_RAW_RING0) && EMIsRawRing1Enabled(env->pVM))
+        {
+            Log(("RPL 2 -> new_cs %04X -> %04X\n", new_cs, (new_cs & 0xfffc) | 1));
+            new_cs = (new_cs & 0xfffc) | 1;
+        }
+# endif
 #endif
     } else {
@@ -3200,5 +3225,5 @@
     {
 #if defined(VBOX) && defined(DEBUG)
-        printf("new_cs & 0xfffc) == 0\n");
+        Log(("(new_cs & 0xfffc) == 0\n"));
 #endif
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
@@ -3207,5 +3232,5 @@
     {
 #if defined(VBOX) && defined(DEBUG)
-        printf("load_segment failed\n");
+        Log(("load_segment failed\n"));
 #endif
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
@@ -3215,5 +3240,5 @@
     {
 #if defined(VBOX) && defined(DEBUG)
-        printf("e2 mask %08x\n", e2);
+        Log(("e2 mask %08x\n", e2));
 #endif
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
@@ -3224,14 +3249,15 @@
     {
 #if defined(VBOX) && defined(DEBUG)
-        printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
+        Log(("rpl < cpl (%d vs %d)\n", rpl, cpl));
 #endif
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
     }
     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+
     if (e2 & DESC_C_MASK) {
         if (dpl > rpl)
         {
 #if defined(VBOX) && defined(DEBUG)
-            printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
+            Log(("dpl > rpl (%d vs %d)\n", dpl, rpl));
 #endif
             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
@@ -3241,5 +3267,5 @@
         {
 #if defined(VBOX) && defined(DEBUG)
-            printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
+            Log(("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2));
 #endif
             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
@@ -3249,5 +3275,5 @@
     {
 #if defined(VBOX) && defined(DEBUG)
-        printf("DESC_P_MASK e2=%08x\n", e2);
+        Log(("DESC_P_MASK e2=%08x\n", e2));
 #endif
         raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
