Index: /trunk/include/VBox/vmm/cpum.h
===================================================================
--- /trunk/include/VBox/vmm/cpum.h	(revision 42406)
+++ /trunk/include/VBox/vmm/cpum.h	(revision 42407)
@@ -191,4 +191,5 @@
 VMMDECL(bool)       CPUMIsGuestInLongMode(PVMCPU pVCpu);
 VMMDECL(bool)       CPUMIsGuestInPAEMode(PVMCPU pVCpu);
+VMM_INT_DECL(bool)  CPUMIsGuestInRawMode(PVMCPU pVCpu);
 
 #ifndef VBOX_WITHOUT_UNNAMED_UNIONS
@@ -252,5 +253,5 @@
     if (!(pCtx->msrEFER & MSR_K6_EFER_LMA))
         return false;
-    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(&pCtx->cs))
+    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(NULL, &pCtx->cs))
         return CPUMIsGuestIn64BitCodeSlow(pCtx);
     return pCtx->cs.Attr.n.u1Long;
Index: /trunk/include/VBox/vmm/cpumctx.h
===================================================================
--- /trunk/include/VBox/vmm/cpumctx.h	(revision 42406)
+++ /trunk/include/VBox/vmm/cpumctx.h	(revision 42407)
@@ -81,6 +81,21 @@
 
 /** Checks if the hidden parts of the selector register are valid. */
-#define CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pSelReg) (   ((a_pSelReg)->fFlags & CPUMSELREG_FLAGS_VALID) \
-                                                      && (a_pSelReg)->ValidSel == (a_pSelReg)->Sel )
+#ifdef VBOX_WITH_RAW_MODE_NOT_R0
+# define CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSelReg) \
+    (   ((a_pSelReg)->fFlags & CPUMSELREG_FLAGS_VALID) \
+     && (   (a_pSelReg)->ValidSel == (a_pSelReg)->Sel \
+         || (   (a_pVCpu) != NULL \
+             && (a_pSelReg)->ValidSel == ((a_pSelReg)->Sel & X86_SEL_MASK_RPL) \
+             && ((a_pSelReg)->Sel      & X86_SEL_RPL) == 1 \
+             && ((a_pSelReg)->ValidSel & X86_SEL_RPL) == 0 \
+             && CPUMIsGuestInRawMode(a_pVCpu) \
+            ) \
+        ) \
+    )
+#else
+# define CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSelReg) \
+    (   ((a_pSelReg)->fFlags & CPUMSELREG_FLAGS_VALID) \
+     && (a_pSelReg)->ValidSel == (a_pSelReg)->Sel  )
+#endif
 
 /** Old type used for the hidden register part.
Index: /trunk/include/VBox/vmm/selm.h
===================================================================
--- /trunk/include/VBox/vmm/selm.h	(revision 42406)
+++ /trunk/include/VBox/vmm/selm.h	(revision 42407)
@@ -4,5 +4,5 @@
 
 /*
- * Copyright (C) 2006-2010 Oracle Corporation
+ * Copyright (C) 2006-2012 Oracle Corporation
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as
@@ -39,18 +39,18 @@
  */
 
-VMMDECL(RTSEL)      SELMGetTrap8Selector(PVM pVM);
-VMMDECL(void)       SELMSetTrap8EIP(PVM pVM, uint32_t u32EIP);
-VMMDECL(int)        SELMGetRing1Stack(PVM pVM, uint32_t *pSS, PRTGCPTR32 pEsp);
-VMMDECL(RTGCPTR)    SELMGetGuestTSS(PVM pVM);
-VMMDECL(RTSEL)      SELMGetHyperCS(PVM pVM);
-VMMDECL(RTSEL)      SELMGetHyperCS64(PVM pVM);
-VMMDECL(RTSEL)      SELMGetHyperDS(PVM pVM);
-VMMDECL(RTSEL)      SELMGetHyperTSS(PVM pVM);
-VMMDECL(RTSEL)      SELMGetHyperTSSTrap08(PVM pVM);
-VMMDECL(RTRCPTR)    SELMGetHyperGDT(PVM pVM);
-VMMDECL(int)        SELMGetTSSInfo(PVM pVM, PVMCPU pVCpu, PRTGCUINTPTR pGCPtrTss, PRTGCUINTPTR pcbTss, bool *pfCanHaveIOBitmap);
-VMMDECL(RTGCPTR)    SELMToFlat(PVM pVM, DISSELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr);
-VMMDECL(RTGCPTR)    SELMToFlatBySel(PVM pVM, RTSEL Sel, RTGCPTR Addr);
-VMMDECL(void)       SELMShadowCR3Changed(PVM pVM, PVMCPU pVCpu);
+VMMDECL(RTSEL)          SELMGetTrap8Selector(PVM pVM);
+VMMDECL(void)           SELMSetTrap8EIP(PVM pVM, uint32_t u32EIP);
+VMMDECL(int)            SELMGetRing1Stack(PVM pVM, uint32_t *pSS, PRTGCPTR32 pEsp);
+VMMDECL(RTGCPTR)        SELMGetGuestTSS(PVM pVM);
+VMMDECL(RTSEL)          SELMGetHyperCS(PVM pVM);
+VMMDECL(RTSEL)          SELMGetHyperCS64(PVM pVM);
+VMMDECL(RTSEL)          SELMGetHyperDS(PVM pVM);
+VMMDECL(RTSEL)          SELMGetHyperTSS(PVM pVM);
+VMMDECL(RTSEL)          SELMGetHyperTSSTrap08(PVM pVM);
+VMMDECL(RTRCPTR)        SELMGetHyperGDT(PVM pVM);
+VMMDECL(int)            SELMGetTSSInfo(PVM pVM, PVMCPU pVCpu, PRTGCUINTPTR pGCPtrTss, PRTGCUINTPTR pcbTss, bool *pfCanHaveIOBitmap);
+VMMDECL(RTGCPTR)        SELMToFlat(PVM pVM, DISSELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr);
+VMMDECL(RTGCPTR)        SELMToFlatBySel(PVM pVM, RTSEL Sel, RTGCPTR Addr);
+VMMDECL(void)           SELMShadowCR3Changed(PVM pVM, PVMCPU pVCpu);
 
 /** Flags for SELMToFlatEx().
@@ -74,13 +74,13 @@
 /** @} */
 
-VMMDECL(int)        SELMToFlatEx(PVMCPU pVCpu, DISSELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr, uint32_t fFlags,
-                                 PRTGCPTR ppvGC);
-VMMDECL(int)        SELMToFlatBySelEx(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL Sel, RTGCPTR Addr, uint32_t fFlags,
-                                      PRTGCPTR ppvGC, uint32_t *pcb);
-VMMDECL(int)        SELMValidateAndConvertCSAddr(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL SelCPL, RTSEL SelCS, PCPUMSELREG pSRegCS,
-                                                 RTGCPTR Addr, PRTGCPTR ppvFlat);
-VMMDECL(int)        SELMGetLDTFromSel(PVM pVM, RTSEL SelLdt, PRTGCPTR ppvLdt, unsigned *pcbLimit);
+VMMDECL(int)            SELMToFlatEx(PVMCPU pVCpu, DISSELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr, uint32_t fFlags,
+                                     PRTGCPTR ppvGC);
+VMMDECL(int)            SELMToFlatBySelEx(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL Sel, RTGCPTR Addr, uint32_t fFlags,
+                                          PRTGCPTR ppvGC, uint32_t *pcb);
+VMMDECL(int)            SELMValidateAndConvertCSAddr(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL SelCPL, RTSEL SelCS,
+                                                     PCPUMSELREG pSRegCS, RTGCPTR Addr, PRTGCPTR ppvFlat);
+VMMDECL(int)            SELMGetLDTFromSel(PVM pVM, RTSEL SelLdt, PRTGCPTR ppvLdt, unsigned *pcbLimit);
 #ifdef VBOX_WITH_RAW_MODE
-VMM_INT_DECL(void)  SELMLoadHiddenSelectorReg(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUMSELREG pSReg);
+VMM_INT_DECL(void)      SELMLoadHiddenSelectorReg(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUMSELREG pSReg);
 #endif
 
@@ -91,21 +91,21 @@
  * @{
  */
-VMMR3DECL(int)      SELMR3Init(PVM pVM);
-VMMR3DECL(int)      SELMR3InitFinalize(PVM pVM);
-VMMR3DECL(void)     SELMR3Relocate(PVM pVM);
-VMMR3DECL(int)      SELMR3Term(PVM pVM);
-VMMR3DECL(void)     SELMR3Reset(PVM pVM);
-VMMR3DECL(int)      SELMR3UpdateFromCPUM(PVM pVM, PVMCPU pVCpu);
-VMMR3DECL(int)      SELMR3SyncTSS(PVM pVM, PVMCPU pVCpu);
-VMMR3DECL(int)      SELMR3GetSelectorInfo(PVM pVM, PVMCPU pVCpu, RTSEL Sel, PDBGFSELINFO pSelInfo);
-VMMR3DECL(int)      SELMR3GetShadowSelectorInfo(PVM pVM, RTSEL Sel, PDBGFSELINFO pSelInfo);
-VMMR3DECL(void)     SELMR3DisableMonitoring(PVM pVM);
-VMMR3DECL(void)     SELMR3DumpDescriptor(X86DESC  Desc, RTSEL Sel, const char *pszMsg);
-VMMR3DECL(void)     SELMR3DumpHyperGDT(PVM pVM);
-VMMR3DECL(void)     SELMR3DumpHyperLDT(PVM pVM);
-VMMR3DECL(void)     SELMR3DumpGuestGDT(PVM pVM);
-VMMR3DECL(void)     SELMR3DumpGuestLDT(PVM pVM);
-VMMR3DECL(bool)     SELMR3CheckTSS(PVM pVM);
-VMMR3DECL(int)      SELMR3DebugCheck(PVM pVM);
+VMMR3DECL(int)          SELMR3Init(PVM pVM);
+VMMR3DECL(int)          SELMR3InitFinalize(PVM pVM);
+VMMR3DECL(void)         SELMR3Relocate(PVM pVM);
+VMMR3DECL(int)          SELMR3Term(PVM pVM);
+VMMR3DECL(void)         SELMR3Reset(PVM pVM);
+VMMR3DECL(VBOXSTRICTRC) SELMR3UpdateFromCPUM(PVM pVM, PVMCPU pVCpu);
+VMMR3DECL(int)          SELMR3SyncTSS(PVM pVM, PVMCPU pVCpu);
+VMMR3DECL(int)          SELMR3GetSelectorInfo(PVM pVM, PVMCPU pVCpu, RTSEL Sel, PDBGFSELINFO pSelInfo);
+VMMR3DECL(int)          SELMR3GetShadowSelectorInfo(PVM pVM, RTSEL Sel, PDBGFSELINFO pSelInfo);
+VMMR3DECL(void)         SELMR3DisableMonitoring(PVM pVM);
+VMMR3DECL(void)         SELMR3DumpDescriptor(X86DESC  Desc, RTSEL Sel, const char *pszMsg);
+VMMR3DECL(void)         SELMR3DumpHyperGDT(PVM pVM);
+VMMR3DECL(void)         SELMR3DumpHyperLDT(PVM pVM);
+VMMR3DECL(void)         SELMR3DumpGuestGDT(PVM pVM);
+VMMR3DECL(void)         SELMR3DumpGuestLDT(PVM pVM);
+VMMR3DECL(bool)         SELMR3CheckTSS(PVM pVM);
+VMMR3DECL(int)          SELMR3DebugCheck(PVM pVM);
 /** @def SELMR3_DEBUG_CHECK
  * Invokes SELMR3DebugCheck in stricts builds. */
Index: /trunk/include/VBox/vmm/vm.h
===================================================================
--- /trunk/include/VBox/vmm/vm.h	(revision 42406)
+++ /trunk/include/VBox/vmm/vm.h	(revision 42407)
@@ -496,5 +496,7 @@
  * @param   fFlag   The flag to check.
  */
-#define VM_FF_ISSET(pVM, fFlag)             (((pVM)->fGlobalForcedActions & (fFlag)) == (fFlag))
+#define VM_FF_IS_SET(pVM, fFlag)            (((pVM)->fGlobalForcedActions & (fFlag)) == (fFlag))
+/** @deprecated  */
+#define VM_FF_ISSET(pVM, fFlag)             VM_FF_IS_SET(pVM, fFlag)
 
 /** @def VMCPU_FF_ISSET
@@ -504,5 +506,7 @@
  * @param   fFlag   The flag to check.
  */
-#define VMCPU_FF_ISSET(pVCpu, fFlag)        (((pVCpu)->fLocalForcedActions & (fFlag)) == (fFlag))
+#define VMCPU_FF_IS_SET(pVCpu, fFlag)       (((pVCpu)->fLocalForcedActions & (fFlag)) == (fFlag))
+/** @deprecated  */
+#define VMCPU_FF_ISSET(pVCpu, fFlag)        VMCPU_FF_IS_SET(pVCpu, fFlag)
 
 /** @def VM_FF_ISPENDING
@@ -512,5 +516,7 @@
  * @param   fFlags  The flags to check for.
  */
-#define VM_FF_ISPENDING(pVM, fFlags)        ((pVM)->fGlobalForcedActions & (fFlags))
+#define VM_FF_IS_PENDING(pVM, fFlags)       ((pVM)->fGlobalForcedActions & (fFlags))
+/** @deprecated  */
+#define VM_FF_ISPENDING(pVM, fFlags)        VM_FF_IS_PENDING(pVM, fFlags)
 
 /** @def VM_FF_TESTANDCLEAR
@@ -522,5 +528,7 @@
  * @param   iBit    Bit position to check and clear
  */
-#define VM_FF_TESTANDCLEAR(pVM, iBit)        (ASMAtomicBitTestAndClear(&(pVM)->fGlobalForcedActions, iBit##_BIT))
+#define VM_FF_TEST_AND_CLEAR(pVM, iBit)     (ASMAtomicBitTestAndClear(&(pVM)->fGlobalForcedActions, iBit##_BIT))
+/** @deprecated  */
+#define VM_FF_TESTANDCLEAR(pVM, iBit)       (ASMAtomicBitTestAndClear(&(pVM)->fGlobalForcedActions, iBit##_BIT))
 
 /** @def VMCPU_FF_TESTANDCLEAR
@@ -532,5 +540,7 @@
  * @param   iBit    Bit position to check and clear
  */
-#define VMCPU_FF_TESTANDCLEAR(pVCpu, iBit)    (ASMAtomicBitTestAndClear(&(pVCpu)->fLocalForcedActions, iBit##_BIT))
+#define VMCPU_FF_TEST_AND_CLEAR(pVCpu, iBit) (ASMAtomicBitTestAndClear(&(pVCpu)->fLocalForcedActions, iBit##_BIT))
+/** @deprecated  */
+#define VMCPU_FF_TESTANDCLEAR(pVCpu, iBit)  (ASMAtomicBitTestAndClear(&(pVCpu)->fLocalForcedActions, iBit##_BIT))
 
 /** @def VMCPU_FF_ISPENDING
@@ -540,5 +550,7 @@
  * @param   fFlags  The flags to check for.
  */
-#define VMCPU_FF_ISPENDING(pVCpu, fFlags) ((pVCpu)->fLocalForcedActions & (fFlags))
+#define VMCPU_FF_IS_PENDING(pVCpu, fFlags)  ((pVCpu)->fLocalForcedActions & (fFlags))
+/** @deprecated  */
+#define VMCPU_FF_ISPENDING(pVCpu, fFlags)   VMCPU_FF_IS_PENDING(pVCpu, fFlags)
 
 /** @def VM_FF_ISPENDING
@@ -942,5 +954,5 @@
         struct SELM s;
 #endif
-        uint8_t     padding[576];       /* multiple of 64 */
+        uint8_t     padding[768];       /* multiple of 64 */
     } selm;
 
@@ -1066,5 +1078,5 @@
 
     /** Padding for aligning the cpu array on a page boundary. */
-    uint8_t         abAlignment2[734];
+    uint8_t         abAlignment2[542];
 
     /* ---- end small stuff ---- */
Index: /trunk/include/iprt/x86.h
===================================================================
--- /trunk/include/iprt/x86.h	(revision 42406)
+++ /trunk/include/iprt/x86.h	(revision 42407)
@@ -2385,21 +2385,32 @@
  * Return the base address of a descriptor.
  */
-#define X86DESC_BASE(desc) /*ASM-NOINC*/ \
-        (  ((uint32_t)((desc).Gen.u8BaseHigh2) << 24) \
-         | (           (desc).Gen.u8BaseHigh1  << 16) \
-         | (           (desc).Gen.u16BaseLow        ) )
+#define X86DESC_BASE(a_pDesc) /*ASM-NOINC*/ \
+        (  ((uint32_t)((a_pDesc)->Gen.u8BaseHigh2) << 24) \
+         | (           (a_pDesc)->Gen.u8BaseHigh1  << 16) \
+         | (           (a_pDesc)->Gen.u16BaseLow        ) )
 
 /** @def X86DESC_LIMIT
  * Return the limit of a descriptor.
  */
-#define X86DESC_LIMIT(desc) /*ASM-NOINC*/ \
-        (  ((uint32_t)((desc).Gen.u4LimitHigh) << 16) \
-         | (           (desc).Gen.u16LimitLow       ) )
+#define X86DESC_LIMIT(a_pDesc) /*ASM-NOINC*/ \
+        (  ((uint32_t)((a_pDesc)->Gen.u4LimitHigh) << 16) \
+         | (           (a_pDesc)->Gen.u16LimitLow       ) )
+
+/** @def X86DESC_LIMIT_G
+ * Return the limit of a descriptor with the granularity bit taken into account.
+ * @returns Selector limit (uint32_t).
+ * @param   a_pDesc     Pointer to the descriptor.
+ */
+#define X86DESC_LIMIT_G(a_pDesc) /*ASM-NOINC*/ \
+        ( (a_pDesc)->Gen.u1Granularity \
+         ? ( ( ((uint32_t)(a_pDesc)->Gen.u4LimitHigh << 16) | (a_pDesc)->Gen.u16LimitLow ) << 12 ) | UINT32_C(0xfff) \
+         :     ((uint32_t)(a_pDesc)->Gen.u4LimitHigh << 16) | (a_pDesc)->Gen.u16LimitLow \
+        )
 
 /** @def X86DESC_GET_HID_ATTR
  * Get the descriptor attributes for the hidden register.
  */
-#define X86DESC_GET_HID_ATTR(desc) /*ASM-NOINC*/ \
-        ( (desc.u >> (16+16+8)) & UINT32_C(0xf0ff) ) /** @todo do we have a define for 0xf0ff? */
+#define X86DESC_GET_HID_ATTR(a_pDesc) /*ASM-NOINC*/ \
+        ( ((a_pDesc)->u >> (16+16+8)) & UINT32_C(0xf0ff) ) /** @todo do we have a define for 0xf0ff? */
 
 #ifndef VBOX_FOR_DTRACE_LIB
@@ -2578,9 +2589,9 @@
  * Return the base of a 64-bit descriptor.
  */
-#define X86DESC64_BASE(desc) /*ASM-NOINC*/ \
-        (  ((uint64_t)((desc).Gen.u32BaseHigh3) << 32) \
-         | ((uint32_t)((desc).Gen.u8BaseHigh2)  << 24) \
-         | (           (desc).Gen.u8BaseHigh1   << 16) \
-         | (           (desc).Gen.u16BaseLow         ) )
+#define X86DESC64_BASE(a_pDesc) /*ASM-NOINC*/ \
+        (  ((uint64_t)((a_pDesc)->Gen.u32BaseHigh3) << 32) \
+         | ((uint32_t)((a_pDesc)->Gen.u8BaseHigh2)  << 24) \
+         | (           (a_pDesc)->Gen.u8BaseHigh1   << 16) \
+         | (           (a_pDesc)->Gen.u16BaseLow         ) )
 
 
@@ -2947,7 +2958,12 @@
 
 /**
- * The mask used to mask off the table indicator and CPL of an selector.
+ * The mask used to mask off the table indicator and RPL of a selector.
  */
 #define X86_SEL_MASK        0xfff8U
+
+/**
+ * The mask used to mask off the RPL of a selector.
+ */
+#define X86_SEL_MASK_RPL    0xfffcU
 
 /**
Index: /trunk/src/VBox/Debugger/DBGCEmulateCodeView.cpp
===================================================================
--- /trunk/src/VBox/Debugger/DBGCEmulateCodeView.cpp	(revision 42406)
+++ /trunk/src/VBox/Debugger/DBGCEmulateCodeView.cpp	(revision 42407)
@@ -1758,8 +1758,6 @@
         const char *pszGranularity = pDesc->Gen.u1Granularity ? "G" : " ";
         const char *pszBig = pDesc->Gen.u1DefBig ? "BIG" : "   ";
-        uint32_t u32Base = X86DESC_BASE(*pDesc);
-        uint32_t cbLimit = X86DESC_LIMIT(*pDesc);
-        if (pDesc->Gen.u1Granularity)
-            cbLimit <<= PAGE_SHIFT;
+        uint32_t u32Base = X86DESC_BASE(pDesc);
+        uint32_t cbLimit = X86DESC_LIMIT_G(pDesc);
 
         rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL, "%04x %s Bas=%08x Lim=%08x DPL=%d %s %s %s %s AVL=%d L=%d%s\n",
@@ -1815,6 +1813,6 @@
                 const char *pszLong        = pDesc->Gen.u1Long ? "LONG" : "   ";
 
-                uint64_t u32Base = X86DESC64_BASE(*pDesc);
-                uint32_t cbLimit = X86DESC_LIMIT(*pDesc);
+                uint64_t u32Base = X86DESC64_BASE(pDesc);
+                uint32_t cbLimit = X86DESC_LIMIT_G(pDesc);
 
                 rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL, "%04x %s Bas=%016RX64 Lim=%08x DPL=%d %s %s %s %sAVL=%d R=%d%s\n",
Index: /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp	(revision 42406)
+++ /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp	(revision 42407)
@@ -65,13 +65,13 @@
  */
 #if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
-# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg, a_fIsCS) \
+# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
     do \
     { \
-        if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pSReg)) \
-            cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg, a_fIsCS); \
+        if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)) \
+            cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg); \
     } while (0)
 #else
-# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg, a_fIsCS) \
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pSReg));
+# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg));
 #endif
 
@@ -85,10 +85,10 @@
  * @param   pVCpu       The current Virtual CPU.
  * @param   pSReg       The selector register to lazily load hidden parts of.
- * @param   fIsCS
- */
-static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg, bool fIsCS)
-{
-    Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg));
+ */
+static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
+{
+    Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
     Assert(!HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM)));
+    Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT);
 
     if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
@@ -96,7 +96,8 @@
         /* V8086 mode - Tightly controlled environment, no question about the limit or flags. */
         pSReg->Attr.u               = 0;
+        pSReg->Attr.n.u4Type        = pSReg == &pVCpu->cpum.s.Guest.cs ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
         pSReg->Attr.n.u1DescType    = 1; /* code/data segment */
+        pSReg->Attr.n.u2Dpl         = 3;
         pSReg->Attr.n.u1Present     = 1;
-        pSReg->Attr.n.u4Type        = fIsCS ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
         pSReg->u32Limit             = 0x0000ffff;
         pSReg->u64Base              = (uint32_t)pSReg->Sel << 4;
@@ -140,6 +141,6 @@
 VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu)
 {
-    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs, true);
-    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss, false);
+    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
+    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
 }
 
@@ -152,5 +153,5 @@
 VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
 {
-    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg, pSReg == &pVCpu->cpum.s.Guest.cs);
+    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg);
 }
 
@@ -565,5 +566,8 @@
 VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
 {
-    pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
+    pVCpu->cpum.s.Guest.ldtr.Sel      = ldtr;
+    /* The caller will set more hidden bits if it has them. */
+    pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
+    pVCpu->cpum.s.Guest.ldtr.fFlags   = 0;
     pVCpu->cpum.s.fChanged  |= CPUM_CHANGED_LDTR;
     return VINF_SUCCESS;
@@ -2332,5 +2336,5 @@
     if (!CPUMIsGuestInLongMode(pVCpu))
         return false;
-    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs, true);
+    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
     return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
 }
@@ -2349,4 +2353,16 @@
 }
 
+#ifdef VBOX_WITH_RAW_MODE_NOT_R0
+/**
+ * Checks whether the guest CPU is currently executing in raw-mode.
+ * @returns @c true if we've entered raw-mode and selectors with RPL=1 are
+ *          really RPL=0, @c false if we've not (RPL=1 really is RPL=1).
+ * @param   pVCpu       The current virtual CPU.
+ */
+VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu)
+{
+    return pVCpu->cpum.s.fRawEntered;
+}
+#endif
 
 #ifdef VBOX_WITH_RAW_MODE_NOT_R0
@@ -2561,5 +2577,5 @@
         if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
         {
-            if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(&pVCpu->cpum.s.Guest.ss))
+            if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
                 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
             else
@@ -2620,5 +2636,5 @@
     }
 
-    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs, true);
+    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
     if (   pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
         && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
@@ -2643,5 +2659,5 @@
     }
 
-    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs, true);
+    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
     if (   pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
         && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
Index: /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp	(revision 42406)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp	(revision 42407)
@@ -1536,10 +1536,4 @@
     }
 
-    if (   (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
-        || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
-    {
-        Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
-        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
-    }
     if (    (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
         || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
@@ -1914,7 +1908,5 @@
                            ? Idte.Gate.u16OffsetLow
                            : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
-    uint32_t cbLimitCS = X86DESC_LIMIT(DescCS.Legacy);
-    if (DescCS.Legacy.Gen.u1Granularity)
-        cbLimitCS = (cbLimitCS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+    uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
     if (uNewEip > cbLimitCS)
     {
@@ -1951,7 +1943,5 @@
 
         /* Check that there is sufficient space for the stack frame. */
-        uint32_t cbLimitSS = X86DESC_LIMIT(DescSS.Legacy);
-        if (DescSS.Legacy.Gen.u1Granularity)
-            cbLimitSS = (cbLimitSS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+        uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
         AssertReturn(!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
 
@@ -1972,5 +1962,5 @@
         RTPTRUNION uStackFrame;
         rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
-                             uNewEsp - cbStackFrame + X86DESC_BASE(DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
+                             uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;
@@ -2016,6 +2006,6 @@
         pCtx->ss.fFlags         = CPUMSELREG_FLAGS_VALID;
         pCtx->ss.u32Limit       = cbLimitSS;
-        pCtx->ss.u64Base        = X86DESC_BASE(DescSS.Legacy);
-        pCtx->ss.Attr.u         = X86DESC_GET_HID_ATTR(DescSS.Legacy);
+        pCtx->ss.u64Base        = X86DESC_BASE(&DescSS.Legacy);
+        pCtx->ss.Attr.u         = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
         pCtx->rsp               = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
         pIemCpu->uCpl           = uNewCpl;
@@ -2064,6 +2054,6 @@
     pCtx->cs.fFlags         = CPUMSELREG_FLAGS_VALID;
     pCtx->cs.u32Limit       = cbLimitCS;
-    pCtx->cs.u64Base        = X86DESC_BASE(DescCS.Legacy);
-    pCtx->cs.Attr.u         = X86DESC_GET_HID_ATTR(DescCS.Legacy);
+    pCtx->cs.u64Base        = X86DESC_BASE(&DescCS.Legacy);
+    pCtx->cs.Attr.u         = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
 
     pCtx->rip               = uNewEip;
@@ -2682,8 +2672,8 @@
     }
 #ifdef VBOX_WITH_RAW_MODE_NOT_R0
-    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg))
+    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
         CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
 #else
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
 #endif
     return pSReg;
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h	(revision 42406)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h	(revision 42407)
@@ -986,7 +986,5 @@
        here, but that is ruled out by offSeg being 32-bit, right?) */
     uint64_t u64Base;
-    uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
-    if (Desc.Legacy.Gen.u1Granularity)
-        cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+    uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
     if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
         u64Base = 0;
@@ -998,5 +996,5 @@
             return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
         }
-        u64Base = X86DESC_BASE(Desc.Legacy);
+        u64Base = X86DESC_BASE(&Desc.Legacy);
     }
 
@@ -1021,5 +1019,5 @@
     pCtx->cs.ValidSel    = pCtx->cs.Sel;
     pCtx->cs.fFlags      = CPUMSELREG_FLAGS_VALID;
-    pCtx->cs.Attr.u      = X86DESC_GET_HID_ATTR(Desc.Legacy);
+    pCtx->cs.Attr.u      = X86DESC_GET_HID_ATTR(&Desc.Legacy);
     pCtx->cs.u32Limit    = cbLimit;
     pCtx->cs.u64Base     = u64Base;
@@ -1180,8 +1178,5 @@
     /* Limit / canonical check. */
     uint64_t u64Base;
-    uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
-    if (Desc.Legacy.Gen.u1Granularity)
-        cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
-
+    uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
     if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
     {
@@ -1200,5 +1195,5 @@
             return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
         }
-        u64Base = X86DESC_BASE(Desc.Legacy);
+        u64Base = X86DESC_BASE(&Desc.Legacy);
     }
 
@@ -1245,5 +1240,5 @@
     pCtx->cs.ValidSel    = pCtx->cs.Sel;
     pCtx->cs.fFlags      = CPUMSELREG_FLAGS_VALID;
-    pCtx->cs.Attr.u      = X86DESC_GET_HID_ATTR(Desc.Legacy);
+    pCtx->cs.Attr.u      = X86DESC_GET_HID_ATTR(&Desc.Legacy);
     pCtx->cs.u32Limit    = cbLimit;
     pCtx->cs.u64Base     = u64Base;
@@ -1485,14 +1480,9 @@
 
         /* Calc SS limit.*/
-        uint32_t cbLimitSs = X86DESC_LIMIT(DescSs.Legacy);
-        if (DescSs.Legacy.Gen.u1Granularity)
-            cbLimitSs = (cbLimitSs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
-
+        uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
 
         /* Is RIP canonical or within CS.limit? */
         uint64_t u64Base;
-        uint32_t cbLimitCs = X86DESC_LIMIT(DescCs.Legacy);
-        if (DescCs.Legacy.Gen.u1Granularity)
-            cbLimitCs = (cbLimitCs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+        uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
 
         if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
@@ -1513,5 +1503,5 @@
                 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
             }
-            u64Base = X86DESC_BASE(DescCs.Legacy);
+            u64Base = X86DESC_BASE(&DescCs.Legacy);
         }
 
@@ -1553,5 +1543,5 @@
         pCtx->cs.ValidSel       = uNewCs;
         pCtx->cs.fFlags         = CPUMSELREG_FLAGS_VALID;
-        pCtx->cs.Attr.u         = X86DESC_GET_HID_ATTR(DescCs.Legacy);
+        pCtx->cs.Attr.u         = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
         pCtx->cs.u32Limit       = cbLimitCs;
         pCtx->cs.u64Base        = u64Base;
@@ -1560,10 +1550,10 @@
         pCtx->ss.ValidSel       = uNewOuterSs;
         pCtx->ss.fFlags         = CPUMSELREG_FLAGS_VALID;
-        pCtx->ss.Attr.u         = X86DESC_GET_HID_ATTR(DescSs.Legacy);
+        pCtx->ss.Attr.u         = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
         pCtx->ss.u32Limit       = cbLimitSs;
         if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
             pCtx->ss.u64Base    = 0;
         else
-            pCtx->ss.u64Base    = X86DESC_BASE(DescSs.Legacy);
+            pCtx->ss.u64Base    = X86DESC_BASE(&DescSs.Legacy);
 
         pIemCpu->uCpl           = (uNewCs & X86_SEL_RPL);
@@ -1588,7 +1578,5 @@
         /* Limit / canonical check. */
         uint64_t u64Base;
-        uint32_t cbLimitCs = X86DESC_LIMIT(DescCs.Legacy);
-        if (DescCs.Legacy.Gen.u1Granularity)
-            cbLimitCs = (cbLimitCs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+        uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
 
         if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
@@ -1608,5 +1596,5 @@
                 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
             }
-            u64Base = X86DESC_BASE(DescCs.Legacy);
+            u64Base = X86DESC_BASE(&DescCs.Legacy);
         }
 
@@ -1638,5 +1626,5 @@
         pCtx->cs.ValidSel   = uNewCs;
         pCtx->cs.fFlags     = CPUMSELREG_FLAGS_VALID;
-        pCtx->cs.Attr.u     = X86DESC_GET_HID_ATTR(DescCs.Legacy);
+        pCtx->cs.Attr.u     = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
         pCtx->cs.u32Limit   = cbLimitCs;
         pCtx->cs.u64Base    = u64Base;
@@ -2019,7 +2007,5 @@
             }
 
-            uint32_t cbLimitCS = X86DESC_LIMIT(DescCS.Legacy);
-            if (DescCS.Legacy.Gen.u1Granularity)
-                cbLimitCS = (cbLimitCS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+            uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
 
             /*
@@ -2100,7 +2086,5 @@
                 }
 
-                uint32_t cbLimitSs = X86DESC_LIMIT(DescSS.Legacy);
-                if (DescSS.Legacy.Gen.u1Granularity)
-                    cbLimitSs = (cbLimitSs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+                uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
 
                 /* Check EIP. */
@@ -2135,14 +2119,14 @@
                 pCtx->cs.ValidSel   = uNewCs;
                 pCtx->cs.fFlags     = CPUMSELREG_FLAGS_VALID;
-                pCtx->cs.Attr.u     = X86DESC_GET_HID_ATTR(DescCS.Legacy);
+                pCtx->cs.Attr.u     = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
                 pCtx->cs.u32Limit   = cbLimitCS;
-                pCtx->cs.u64Base    = X86DESC_BASE(DescCS.Legacy);
+                pCtx->cs.u64Base    = X86DESC_BASE(&DescCS.Legacy);
                 pCtx->rsp           = uNewESP;
                 pCtx->ss.Sel        = uNewSS;
                 pCtx->ss.ValidSel   = uNewSS;
                 pCtx->ss.fFlags     = CPUMSELREG_FLAGS_VALID;
-                pCtx->ss.Attr.u     = X86DESC_GET_HID_ATTR(DescSS.Legacy);
+                pCtx->ss.Attr.u     = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
                 pCtx->ss.u32Limit   = cbLimitSs;
-                pCtx->ss.u64Base    = X86DESC_BASE(DescSS.Legacy);
+                pCtx->ss.u64Base    = X86DESC_BASE(&DescSS.Legacy);
 
                 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF  | X86_EFL_SF
@@ -2193,7 +2177,7 @@
                 pCtx->cs.ValidSel   = uNewCs;
                 pCtx->cs.fFlags     = CPUMSELREG_FLAGS_VALID;
-                pCtx->cs.Attr.u     = X86DESC_GET_HID_ATTR(DescCS.Legacy);
+                pCtx->cs.Attr.u     = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
                 pCtx->cs.u32Limit   = cbLimitCS;
-                pCtx->cs.u64Base    = X86DESC_BASE(DescCS.Legacy);
+                pCtx->cs.u64Base    = X86DESC_BASE(&DescCS.Legacy);
                 pCtx->rsp           = uNewRsp;
 
@@ -2357,10 +2341,4 @@
     if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
     {
-        if (   (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
-            || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
-        {
-            Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
-            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
-        }
         if (    (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
             || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
@@ -2423,14 +2401,11 @@
 
     /* The base and limit. */
+    uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
     uint64_t u64Base;
-    uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
-    if (Desc.Legacy.Gen.u1Granularity)
-        cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
-
     if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
         && iSegReg < X86_SREG_FS)
         u64Base = 0;
     else
-        u64Base = X86DESC_BASE(Desc.Legacy);
+        u64Base = X86DESC_BASE(&Desc.Legacy);
 
     /*
@@ -2448,5 +2423,5 @@
     /* commit */
     *pSel = uSel;
-    pHid->Attr.u   = X86DESC_GET_HID_ATTR(Desc.Legacy);
+    pHid->Attr.u   = X86DESC_GET_HID_ATTR(&Desc.Legacy);
     pHid->u32Limit = cbLimit;
     pHid->u64Base  = u64Base;
@@ -2723,5 +2698,5 @@
     uint64_t u64Base;
     if (!IEM_IS_LONG_MODE(pIemCpu))
-        u64Base = X86DESC_BASE(Desc.Legacy);
+        u64Base = X86DESC_BASE(&Desc.Legacy);
     else
     {
@@ -2732,5 +2707,5 @@
         }
 
-        u64Base = X86DESC64_BASE(Desc.Long);
+        u64Base = X86DESC64_BASE(&Desc.Long);
         if (!IEM_IS_CANONICAL(u64Base))
         {
@@ -2757,6 +2732,6 @@
     pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK;
     pCtx->ldtr.fFlags   = CPUMSELREG_FLAGS_VALID;
-    pCtx->ldtr.Attr.u   = X86DESC_GET_HID_ATTR(Desc.Legacy);
-    pCtx->ldtr.u32Limit = X86DESC_LIMIT(Desc.Legacy);
+    pCtx->ldtr.Attr.u   = X86DESC_GET_HID_ATTR(&Desc.Legacy);
+    pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
     pCtx->ldtr.u64Base  = u64Base;
 
@@ -2822,5 +2797,5 @@
     uint64_t u64Base;
     if (!IEM_IS_LONG_MODE(pIemCpu))
-        u64Base = X86DESC_BASE(Desc.Legacy);
+        u64Base = X86DESC_BASE(&Desc.Legacy);
     else
     {
@@ -2831,5 +2806,5 @@
         }
 
-        u64Base = X86DESC64_BASE(Desc.Long);
+        u64Base = X86DESC64_BASE(&Desc.Long);
         if (!IEM_IS_CANONICAL(u64Base))
         {
@@ -2878,6 +2853,6 @@
     pCtx->tr.ValidSel = uNewTr & X86_SEL_MASK;
     pCtx->tr.fFlags   = CPUMSELREG_FLAGS_VALID;
-    pCtx->tr.Attr.u   = X86DESC_GET_HID_ATTR(Desc.Legacy);
-    pCtx->tr.u32Limit = X86DESC_LIMIT(Desc.Legacy);
+    pCtx->tr.Attr.u   = X86DESC_GET_HID_ATTR(&Desc.Legacy);
+    pCtx->tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
     pCtx->tr.u64Base  = u64Base;
 
Index: /trunk/src/VBox/VMM/VMMAll/SELMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/SELMAll.cpp	(revision 42406)
+++ /trunk/src/VBox/VMM/VMMAll/SELMAll.cpp	(revision 42407)
@@ -31,7 +31,15 @@
 #include <VBox/param.h>
 #include <iprt/assert.h>
-#include <VBox/log.h>
 #include <VBox/vmm/vmm.h>
 #include <iprt/x86.h>
+
+
+/*******************************************************************************
+*   Global Variables                                                           *
+*******************************************************************************/
+#if defined(LOG_ENABLED) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
+/** Segment register names. */
+static char const g_aszSRegNms[X86_SREG_COUNT][4] = { "ES", "CS", "SS", "DS", "FS", "GS" };
+#endif
 
 
@@ -65,5 +73,5 @@
     }
 
-    return (RTGCPTR)(((RTGCUINTPTR)Addr + X86DESC_BASE(Desc)) & 0xffffffff);
+    return (RTGCPTR)(((RTGCUINTPTR)Addr + X86DESC_BASE(&Desc)) & 0xffffffff);
 }
 #endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
@@ -105,11 +113,11 @@
 #ifdef VBOX_WITH_RAW_MODE_NOT_R0
     /** @todo when we're in 16 bits mode, we should cut off the address as well?? */
-    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg))
+    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
         CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
-    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(&pCtxCore->cs))
+    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs))
         CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs);
 #else
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(&pCtxCore->cs));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs));
 #endif
 
@@ -168,5 +176,5 @@
         if (ppvGC)
         {
-            if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg))
+            if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
                 *ppvGC = pSReg->u64Base + uFlat;
             else
@@ -178,11 +186,11 @@
 
 #ifdef VBOX_WITH_RAW_MODE_NOT_R0
-    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg))
+    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
         CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
-    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(&pCtxCore->cs))
+    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs))
         CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs);
 #else
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(&pCtxCore->cs));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs));
 #endif
 
@@ -347,10 +355,8 @@
 
     /* calc limit. */
-    uint32_t u32Limit = X86DESC_LIMIT(Desc);
-    if (Desc.Gen.u1Granularity)
-        u32Limit = (u32Limit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+    uint32_t u32Limit = X86DESC_LIMIT_G(&Desc);
 
     /* calc address assuming straight stuff. */
-    RTGCPTR pvFlat = Addr + X86DESC_BASE(Desc);
+    RTGCPTR pvFlat = Addr + X86DESC_BASE(&Desc);
 
     /* Cut the address to 32 bits. */
@@ -475,4 +481,37 @@
 
 #ifdef VBOX_WITH_RAW_MODE_NOT_R0
+
+static void selLoadHiddenSelectorRegFromGuestTable(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUMSELREG pSReg,
+                                                   RTGCPTR GCPtrDesc, RTSEL const Sel, uint32_t const iSReg)
+{
+    /*
+     * Try read the entry.
+     */
+    X86DESC GstDesc;
+    int rc = PGMPhysReadGCPtr(pVCpu, &GstDesc, GCPtrDesc, sizeof(GstDesc));
+    if (RT_FAILURE(rc))
+    {
+        Log(("SELMLoadHiddenSelectorReg: Error reading descriptor %s=%#x: %Rrc\n", g_aszSRegNms[iSReg], Sel, rc));
+        STAM_REL_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelReadErrors);
+        return;
+    }
+
+    /*
+     * Validate it and load it.
+     */
+    if (!selmIsGstDescGoodForSReg(pVCpu, pSReg, &GstDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
+    {
+        Log(("SELMLoadHiddenSelectorReg: Guest table entry is no good (%s=%#x): %.8Rhxs\n", g_aszSRegNms[iSReg], Sel, &GstDesc));
+        STAM_REL_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelGstNoGood);
+        return;
+    }
+
+    selmLoadHiddenSRegFromGuestDesc(pVCpu, pSReg, &GstDesc);
+    Log(("SELMLoadHiddenSelectorReg: loaded %s=%#x:{b=%llx, l=%x, a=%x, vs=%x} (gst)\n",
+         g_aszSRegNms[iSReg], Sel, pSReg->u64Base, pSReg->u32Limit, pSReg->Attr.u, pSReg->ValidSel));
+    STAM_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelGst);
+}
+
+
 /**
  * CPUM helper that loads the hidden selector register from the descriptor table
@@ -500,71 +539,51 @@
     Assert(pVM->cCpus == 1);
 
-    RTSEL const Sel = pSReg->Sel;
-
-/** @todo Consider loading these from the shadow tables when possible? */
-    /*
-     * Calculate descriptor table entry address.
-     */
-    RTGCPTR GCPtrDesc;
+
+    /*
+     * Get the shadow descriptor table entry and validate it.
+     * Should something go amiss, try the guest table.
+     */
+    RTSEL const     Sel   = pSReg->Sel;
+    uint32_t const  iSReg = pSReg - CPUMCTX_FIRST_SREG(pCtx); Assert(iSReg < X86_SREG_COUNT);
+    PCX86DESC       pShwDesc;
     if (!(Sel & X86_SEL_LDT))
     {
-        if ((Sel & X86_SEL_MASK) >= pCtx->gdtr.cbGdt)
+        /** @todo this shall not happen, we shall check for these things when executing
+         *        LGDT */
+        AssertReturnVoid((Sel | X86_SEL_RPL | X86_SEL_LDT) <= pCtx->gdtr.cbGdt);
+
+        pShwDesc = &pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
+        if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT)
+            || !selmIsShwDescGoodForSReg(pSReg, pShwDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
         {
-            AssertFailed(); /** @todo count these. */
+            selLoadHiddenSelectorRegFromGuestTable(pVCpu, pCtx, pSReg, pCtx->gdtr.pGdt + (Sel & X86_SEL_MASK), Sel, iSReg);
             return;
         }
-        GCPtrDesc = pCtx->gdtr.pGdt + (Sel & X86_SEL_MASK);
-        /** @todo Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT]; for cases
-         *        where we don't change it too much. */
     }
     else
     {
-        if ((Sel & X86_SEL_MASK) >= pCtx->ldtr.u32Limit)
+        /** @todo this shall not happen, we shall check for these things when executing
+         *        LLDT */
+        AssertReturnVoid((Sel | X86_SEL_RPL | X86_SEL_LDT) <= pCtx->ldtr.u32Limit);
+
+        pShwDesc = (PCX86DESC)((uintptr_t)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper + (Sel & X86_SEL_MASK));
+        if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT)
+            || !selmIsShwDescGoodForSReg(pSReg, pShwDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
         {
-            AssertFailed(); /** @todo count these. */
+            selLoadHiddenSelectorRegFromGuestTable(pVCpu, pCtx, pSReg, pCtx->ldtr.u64Base + (Sel & X86_SEL_MASK), Sel, iSReg);
             return;
         }
-        GCPtrDesc = pCtx->ldtr.u64Base + (Sel & X86_SEL_MASK);
-    }
-
-    /*
-     * Try read the entry.
-     */
-    X86DESC Desc;
-    int rc = PGMPhysReadGCPtr(pVCpu, &Desc, GCPtrDesc, sizeof(Desc));
-    if (RT_FAILURE(rc))
-    {
-        //RT_ZERO(Desc);
-        //if (!(Sel & X86_SEL_LDT))
-        //    Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
-        //if (!Desc.Gen.u1Present)
-        {
-            AssertFailed(); /** @todo count these. */
-            return;
-        }
-    }
-
-    /*
-     * Digest it and store the result.
-     */
-    if (   !Desc.Gen.u1Present
-        || !Desc.Gen.u1DescType)
-    {
-        AssertFailed(); /** @todo count these. */
-        return;
-    }
-
-    uint32_t u32Limit = X86DESC_LIMIT(Desc);
-    if (Desc.Gen.u1Granularity)
-        u32Limit = (u32Limit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
-    pSReg->u32Limit = u32Limit;
-
-    pSReg->u64Base  = X86DESC_BASE(Desc);
-    pSReg->Attr.u   = X86DESC_GET_HID_ATTR(Desc);
-    pSReg->fFlags   = CPUMSELREG_FLAGS_VALID;
-    pSReg->ValidSel = Sel;
-}
-#endif /* VBOX_WITH_RAW_MODE */
-
+    }
+
+    /*
+     * All fine, load it.
+     */
+    selmLoadHiddenSRegFromShadowDesc(pSReg, pShwDesc);
+    STAM_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelShw);
+    Log(("SELMLoadHiddenSelectorReg: loaded %s=%#x:{b=%llx, l=%x, a=%x, vs=%x} (shw)\n",
+         g_aszSRegNms[iSReg], Sel, pSReg->u64Base, pSReg->u32Limit, pSReg->Attr.u, pSReg->ValidSel));
+}
+
+#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
 
 /**
@@ -583,5 +602,5 @@
 {
     RTGCUINTPTR uFlat = Addr & 0xffff;
-    if (!pSReg || !CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg))
+    if (!pSReg || !CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
         uFlat += (RTGCUINTPTR)SelCS << 4;
     else
@@ -646,10 +665,8 @@
                  * Limit check.
                  */
-                uint32_t    u32Limit = X86DESC_LIMIT(Desc);
-                if (Desc.Gen.u1Granularity)
-                    u32Limit = (u32Limit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+                uint32_t    u32Limit = X86DESC_LIMIT_G(&Desc);
                 if ((RTGCUINTPTR)Addr <= u32Limit)
                 {
-                    *ppvFlat = (RTGCPTR)((RTGCUINTPTR)Addr + X86DESC_BASE(Desc));
+                    *ppvFlat = (RTGCPTR)((RTGCUINTPTR)Addr + X86DESC_BASE(&Desc));
                     /* Cut the address to 32 bits. */
                     *ppvFlat &= 0xffffffff;
@@ -765,5 +782,5 @@
         return selmValidateAndConvertCSAddrRawMode(pVCpu->CTX_SUFF(pVM), pVCpu, SelCPL, SelCS, Addr, ppvFlat, NULL);
 
-    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSRegCS))
+    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSRegCS))
         CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSRegCS);
 
@@ -775,5 +792,5 @@
         SelCS  &= ~X86_SEL_RPL;
 #else
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSRegCS));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSRegCS));
     Assert(pSRegCS->Sel == SelCS);
 #endif
Index: /trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp	(revision 42406)
+++ /trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp	(revision 42407)
@@ -1861,15 +1861,13 @@
      * Limit and Base and format the output.
      */
-    uint32_t    u32Limit = X86DESC_LIMIT(*pDesc);
-    if (pDesc->Gen.u1Granularity)
-        u32Limit = u32Limit << PAGE_SHIFT | PAGE_OFFSET_MASK;
+    uint32_t    u32Limit = X86DESC_LIMIT_G(pDesc);
 
 # if HC_ARCH_BITS == 64
-    uint64_t    u32Base =  X86DESC64_BASE(*pDesc);
+    uint64_t    u32Base  = X86DESC64_BASE(pDesc);
 
     Log(("%s %04x - %RX64 %RX64 - base=%RX64 limit=%08x dpl=%d %s\n", pszMsg,
          Sel, pDesc->au64[0], pDesc->au64[1], u32Base, u32Limit, pDesc->Gen.u2Dpl, szMsg));
 # else
-    uint32_t    u32Base =  X86DESC_BASE(*pDesc);
+    uint32_t    u32Base  = X86DESC_BASE(pDesc);
 
     Log(("%s %04x - %08x %08x - base=%08x limit=%08x dpl=%d %s\n", pszMsg,
Index: /trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp	(revision 42406)
+++ /trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp	(revision 42407)
@@ -1247,5 +1247,5 @@
         if (VMX_IS_64BIT_HOST_MODE())
         {
-            uint64_t trBase64 = X86DESC64_BASE(*(PX86DESC64)pDesc);
+            uint64_t trBase64 = X86DESC64_BASE((PX86DESC64)pDesc);
             rc = VMXWriteVMCS64(VMX_VMCS_HOST_TR_BASE, trBase64);
             Log2(("VMX_VMCS_HOST_TR_BASE %RX64\n", trBase64));
@@ -1256,7 +1256,7 @@
         {
 #if HC_ARCH_BITS == 64
-            trBase = X86DESC64_BASE(*pDesc);
+            trBase = X86DESC64_BASE(pDesc);
 #else
-            trBase = X86DESC_BASE(*pDesc);
+            trBase = X86DESC_BASE(pDesc);
 #endif
             rc = VMXWriteVMCS(VMX_VMCS_HOST_TR_BASE, trBase);
Index: /trunk/src/VBox/VMM/VMMR3/EM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/EM.cpp	(revision 42406)
+++ /trunk/src/VBox/VMM/VMMR3/EM.cpp	(revision 42407)
@@ -1313,4 +1313,38 @@
     }
 
+    /*
+     * Stale hidden selectors means raw-mode is unsafe (being very careful).
+     */
+    if (pCtx->cs.fFlags & CPUMSELREG_FLAGS_STALE)
+    {
+        Log2(("raw mode refused: stale CS\n"));
+        return EMSTATE_REM;
+    }
+    if (pCtx->ss.fFlags & CPUMSELREG_FLAGS_STALE)
+    {
+        Log2(("raw mode refused: stale SS\n"));
+        return EMSTATE_REM;
+    }
+    if (pCtx->ds.fFlags & CPUMSELREG_FLAGS_STALE)
+    {
+        Log2(("raw mode refused: stale DS\n"));
+        return EMSTATE_REM;
+    }
+    if (pCtx->es.fFlags & CPUMSELREG_FLAGS_STALE)
+    {
+        Log2(("raw mode refused: stale ES\n"));
+        return EMSTATE_REM;
+    }
+    if (pCtx->fs.fFlags & CPUMSELREG_FLAGS_STALE)
+    {
+        Log2(("raw mode refused: stale FS\n"));
+        return EMSTATE_REM;
+    }
+    if (pCtx->gs.fFlags & CPUMSELREG_FLAGS_STALE)
+    {
+        Log2(("raw mode refused: stale GS\n"));
+        return EMSTATE_REM;
+    }
+
     /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
     return EMSTATE_RAW;
Index: /trunk/src/VBox/VMM/VMMR3/EMRaw.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/EMRaw.cpp	(revision 42406)
+++ /trunk/src/VBox/VMM/VMMR3/EMRaw.cpp	(revision 42407)
@@ -927,5 +927,5 @@
             DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
 #endif
-            AssertMsgFailed(("FATAL ERROR: executing random instruction inside generated patch jump %08X\n", pCtx->eip));
+            AssertMsgFailed(("FATAL ERROR: executing random instruction inside generated patch jump %08x\n", pCtx->eip));
             return VERR_EM_RAW_PATCH_CONFLICT;
         }
@@ -1226,7 +1226,7 @@
     if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT))
     {
-        int rc = SELMR3UpdateFromCPUM(pVM, pVCpu);
-        if (RT_FAILURE(rc))
-            return rc;
+        VBOXSTRICTRC rcStrict = SELMR3UpdateFromCPUM(pVM, pVCpu);
+        if (rcStrict != VINF_SUCCESS)
+            return VBOXSTRICTRC_TODO(rcStrict);
     }
 
@@ -1428,12 +1428,12 @@
         PPATMGCSTATE pGCState = PATMR3QueryGCStateHC(pVM);
         if (pCtx->eflags.Bits.u1VM)
-            Log(("RV86: %04X:%08X IF=%d VMFlags=%x\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
+            Log(("RV86: %04x:%08x IF=%d VMFlags=%x\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
         else if ((pCtx->ss.Sel & X86_SEL_RPL) == 1)
         {
             bool fCSAMScanned = CSAMIsPageScanned(pVM, (RTGCPTR)pCtx->eip);
-            Log(("RR0: %08X ESP=%08X IF=%d VMFlags=%x PIF=%d CPL=%d (Scanned=%d)\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, pGCState->fPIF, (pCtx->ss.Sel & X86_SEL_RPL), fCSAMScanned));
+            Log(("RR0: %08x ESP=%08x IF=%d VMFlags=%x PIF=%d CPL=%d (Scanned=%d)\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, pGCState->fPIF, (pCtx->ss.Sel & X86_SEL_RPL), fCSAMScanned));
         }
         else if ((pCtx->ss.Sel & X86_SEL_RPL) == 3)
-            Log(("RR3: %08X ESP=%08X IF=%d VMFlags=%x\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
+            Log(("RR3: %08x ESP=%08x IF=%d VMFlags=%x\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
 #endif /* LOG_ENABLED */
 
@@ -1462,5 +1462,5 @@
         STAM_PROFILE_ADV_START(&pVCpu->em.s.StatRAWTail, d);
 
-        LogFlow(("RR0-E: %08X ESP=%08X IF=%d VMFlags=%x PIF=%d CPL=%d\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, pGCState->fPIF, (pCtx->ss.Sel & X86_SEL_RPL)));
+        LogFlow(("RR%u-E: %08x ESP=%08x IF=%d VMFlags=%x PIF=%d\n", (pCtx->ss.Sel & X86_SEL_RPL), pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, pGCState->fPIF));
         LogFlow(("VMMR3RawRunGC returned %Rrc\n", rc));
 
Index: /trunk/src/VBox/VMM/VMMR3/SELM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/SELM.cpp	(revision 42406)
+++ /trunk/src/VBox/VMM/VMMR3/SELM.cpp	(revision 42407)
@@ -112,4 +112,12 @@
 
 
+/*******************************************************************************
+*   Global Variables                                                           *
+*******************************************************************************/
+#ifdef LOG_ENABLED
+/** Segment register names. */
+static char const g_aszSRegNms[X86_SREG_COUNT][4] = { "ES", "CS", "SS", "DS", "FS", "GS" };
+#endif
+
 
 /**
@@ -207,4 +215,32 @@
     STAM_REL_REG(pVM, &pVM->selm.s.StatHyperSelsChanged,       STAMTYPE_COUNTER, "/SELM/HyperSels/Changed",      STAMUNIT_OCCURENCES,     "The number of times we had to relocate our hypervisor selectors.");
     STAM_REL_REG(pVM, &pVM->selm.s.StatScanForHyperSels,       STAMTYPE_COUNTER, "/SELM/HyperSels/Scan",         STAMUNIT_OCCURENCES,     "The number of times we had find free hypervisor selectors.");
+
+    STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_ES], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleES", STAMUNIT_OCCURENCES, "Stale ES was detected in UpdateFromCPUM.");
+    STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_CS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleCS", STAMUNIT_OCCURENCES, "Stale CS was detected in UpdateFromCPUM.");
+    STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_SS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleSS", STAMUNIT_OCCURENCES, "Stale SS was detected in UpdateFromCPUM.");
+    STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_DS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleDS", STAMUNIT_OCCURENCES, "Stale DS was detected in UpdateFromCPUM.");
+    STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_FS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleFS", STAMUNIT_OCCURENCES, "Stale FS was detected in UpdateFromCPUM.");
+    STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_GS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleGS", STAMUNIT_OCCURENCES, "Stale GS was detected in UpdateFromCPUM.");
+
+    STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_ES],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleES", STAMUNIT_OCCURENCES, "Already stale ES in UpdateFromCPUM.");
+    STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_CS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleCS", STAMUNIT_OCCURENCES, "Already stale CS in UpdateFromCPUM.");
+    STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_SS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleSS", STAMUNIT_OCCURENCES, "Already stale SS in UpdateFromCPUM.");
+    STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_DS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleDS", STAMUNIT_OCCURENCES, "Already stale DS in UpdateFromCPUM.");
+    STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_FS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleFS", STAMUNIT_OCCURENCES, "Already stale FS in UpdateFromCPUM.");
+    STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_GS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleGS", STAMUNIT_OCCURENCES, "Already stale GS in UpdateFromCPUM.");
+
+    STAM_REL_REG(pVM, &pVM->selm.s.StatStaleToUnstaleSReg,              STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/StaleToUnstale", STAMUNIT_OCCURENCES, "Transitions from stale to unstale UpdateFromCPUM.");
+
+    STAM_REG(    pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_ES],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedES", STAMUNIT_OCCURENCES, "Updated hidden ES values in UpdateFromCPUM.");
+    STAM_REG(    pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_CS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedCS", STAMUNIT_OCCURENCES, "Updated hidden CS values in UpdateFromCPUM.");
+    STAM_REG(    pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_SS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedSS", STAMUNIT_OCCURENCES, "Updated hidden SS values in UpdateFromCPUM.");
+    STAM_REG(    pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_DS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedDS", STAMUNIT_OCCURENCES, "Updated hidden DS values in UpdateFromCPUM.");
+    STAM_REG(    pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_FS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedFS", STAMUNIT_OCCURENCES, "Updated hidden FS values in UpdateFromCPUM.");
+    STAM_REG(    pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_GS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedGS", STAMUNIT_OCCURENCES, "Updated hidden GS values in UpdateFromCPUM.");
+
+    STAM_REG(    pVM, &pVM->selm.s.StatLoadHidSelGst,              STAMTYPE_COUNTER, "/SELM/LoadHidSel/LoadedGuest",   STAMUNIT_OCCURENCES, "SELMLoadHiddenSelectorReg: Loaded from guest tables.");
+    STAM_REG(    pVM, &pVM->selm.s.StatLoadHidSelShw,              STAMTYPE_COUNTER, "/SELM/LoadHidSel/LoadedShadow",  STAMUNIT_OCCURENCES, "SELMLoadHiddenSelectorReg: Loaded from shadow tables.");
+    STAM_REL_REG(pVM, &pVM->selm.s.StatLoadHidSelReadErrors,       STAMTYPE_COUNTER, "/SELM/LoadHidSel/GstReadErrors", STAMUNIT_OCCURENCES, "SELMLoadHiddenSelectorReg: Guest table read errors.");
+    STAM_REL_REG(pVM, &pVM->selm.s.StatLoadHidSelGstNoGood,        STAMTYPE_COUNTER, "/SELM/LoadHidSel/NoGoodGuest",   STAMUNIT_OCCURENCES, "SELMLoadHiddenSelectorReg: No good guest table entry.");
 
     /*
@@ -774,4 +810,11 @@
 
 
+/**
+ * Updates (syncs) the shadow GDT.
+ *
+ * @returns VBox status code.
+ * @param   pVM                 The VM handle.
+ * @param   pVCpu               The current virtual CPU.
+ */
 static int selmR3UpdateShadowGdt(PVM pVM, PVMCPU pVCpu)
 {
@@ -829,5 +872,5 @@
             {
                 if (pu8DstInvalid != pu8Dst)
-                    memset(pu8DstInvalid, 0, pu8Dst - pu8DstInvalid);
+                    RT_BZERO(pu8DstInvalid, pu8Dst - pu8DstInvalid);
                 GCPtrSrc += cb;
                 pu8Dst += cb;
@@ -854,5 +897,5 @@
             /* If any GDTEs was invalidated, zero them. */
             if (cbEffLimit < pVM->selm.s.cbEffGuestGdtLimit)
-                memset(pu8DstInvalid + cbEffLimit + 1, 0, pVM->selm.s.cbEffGuestGdtLimit - cbEffLimit);
+                RT_BZERO(pu8DstInvalid + cbEffLimit + 1, pVM->selm.s.cbEffGuestGdtLimit - cbEffLimit);
         }
 
@@ -916,38 +959,5 @@
     {
         if (pGDTE->Gen.u1Present)
-        {
-            /*
-             * Code and data selectors are generally 1:1, with the
-             * 'little' adjustment we do for DPL 0 selectors.
-             */
-            if (pGDTE->Gen.u1DescType)
-            {
-                /*
-                 * Hack for A-bit against Trap E on read-only GDT.
-                 */
-                /** @todo Fix this by loading ds and cs before turning off WP. */
-                pGDTE->Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
-
-                /*
-                 * All DPL 0 code and data segments are squeezed into DPL 1.
-                 *
-                 * We're skipping conforming segments here because those
-                 * cannot give us any trouble.
-                 */
-                if (    pGDTE->Gen.u2Dpl == 0
-                    &&      (pGDTE->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
-                        !=  (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
-                    pGDTE->Gen.u2Dpl = 1;
-            }
-            else
-            {
-                /*
-                 * System type selectors are marked not present.
-                 * Recompiler or special handling is required for these.
-                 */
-                /** @todo what about interrupt gates and rawr0? */
-                pGDTE->Gen.u1Present = 0;
-            }
-        }
+            selmGuestToShadowDesc(pGDTE);
 
         /* Next GDT entry. */
@@ -990,5 +1000,5 @@
     {
         if (pVM->selm.s.GuestGdtr.cbGdt > GDTR.cbGdt)
-            memset(pGDTE, 0, pVM->selm.s.GuestGdtr.cbGdt - GDTR.cbGdt);
+            RT_BZERO(pGDTE, pVM->selm.s.GuestGdtr.cbGdt - GDTR.cbGdt);
     }
 
@@ -1068,4 +1078,5 @@
             pVM->selm.s.GCPtrGuestLdt = RTRCPTR_MAX;
         }
+        pVM->selm.s.cbLdtLimit = 0;
         return VINF_SUCCESS;
     }
@@ -1075,8 +1086,6 @@
      */
     PX86DESC    pDesc    = &pVM->selm.s.paGdtR3[SelLdt >> X86_SEL_SHIFT];
-    RTGCPTR     GCPtrLdt = X86DESC_BASE(*pDesc);
-    unsigned    cbLdt    = X86DESC_LIMIT(*pDesc);
-    if (pDesc->Gen.u1Granularity)
-        cbLdt = (cbLdt << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+    RTGCPTR     GCPtrLdt = X86DESC_BASE(pDesc);
+    uint32_t    cbLdt    = X86DESC_LIMIT_G(pDesc);
 
     /*
@@ -1228,39 +1237,5 @@
             {
                 if (pLDTE->Gen.u1Present)
-                {
-                    /*
-                     * Code and data selectors are generally 1:1, with the
-                     * 'little' adjustment we do for DPL 0 selectors.
-                     */
-                    if (pLDTE->Gen.u1DescType)
-                    {
-                        /*
-                         * Hack for A-bit against Trap E on read-only GDT.
-                         */
-                        /** @todo Fix this by loading ds and cs before turning off WP. */
-                        if (!(pLDTE->Gen.u4Type & X86_SEL_TYPE_ACCESSED))
-                            pLDTE->Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
-
-                        /*
-                         * All DPL 0 code and data segments are squeezed into DPL 1.
-                         *
-                         * We're skipping conforming segments here because those
-                         * cannot give us any trouble.
-                         */
-                        if (    pLDTE->Gen.u2Dpl == 0
-                            &&      (pLDTE->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
-                                !=  (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
-                            pLDTE->Gen.u2Dpl = 1;
-                    }
-                    else
-                    {
-                        /*
-                         * System type selectors are marked not present.
-                         * Recompiler or special handling is required for these.
-                         */
-                        /** @todo what about interrupt gates and rawr0? */
-                        pLDTE->Gen.u1Present = 0;
-                    }
-                }
+                    selmGuestToShadowDesc(pLDTE);
 
                 /* Next LDT entry. */
@@ -1270,4 +1245,5 @@
         else
         {
+            RT_BZERO(pShadowLDT, cbChunk);
             AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc=%Rrc\n", rc));
             rc = PGMMapSetPage(pVM, GCPtrShadowLDT & PAGE_BASE_GC_MASK, PAGE_SIZE, 0);
@@ -1289,4 +1265,108 @@
 
 /**
+ * Checks and updates segment selector registers.
+ *
+ * @returns VBox strict status code.
+ * @retval  VINF_EM_RESCHEDULE_REM if a stale register was found.
+ *
+ * @param   pVM                 The VM handle.
+ * @param   pVCpu               The current virtual CPU.
+ */
+static VBOXSTRICTRC selmR3UpdateSegmentRegisters(PVM pVM, PVMCPU pVCpu)
+{
+    Assert(CPUMIsGuestInProtectedMode(pVCpu));
+
+    /*
+     * No stale selectors in V8086 mode.
+     */
+    PCPUMCTX        pCtx     = CPUMQueryGuestCtxPtr(pVCpu);
+    if (pCtx->eflags.Bits.u1VM)
+        return VINF_SUCCESS;
+
+    /*
+     * Check for stale selectors and load hidden register bits where they
+     * are missing.
+     */
+    uint32_t        uCpl     = CPUMGetGuestCPL(pVCpu);
+    VBOXSTRICTRC    rcStrict = VINF_SUCCESS;
+    PCPUMSELREG     paSReg   = CPUMCTX_FIRST_SREG(pCtx);
+    for (uint32_t iSReg = 0; iSReg < X86_SREG_COUNT; iSReg++)
+    {
+        RTSEL const Sel = paSReg[iSReg].Sel & (X86_SEL_MASK | X86_SEL_LDT);
+        if (Sel & (X86_SEL_MASK | X86_SEL_LDT))
+        {
+            /* Get the shadow descriptor entry corresponding to this. */
+            static X86DESC const s_NotPresentDesc = { { 0 } };
+            PCX86DESC pDesc;
+            if (!(Sel & X86_SEL_LDT))
+            {
+                if ((Sel | (sizeof(*pDesc) - 1)) <= pCtx->gdtr.cbGdt)
+                    pDesc = &pVM->selm.s.paGdtR3[Sel >> X86_SEL_SHIFT];
+                else
+                    pDesc = &s_NotPresentDesc;
+            }
+            else
+            {
+                if ((Sel | (sizeof(*pDesc) - 1)) <= pVM->selm.s.cbLdtLimit)
+                    pDesc = &((PCX86DESC)((uintptr_t)pVM->selm.s.pvLdtR3 + pVM->selm.s.offLdtHyper))[Sel >> X86_SEL_SHIFT];
+                else
+                    pDesc = &s_NotPresentDesc;
+            }
+
+            /* Check the segment register. */
+            if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &paSReg[iSReg]))
+            {
+                if (!(paSReg[iSReg].fFlags & CPUMSELREG_FLAGS_STALE))
+                {
+                    /* Did it go stale? */
+                    if (selmIsSRegStale32(&paSReg[iSReg], pDesc, iSReg))
+                    {
+                        Log2(("SELM: Detected stale %s=%#x (was valid)\n", g_aszSRegNms[iSReg], Sel));
+                        STAM_REL_COUNTER_INC(&pVM->selm.s.aStatDetectedStaleSReg[iSReg]);
+                        paSReg[iSReg].fFlags |= CPUMSELREG_FLAGS_STALE;
+                        rcStrict = VINF_EM_RESCHEDULE_REM;
+                    }
+                }
+                else
+                {
+                    /* Did it stop being stale? I.e. did the guest change it things
+                       back to the way they were? */
+                    if (!selmIsSRegStale32(&paSReg[iSReg], pDesc, iSReg))
+                    {
+                        STAM_REL_COUNTER_INC(&pVM->selm.s.StatStaleToUnstaleSReg);
+                        paSReg[iSReg].fFlags &= ~CPUMSELREG_FLAGS_STALE;
+                    }
+                    else
+                    {
+                        Log2(("SELM: Already stale %s=%#x\n", g_aszSRegNms[iSReg], Sel));
+                        STAM_REL_COUNTER_INC(&pVM->selm.s.aStatAlreadyStaleSReg[iSReg]);
+                        rcStrict = VINF_EM_RESCHEDULE_REM;
+                    }
+                }
+            }
+            /* Load the hidden registers if it's a valid descriptor for the
+               current segment register. */
+            else if (selmIsShwDescGoodForSReg(&paSReg[iSReg], pDesc, iSReg, uCpl))
+            {
+                selmLoadHiddenSRegFromShadowDesc(&paSReg[iSReg], pDesc);
+                STAM_COUNTER_INC(&pVM->selm.s.aStatUpdatedSReg[iSReg]);
+            }
+            /* It's stale. */
+            else
+            {
+                Log2(("SELM: Detected stale %s=%#x (wasn't valid)\n", g_aszSRegNms[iSReg], Sel));
+                STAM_REL_COUNTER_INC(&pVM->selm.s.aStatDetectedStaleSReg[iSReg]);
+                paSReg[iSReg].fFlags = CPUMSELREG_FLAGS_STALE;
+                rcStrict = VINF_EM_RESCHEDULE_REM;
+            }
+        }
+        /* else: 0 selector, ignore. */
+    }
+
+    return rcStrict;
+}
+
+
+/**
  * Updates the Guest GDT & LDT virtualization based on current CPU state.
  *
@@ -1295,5 +1375,5 @@
  * @param   pVCpu       Pointer to the VMCPU.
  */
-VMMR3DECL(int) SELMR3UpdateFromCPUM(PVM pVM, PVMCPU pVCpu)
+VMMR3DECL(VBOXSTRICTRC) SELMR3UpdateFromCPUM(PVM pVM, PVMCPU pVCpu)
 {
     if (pVM->selm.s.fDisableMonitoring)
@@ -1341,15 +1421,11 @@
     }
 
-#if 0
-    /*
-     * Check for stale selectors and load hidden register bits where they
-     * are missing.
-     */
-    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
-#endif
-    rc = VINF_SUCCESS;
+    /*
+     * Check selector registers.
+     */
+    VBOXSTRICTRC rcStrict = selmR3UpdateSegmentRegisters(pVM, pVCpu);
 
     STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
-    return rc;
+    return rcStrict;
 }
 
@@ -1731,8 +1807,6 @@
         return rc;
     }
-    RTGCPTR     GCPtrLDTEGuest = X86DESC_BASE(LDTDesc);
-    unsigned    cbLdt = X86DESC_LIMIT(LDTDesc);
-    if (LDTDesc.Gen.u1Granularity)
-        cbLdt = (cbLdt << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+    RTGCPTR     GCPtrLDTEGuest = X86DESC_BASE(&LDTDesc);
+    uint32_t    cbLdt = X86DESC_LIMIT_G(&LDTDesc);
 
     /*
@@ -1978,6 +2052,6 @@
     if (ppvLdt)
     {
-        *ppvLdt = (RTGCPTR)X86DESC_BASE(Desc);
-        *pcbLimit = X86DESC_LIMIT(Desc);
+        *ppvLdt = (RTGCPTR)X86DESC_BASE(&Desc);
+        *pcbLimit = X86DESC_LIMIT_G(&Desc);
     }
     return VINF_SUCCESS;
@@ -2033,12 +2107,10 @@
             return VERR_INVALID_SELECTOR;
 
-        uint32_t cbLimit = X86DESC_LIMIT(Desc);
-        if (Desc.Gen.u1Granularity)
-            cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+        uint32_t cbLimit = X86DESC_LIMIT_G(&Desc);
         if ((uint32_t)(Sel & X86_SEL_MASK) + sizeof(X86DESC) - 1 > cbLimit)
             return VERR_INVALID_SELECTOR;
 
         /* calc the descriptor location. */
-        GCPtrDesc = X86DESC64_BASE(Desc);
+        GCPtrDesc = X86DESC64_BASE(&Desc);
         GCPtrDesc += (Sel & X86_SEL_MASK);
     }
@@ -2080,8 +2152,6 @@
         else
         {
-            pSelInfo->cbLimit = X86DESC_LIMIT(Desc);
-            if (Desc.Gen.u1Granularity)
-                pSelInfo->cbLimit = (pSelInfo->cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
-            pSelInfo->GCPtrBase = X86DESC_BASE(Desc);
+            pSelInfo->cbLimit   = X86DESC_LIMIT_G(&Desc);
+            pSelInfo->GCPtrBase = X86DESC_BASE(&Desc);
         }
         pSelInfo->SelGate = 0;
@@ -2093,9 +2163,7 @@
         /* Note. LDT descriptors are weird in long mode, we ignore the footnote
            in the AMD manual here as a simplification. */
-        pSelInfo->GCPtrBase = X86DESC64_BASE(Desc);
-        pSelInfo->cbLimit = X86DESC_LIMIT(Desc);
-        if (Desc.Gen.u1Granularity)
-            pSelInfo->cbLimit = (pSelInfo->cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
-        pSelInfo->SelGate = 0;
+        pSelInfo->GCPtrBase = X86DESC64_BASE(&Desc);
+        pSelInfo->cbLimit   = X86DESC_LIMIT_G(&Desc);
+        pSelInfo->SelGate   = 0;
     }
     else if (   Desc.Gen.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE
@@ -2103,5 +2171,5 @@
              || Desc.Gen.u4Type == AMD64_SEL_TYPE_SYS_INT_GATE)
     {
-        pSelInfo->cbLimit   = X86DESC64_BASE(Desc);
+        pSelInfo->cbLimit   = X86DESC64_BASE(&Desc);
         pSelInfo->GCPtrBase = Desc.Gate.u16OffsetLow
                             | ((uint32_t)Desc.Gate.u16OffsetHigh << 16)
@@ -2140,9 +2208,7 @@
         ||  !(pDesc->Gen.u4Type & 4))
     {
-        pSelInfo->cbLimit = X86DESC_LIMIT(*pDesc);
-        if (pDesc->Gen.u1Granularity)
-            pSelInfo->cbLimit = (pSelInfo->cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
-        pSelInfo->GCPtrBase = X86DESC_BASE(*pDesc);
-        pSelInfo->SelGate = 0;
+        pSelInfo->cbLimit   = X86DESC_LIMIT_G(pDesc);
+        pSelInfo->GCPtrBase = X86DESC_BASE(pDesc);
+        pSelInfo->SelGate   = 0;
     }
     else if (pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_UNDEFINED4)
@@ -2245,12 +2311,10 @@
                 return VERR_INVALID_SELECTOR;
 
-            unsigned cbLimit = X86DESC_LIMIT(Desc);
-            if (Desc.Gen.u1Granularity)
-                cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
-            if ((unsigned)(Sel & X86_SEL_MASK) + sizeof(X86DESC) - 1 > cbLimit)
+            uint32_t cbLimit = X86DESC_LIMIT_G(&Desc);
+            if ((uint32_t)(Sel & X86_SEL_MASK) + sizeof(X86DESC) - 1 > cbLimit)
                 return VERR_INVALID_SELECTOR;
 
             /* calc the descriptor location. */
-            GCPtrDesc = X86DESC_BASE(Desc);
+            GCPtrDesc = X86DESC_BASE(&Desc);
             GCPtrDesc += (Sel & X86_SEL_MASK);
         }
@@ -2466,8 +2530,6 @@
      * Limit and Base and format the output.
      */
-    uint32_t    u32Limit = X86DESC_LIMIT(Desc);
-    if (Desc.Gen.u1Granularity)
-        u32Limit = u32Limit << PAGE_SHIFT | PAGE_OFFSET_MASK;
-    uint32_t    u32Base =  X86DESC_BASE(Desc);
+    uint32_t    u32Limit = X86DESC_LIMIT_G(&Desc);
+    uint32_t    u32Base  = X86DESC_BASE(&Desc);
 
     RTStrPrintf(pszOutput, cchOutput, "%04x - %08x %08x - base=%08x limit=%08x dpl=%d %s",
Index: /trunk/src/VBox/VMM/VMMRC/SELMRC.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMRC/SELMRC.cpp	(revision 42406)
+++ /trunk/src/VBox/VMM/VMMRC/SELMRC.cpp	(revision 42407)
@@ -36,16 +36,31 @@
 
 
+/*******************************************************************************
+*   Global Variables                                                           *
+*******************************************************************************/
+#ifdef LOG_ENABLED
+/** Segment register names. */
+static char const g_aszSRegNms[X86_SREG_COUNT][4] = { "ES", "CS", "SS", "DS", "FS", "GS" };
+#endif
+
+
 /**
  * Synchronizes one GDT entry (guest -> shadow).
  *
- * @returns VBox status code (appropriate for trap handling and GC return).
- * @param   pVM         Pointer to the VM.
+ * @returns VBox strict status code (appropriate for trap handling and GC
+ *          return).
+ * @retval  VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
+ * @retval  VINF_SELM_SYNC_GDT
+ * @retval  VINF_EM_RESCHEDULE_REM
+ *
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       The current virtual CPU.
  * @param   pRegFrame   Trap register frame.
  * @param   iGDTEntry   The GDT entry to sync.
- */
-static int selmGCSyncGDTEntry(PVM pVM, PCPUMCTXCORE pRegFrame, unsigned iGDTEntry)
-{
-    PVMCPU pVCpu = VMMGetCpu0(pVM);
-
+ *
+ * @remarks Caller checks that this isn't the LDT entry!
+ */
+static VBOXSTRICTRC selmRCSyncGDTEntry(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, unsigned iGDTEntry)
+{
     Log2(("GDT %04X LDTR=%04X\n", iGDTEntry, CPUMGetGuestLDTR(pVCpu)));
 
@@ -57,6 +72,6 @@
     unsigned offEntry = iGDTEntry * sizeof(X86DESC);
     if (    iGDTEntry >= SELM_GDT_ELEMENTS
-        ||  offEntry > GdtrGuest.cbGdt)
-        return VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
+        ||  offEntry  >  GdtrGuest.cbGdt)
+        return VINF_SUCCESS; /* ignore */
 
     /*
@@ -66,5 +81,13 @@
     int rc = MMGCRamRead(pVM, &Desc, (uint8_t *)(uintptr_t)GdtrGuest.pGdt + offEntry, sizeof(X86DESC));
     if (RT_FAILURE(rc))
-        return VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
+    {
+        rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, (uintptr_t)GdtrGuest.pGdt + offEntry, sizeof(X86DESC));
+        if (RT_FAILURE(rc))
+        {
+            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
+            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* paranoia */
+            return VINF_EM_RESCHEDULE_REM;
+        }
+    }
 
     /*
@@ -85,8 +108,10 @@
         if (Desc.Gen.u1Present)
         {
-            Log(("selmGCSyncGDTEntry: Sel=%d Desc=%.8Rhxs: detected conflict!!\n", Sel, &Desc));
-            return VINF_SELM_SYNC_GDT;
-        }
-        Log(("selmGCSyncGDTEntry: Sel=%d Desc=%.8Rhxs: potential conflict (still not present)!\n", Sel, &Desc));
+            Log(("selmRCSyncGDTEntry: Sel=%d Desc=%.8Rhxs: detected conflict!!\n", Sel, &Desc));
+            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
+            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
+            return VINF_SELM_SYNC_GDT;  /** @todo this status code is ignored, unfortunately. */
+        }
+        Log(("selmRCSyncGDTEntry: Sel=%d Desc=%.8Rhxs: potential conflict (still not present)!\n", Sel, &Desc));
 
         /* Note: we can't continue below or else we'll change the shadow descriptor!! */
@@ -96,82 +121,101 @@
 
     /*
-     * Code and data selectors are generally 1:1, with the
-     * 'little' adjustment we do for DPL 0 selectors.
-     */
-    PX86DESC   pShadowDescr = &pVM->selm.s.paGdtRC[iGDTEntry];
-    if (Desc.Gen.u1DescType)
-    {
-        /*
-         * Hack for A-bit against Trap E on read-only GDT.
-         */
-        /** @todo Fix this by loading ds and cs before turning off WP. */
-        Desc.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
-
-        /*
-         * All DPL 0 code and data segments are squeezed into DPL 1.
-         *
-         * We're skipping conforming segments here because those
-         * cannot give us any trouble.
-         */
-        if (    Desc.Gen.u2Dpl == 0
-            &&      (Desc.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
-                !=  (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
-            Desc.Gen.u2Dpl = 1;
-    }
-    else
-    {
-        /*
-         * System type selectors are marked not present.
-         * Recompiler or special handling is required for these.
-         */
-        /** @todo what about interrupt gates and rawr0? */
-        Desc.Gen.u1Present = 0;
-    }
-    //Log(("O: base=%08X limit=%08X attr=%04X\n", X86DESC_BASE(*pShadowDescr)), X86DESC_LIMIT(*pShadowDescr), (pShadowDescr->au32[1] >> 8) & 0xFFFF ));
+     * Convert the guest selector to a shadow selector and update the shadow GDT.
+     */
+    selmGuestToShadowDesc(&Desc);
+    PX86DESC pShwDescr = &pVM->selm.s.paGdtRC[iGDTEntry];
+    //Log(("O: base=%08X limit=%08X attr=%04X\n", X86DESC_BASE(*pShwDescr)), X86DESC_LIMIT(*pShwDescr), (pShwDescr->au32[1] >> 8) & 0xFFFF ));
     //Log(("N: base=%08X limit=%08X attr=%04X\n", X86DESC_BASE(Desc)), X86DESC_LIMIT(Desc), (Desc.au32[1] >> 8) & 0xFFFF ));
-    *pShadowDescr = Desc;
+    *pShwDescr = Desc;
 
     /*
      * Detect and mark stale registers.
      */
-    PCPUMCTX    pCtx      = CPUMQueryGuestCtxPtr(pVCpu);
-    PCPUMSELREG paSRegCtx = &pCtx->es;
-    PCPUMSELREG paSRegFrm = &pRegFrame->es;
-    for (unsigned i = 0; i <= X86_SREG_GS; i++)
-        if (Sel == (paSRegFrm[i].Sel & X86_SEL_MASK))
-        {
-            /** @todo we clear the valid flag here, maybe we shouldn't... but that would
-             *        require implementing handling of stale registers in raw-mode.
-             *        Tricky, at least for SS and CS. */
-            paSRegFrm[i].fFlags = CPUMSELREG_FLAGS_STALE;
-            paSRegCtx[i].fFlags = CPUMSELREG_FLAGS_STALE;
-        }
-
-    /*
-     * Check if we change the LDT selector.
-     */
-    if (Sel == CPUMGetGuestLDTR(pVCpu)) /** @todo this isn't correct in two(+) ways! 1. It shouldn't be done until the LDTR is reloaded. 2. It caused the next instruction to be emulated.  */
-    {
-        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
-        return VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT;
-    }
-
-#ifdef LOG_ENABLED
-    if (Sel == (pRegFrame->cs.Sel & X86_SEL_MASK))
-        Log(("GDT write to selector in CS register %04X\n", pRegFrame->cs.Sel));
-    else if (Sel == (pRegFrame->ds.Sel & X86_SEL_MASK))
-        Log(("GDT write to selector in DS register %04X\n", pRegFrame->ds.Sel));
-    else if (Sel == (pRegFrame->es.Sel & X86_SEL_MASK))
-        Log(("GDT write to selector in ES register %04X\n", pRegFrame->es.Sel));
-    else if (Sel == (pRegFrame->fs.Sel & X86_SEL_MASK))
-        Log(("GDT write to selector in FS register %04X\n", pRegFrame->fs.Sel));
-    else if (Sel == (pRegFrame->gs.Sel & X86_SEL_MASK))
-        Log(("GDT write to selector in GS register %04X\n", pRegFrame->gs.Sel));
-    else if (Sel == (pRegFrame->ss.Sel & X86_SEL_MASK))
-        Log(("GDT write to selector in SS register %04X\n", pRegFrame->ss.Sel));
-#endif
-
-    return VINF_SUCCESS;
-}
+    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
+    PCPUMCTX     pCtx     = CPUMQueryGuestCtxPtr(pVCpu); Assert(CPUMCTX2CORE(pCtx) == pRegFrame);
+    PCPUMSELREG  paSReg   = CPUMCTX_FIRST_SREG(pCtx);
+    for (unsigned iSReg = 0; iSReg < X86_SREG_COUNT; iSReg++)
+    {
+        if (Sel == (paSReg[iSReg].Sel & X86_SEL_MASK_RPL))
+        {
+            if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &paSReg[iSReg]))
+            {
+                if (selmIsSRegStale32(&paSReg[iSReg], &Desc, iSReg))
+                {
+                    Log(("GDT write to selector in %s register %04X (now stale)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
+                    paSReg[iSReg].fFlags |= CPUMSELREG_FLAGS_STALE;
+                    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* paranoia */
+                    rcStrict = VINF_EM_RESCHEDULE_REM;
+                }
+                else if (paSReg[iSReg].fFlags & CPUMSELREG_FLAGS_STALE)
+                {
+                    Log(("GDT write to selector in %s register %04X (no longer stale)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
+                    paSReg[iSReg].fFlags &= ~CPUMSELREG_FLAGS_STALE;
+                }
+                else
+                    Log(("GDT write to selector in %s register %04X (no important change)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
+            }
+            else
+                Log(("GDT write to selector in %s register %04X (out of sync)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
+        }
+    }
+
+    /** @todo Detect stale LDTR as well? */
+
+    return rcStrict;
+}
+
+
+/**
+ * Synchronizes any segment registers referring to the given GDT entry.
+ *
+ * This is called before any changes performed and shadowed, so it's possible to
+ * look in both the shadow and guest descriptor table entries for hidden
+ * register content.
+ *
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       The current virtual CPU.
+ * @param   pRegFrame   Trap register frame.
+ * @param   iGDTEntry   The GDT entry to sync.
+ */
+static void selmRCSyncGDTSegRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, unsigned iGDTEntry)
+{
+    /*
+     * Validate the offset.
+     */
+    VBOXGDTR GdtrGuest;
+    CPUMGetGuestGDTR(pVCpu, &GdtrGuest);
+    unsigned offEntry = iGDTEntry * sizeof(X86DESC);
+    if (    iGDTEntry >= SELM_GDT_ELEMENTS
+        ||  offEntry  >  GdtrGuest.cbGdt)
+        return;
+
+    /*
+     * Sync outdated segment registers using this entry.
+     */
+    PCX86DESC       pDesc    = &pVM->selm.s.CTX_SUFF(paGdt)[iGDTEntry];
+    uint32_t        uCpl     = CPUMGetGuestCPL(pVCpu);
+    PCPUMCTX        pCtx     = CPUMQueryGuestCtxPtr(pVCpu); Assert(CPUMCTX2CORE(pCtx) == pRegFrame);
+    PCPUMSELREG     paSReg   = CPUMCTX_FIRST_SREG(pCtx);
+    for (unsigned iSReg = 0; iSReg < X86_SREG_COUNT; iSReg++)
+    {
+        if (iGDTEntry == ((paSReg[iSReg].Sel & X86_SEL_MASK_RPL) >> X86_SEL_SHIFT))
+        {
+            if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &paSReg[iSReg]))
+            {
+                if (selmIsShwDescGoodForSReg(&paSReg[iSReg], pDesc, iSReg, uCpl))
+                {
+                    selmLoadHiddenSRegFromShadowDesc(&paSReg[iSReg], pDesc);
+                    Log(("selmRCSyncGDTSegRegs: Updated %s\n", g_aszSRegNms[iSReg]));
+                }
+                else
+                    Log(("selmRCSyncGDTSegRegs: Bad shadow descriptor %#x (for %s): %.8Rhxs \n",
+                         iGDTEntry, g_aszSRegNms[iSReg], pDesc));
+            }
+        }
+    }
+
+}
+
 
 
@@ -195,34 +239,49 @@
 
     /*
-     * First check if this is the LDT entry.
-     * LDT updates are problems since an invalid LDT entry will cause trouble during worldswitch.
-     */
-    int rc;
-    if (CPUMGetGuestLDTR(pVCpu) / sizeof(X86DESC) == offRange / sizeof(X86DESC))
-    {
-        Log(("LDTR selector change -> fall back to HC!!\n"));
-        rc = VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
-        /** @todo We're not handling changed to the selectors in LDTR and TR correctly at all.
-         * We should ignore any changes to those and sync them only when they are loaded by the guest! */
-    }
-    else
-    {
-        /*
-         * Attempt to emulate the instruction and sync the affected entries.
-         */
-        /** @todo should check if any affected selectors are loaded. */
-        uint32_t cb;
-        rc = EMInterpretInstructionEx(pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb);
-        if (RT_SUCCESS(rc) && cb)
-        {
-            unsigned iGDTE1 = offRange / sizeof(X86DESC);
-            int rc2 = selmGCSyncGDTEntry(pVM, pRegFrame, iGDTE1);
-            if (rc2 == VINF_SUCCESS)
-            {
-                Assert(cb);
-                unsigned iGDTE2 = (offRange + cb - 1) / sizeof(X86DESC);
+     * Check if any selectors might be affected.
+     */
+    unsigned const iGDTE1 = offRange >> X86_SEL_SHIFT;
+    selmRCSyncGDTSegRegs(pVM, pVCpu, pRegFrame, iGDTE1);
+    if (((offRange + 8) >> X86_SEL_SHIFT) != iGDTE1)
+        selmRCSyncGDTSegRegs(pVM, pVCpu, pRegFrame, iGDTE1 + 1);
+
+    /*
+     * Attempt to emulate the instruction and sync the affected entries.
+     */
+    uint32_t cb;
+    int rc = EMInterpretInstructionEx(pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb);
+    if (RT_SUCCESS(rc) && cb)
+    {
+        /* Check if the LDT was in any way affected.  Do not sync the
+           shadow GDT if that's the case or we might have trouble in
+           the world switcher (or so they say). */
+        unsigned const iLdt   = CPUMGetGuestLDTR(pVCpu) >> X86_SEL_SHIFT;
+        unsigned const iGDTE2 = (offRange + cb - 1) >> X86_SEL_SHIFT;
+        if (   iGDTE1 == iLdt
+            || iGDTE2 == iLdt)
+        {
+            Log(("LDTR selector change -> fall back to HC!!\n"));
+            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
+            rc = VINF_SELM_SYNC_GDT;
+            /** @todo Implement correct stale LDT handling.  */
+        }
+        else
+        {
+            /* Sync the shadow GDT and continue provided the update didn't
+               cause any segment registers to go stale in any way. */
+            int rc2 = selmRCSyncGDTEntry(pVM, pVCpu, pRegFrame, iGDTE1);
+            if (rc2 == VINF_SUCCESS || rc2 == VINF_EM_RESCHEDULE_REM)
+            {
+                if (rc == VINF_SUCCESS)
+                    rc = rc2;
+
                 if (iGDTE1 != iGDTE2)
-                    rc2 = selmGCSyncGDTEntry(pVM, pRegFrame, iGDTE2);
-                if (rc2 == VINF_SUCCESS)
+                {
+                    rc2 = selmRCSyncGDTEntry(pVM, pVCpu, pRegFrame, iGDTE2);
+                    if (rc == VINF_SUCCESS)
+                        rc = rc2;
+                }
+
+                if (rc2 == VINF_SUCCESS || rc2 == VINF_EM_RESCHEDULE_REM)
                 {
                     STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
@@ -230,20 +289,18 @@
                 }
             }
+
+            /* sync failed, return to ring-3 and resync the GDT. */
             if (rc == VINF_SUCCESS || RT_FAILURE(rc2))
                 rc = rc2;
         }
-        else
-        {
-            Assert(RT_FAILURE(rc));
-            if (rc == VERR_EM_INTERPRETER)
-                rc = VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
-        }
-    }
-    if (    rc != VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
-        &&  rc != VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT)
-    {
-        /* Not necessary when we need to go back to the host context to sync the LDT or TSS. */
+    }
+    else
+    {
+        Assert(RT_FAILURE(rc));
+        if (rc == VERR_EM_INTERPRETER)
+            rc = VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
         VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
     }
+
     STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTUnhandled);
     return rc;
Index: /trunk/src/VBox/VMM/include/EMHandleRCTmpl.h
===================================================================
--- /trunk/src/VBox/VMM/include/EMHandleRCTmpl.h	(revision 42406)
+++ /trunk/src/VBox/VMM/include/EMHandleRCTmpl.h	(revision 42407)
@@ -284,4 +284,12 @@
             rc = VINF_EM_RESCHEDULE_REM;
             break;
+
+        /*
+         * Conflict in GDT, resync and continue.
+         */
+        case VINF_SELM_SYNC_GDT:
+            AssertMsg(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT), ("VINF_SELM_SYNC_GDT without VMCPU_FF_SELM_SYNC_GDT!\n"));
+            rc = VINF_SUCCESS;
+            break;
 #endif
 
Index: /trunk/src/VBox/VMM/include/SELMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/include/SELMInternal.h	(revision 42406)
+++ /trunk/src/VBox/VMM/include/SELMInternal.h	(revision 42407)
@@ -23,4 +23,6 @@
 #include <VBox/vmm/stam.h>
 #include <VBox/vmm/cpum.h>
+#include <VBox/log.h>
+#include <iprt/x86.h>
 
 
@@ -173,4 +175,19 @@
     /** The number of times we had find free hypervisor selectors. */
     STAMCOUNTER             StatScanForHyperSels;
+    /** Counts the times we detected stale selectors in SELMR3UpdateFromCPUM. */
+    STAMCOUNTER             aStatDetectedStaleSReg[X86_SREG_COUNT];
+    /** Counts the times we were called with already stale selectors in
+     * SELMR3UpdateFromCPUM. */
+    STAMCOUNTER             aStatAlreadyStaleSReg[X86_SREG_COUNT];
+    /** Counts the times we found a stale selector becoming valid again. */
+    STAMCOUNTER             StatStaleToUnstaleSReg;
+#ifdef VBOX_WITH_STATISTICS
+    /** Times we updated hidden selector registers in SELMR3UpdateFromCPUM. */
+    STAMCOUNTER             aStatUpdatedSReg[X86_SREG_COUNT];
+    STAMCOUNTER             StatLoadHidSelGst;
+    STAMCOUNTER             StatLoadHidSelShw;
+#endif
+    STAMCOUNTER             StatLoadHidSelReadErrors;
+    STAMCOUNTER             StatLoadHidSelGstNoGood;
 } SELM, *PSELM;
 
@@ -189,4 +206,285 @@
 RT_C_DECLS_END
 
+
+#ifdef VBOX_WITH_RAW_MODE_NOT_R0
+
+/**
+ * Checks if a shadow descriptor table entry is good for the given segment
+ * register.
+ *
+ * @returns @c true if good, @c false if not.
+ * @param   pSReg               The segment register.
+ * @param   pShwDesc            The shadow descriptor table entry.
+ * @param   iSReg               The segment register index (X86_SREG_XXX).
+ * @param   uCpl                The CPL.
+ */
+DECLINLINE(bool) selmIsShwDescGoodForSReg(PCCPUMSELREG pSReg, PCX86DESC pShwDesc, uint32_t iSReg, uint32_t uCpl)
+{
+    /*
+     * See iemMiscValidateNewSS, iemCImpl_LoadSReg and intel+amd manuals.
+     */
+
+    if (!pShwDesc->Gen.u1Present)
+    {
+        Log(("selmIsShwDescGoodForSReg: Not present\n"));
+        return false;
+    }
+
+    if (!pShwDesc->Gen.u1DescType)
+    {
+        Log(("selmIsShwDescGoodForSReg: System descriptor\n"));
+        return false;
+    }
+
+    if (iSReg == X86_SREG_SS)
+    {
+        if ((pShwDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
+        {
+            Log(("selmIsShwDescGoodForSReg: Stack must be writable\n"));
+            return false;
+        }
+        if (uCpl > (unsigned)pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available)
+        {
+            Log(("selmIsShwDescGoodForSReg: CPL(%d) > DPL(%d)\n", uCpl, pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available));
+            return false;
+        }
+    }
+    else
+    {
+        if (iSReg == X86_SREG_CS)
+        {
+            if (!(pShwDesc->Gen.u4Type & X86_SEL_TYPE_CODE))
+            {
+                Log(("selmIsShwDescGoodForSReg: CS needs code segment\n"));
+                return false;
+            }
+        }
+        else if ((pShwDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
+        {
+            Log(("selmIsShwDescGoodForSReg: iSReg=%u execute only\n", iSReg));
+            return false;
+        }
+
+        if (       (pShwDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
+                != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
+            &&  (   (   (pSReg->Sel & X86_SEL_RPL) > (unsigned)pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available
+                     && (pSReg->Sel & X86_SEL_RPL) != pShwDesc->Gen.u1Available )
+                 || uCpl > (unsigned)pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available ) )
+        {
+            Log(("selmIsShwDescGoodForSReg: iSReg=%u DPL=%u CPL=%u RPL=%u\n", iSReg,
+                 pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available, uCpl, pSReg->Sel & X86_SEL_RPL));
+            return false;
+        }
+    }
+
+    return true;
+}
+
+
+/**
+ * Checks if a guest descriptor table entry is good for the given segment
+ * register.
+ *
+ * @returns @c true if good, @c false if not.
+ * @param   pVCpu               The current virtual CPU.
+ * @param   pSReg               The segment register.
+ * @param   pGstDesc            The guest descriptor table entry.
+ * @param   iSReg               The segment register index (X86_SREG_XXX).
+ * @param   uCpl                The CPL.
+ */
+DECLINLINE(bool) selmIsGstDescGoodForSReg(PVMCPU pVCpu, PCCPUMSELREG pSReg, PCX86DESC pGstDesc, uint32_t iSReg, uint32_t uCpl)
+{
+    /*
+     * See iemMiscValidateNewSS, iemCImpl_LoadSReg and intel+amd manuals.
+     */
+
+    if (!pGstDesc->Gen.u1Present)
+    {
+        Log(("selmIsGstDescGoodForSReg: Not present\n"));
+        return false;
+    }
+
+    if (!pGstDesc->Gen.u1DescType)
+    {
+        Log(("selmIsGstDescGoodForSReg: System descriptor\n"));
+        return false;
+    }
+
+    if (iSReg == X86_SREG_SS)
+    {
+        if ((pGstDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
+        {
+            Log(("selmIsGstDescGoodForSReg: Stack must be writable\n"));
+            return false;
+        }
+        if (uCpl > pGstDesc->Gen.u2Dpl)
+        {
+            Log(("selmIsGstDescGoodForSReg: CPL(%d) > DPL(%d)\n", uCpl, pGstDesc->Gen.u2Dpl));
+            return false;
+        }
+    }
+    else
+    {
+        if (iSReg == X86_SREG_CS)
+        {
+            if (!(pGstDesc->Gen.u4Type & X86_SEL_TYPE_CODE))
+            {
+                Log(("selmIsGstDescGoodForSReg: CS needs code segment\n"));
+                return false;
+            }
+        }
+        else if ((pGstDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
+        {
+            Log(("selmIsGstDescGoodForSReg: iSReg=%u execute only\n", iSReg));
+            return false;
+        }
+
+        if (       (pGstDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
+                != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
+            &&  (   (   (pSReg->Sel & X86_SEL_RPL) > pGstDesc->Gen.u2Dpl
+                     && (   (pSReg->Sel & X86_SEL_RPL) != 1
+                         || !CPUMIsGuestInRawMode(pVCpu) ) )
+                 || uCpl > (unsigned)pGstDesc->Gen.u2Dpl
+                )
+           )
+        {
+            Log(("selmIsGstDescGoodForSReg: iSReg=%u DPL=%u CPL=%u RPL=%u InRawMode=%u\n", iSReg,
+                 pGstDesc->Gen.u2Dpl, uCpl, pSReg->Sel & X86_SEL_RPL, CPUMIsGuestInRawMode(pVCpu)));
+            return false;
+        }
+    }
+
+    return true;
+}
+
+
+/**
+ * Converts a guest GDT or LDT entry to a shadow table entry.
+ *
+ * @param   pDesc       Guest entry on input, shadow entry on return.
+ */
+DECL_FORCE_INLINE(void) selmGuestToShadowDesc(PX86DESC pDesc)
+{
+    /*
+     * Code and data selectors are generally 1:1, with the
+     * 'little' adjustment we do for DPL 0 selectors.
+     */
+    if (pDesc->Gen.u1DescType)
+    {
+        /*
+         * Hack for A-bit against Trap E on read-only GDT.
+         */
+        /** @todo Fix this by loading ds and cs before turning off WP. */
+        pDesc->Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
+
+        /*
+         * All DPL 0 code and data segments are squeezed into DPL 1.
+         *
+         * We're skipping conforming segments here because those
+         * cannot give us any trouble.
+         */
+        if (    pDesc->Gen.u2Dpl == 0
+            &&      (pDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
+                !=  (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
+        {
+            pDesc->Gen.u2Dpl       = 1;
+            pDesc->Gen.u1Available = 1;
+        }
+        else
+            pDesc->Gen.u1Available = 0;
+    }
+    else
+    {
+        /*
+         * System type selectors are marked not present.
+         * Recompiler or special handling is required for these.
+         */
+        /** @todo what about interrupt gates and rawr0? */
+        pDesc->Gen.u1Present = 0;
+    }
+}
+
+
+/**
+ * Checks if a segment register is stale given the shadow descriptor table
+ * entry.
+ *
+ * @returns @c true if stale, @c false if not.
+ * @param   pSReg               The segment register.
+ * @param   pShwDesc            The shadow descriptor entry.
+ * @param   iSReg               The segment register number (X86_SREG_XXX).
+ */
+DECLINLINE(bool) selmIsSRegStale32(PCCPUMSELREG pSReg, PCX86DESC pShwDesc, uint32_t iSReg)
+{
+    if (   pSReg->Attr.n.u1Present     != pShwDesc->Gen.u1Present
+        || pSReg->Attr.n.u4Type        != pShwDesc->Gen.u4Type
+        || pSReg->Attr.n.u1DescType    != pShwDesc->Gen.u1DescType
+        || pSReg->Attr.n.u1DefBig      != pShwDesc->Gen.u1DefBig
+        || pSReg->Attr.n.u1Granularity != pShwDesc->Gen.u1Granularity
+        || pSReg->Attr.n.u2Dpl         != pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available)
+    {
+        Log(("selmIsSRegStale32: Attributes changed (%#x -> %#x)\n", pSReg->Attr.u, X86DESC_GET_HID_ATTR(pShwDesc)));
+        return true;
+    }
+
+    if (pSReg->u64Base != X86DESC_BASE(pShwDesc))
+    {
+        Log(("selmIsSRegStale32: base changed (%#llx -> %#llx)\n", pSReg->u64Base, X86DESC_BASE(pShwDesc)));
+        return true;
+    }
+
+    if (pSReg->u32Limit != X86DESC_LIMIT_G(pShwDesc))
+    {
+        Log(("selmIsSRegStale32: limit changed (%#x -> %#x)\n", pSReg->u32Limit, X86DESC_LIMIT_G(pShwDesc)));
+        return true;
+    }
+
+    return false;
+}
+
+
+/**
+ * Loads the hidden bits of a selector register from a shadow descriptor table
+ * entry.
+ *
+ * @param   pSReg               The segment register in question.
+ * @param   pShwDesc            The shadow descriptor table entry.
+ */
+DECLINLINE(void) selmLoadHiddenSRegFromShadowDesc(PCPUMSELREG pSReg, PCX86DESC pShwDesc)
+{
+    pSReg->Attr.u         = X86DESC_GET_HID_ATTR(pShwDesc);
+    pSReg->Attr.n.u2Dpl  -= pSReg->Attr.n.u1Available;
+    Assert(pSReg->Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
+    pSReg->u32Limit       = X86DESC_LIMIT_G(pShwDesc);
+    pSReg->u64Base        = X86DESC_BASE(pShwDesc);
+    pSReg->ValidSel       = pSReg->Sel;
+    if (pSReg->Attr.n.u1Available)
+        pSReg->ValidSel  &= ~(RTSEL)1;
+    pSReg->fFlags         = CPUMSELREG_FLAGS_VALID;
+}
+
+
+/**
+ * Loads the hidden bits of a selector register from a guest descriptor table
+ * entry.
+ *
+ * @param   pVCpu               The current virtual CPU.
+ * @param   pSReg               The segment register in question.
+ * @param   pGstDesc            The guest descriptor table entry.
+ */
+DECLINLINE(void) selmLoadHiddenSRegFromGuestDesc(PVMCPU pVCpu, PCPUMSELREG pSReg, PCX86DESC pGstDesc)
+{
+    pSReg->Attr.u         = X86DESC_GET_HID_ATTR(pGstDesc);
+    pSReg->Attr.n.u4Type |= X86_SEL_TYPE_ACCESSED;
+    pSReg->u32Limit       = X86DESC_LIMIT_G(pGstDesc);
+    pSReg->u64Base        = X86DESC_BASE(pGstDesc);
+    pSReg->ValidSel       = pSReg->Sel;
+    if ((pSReg->ValidSel & 1) && CPUMIsGuestInRawMode(pVCpu))
+        pSReg->ValidSel  &= ~(RTSEL)1;
+    pSReg->fFlags         = CPUMSELREG_FLAGS_VALID;
+}
+
+#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
+
 /** @} */
 
Index: /trunk/src/VBox/VMM/testcase/tstVMStruct.h
===================================================================
--- /trunk/src/VBox/VMM/testcase/tstVMStruct.h	(revision 42406)
+++ /trunk/src/VBox/VMM/testcase/tstVMStruct.h	(revision 42407)
@@ -994,4 +994,6 @@
     GEN_CHECK_OFF(SELM, fGDTRangeRegistered);
     GEN_CHECK_OFF(SELM, StatUpdateFromCPUM);
+    GEN_CHECK_OFF(SELM, StatStaleToUnstaleSReg);
+    GEN_CHECK_OFF(SELM, StatLoadHidSelGstNoGood);
 
     GEN_CHECK_SIZE(TM);
Index: /trunk/src/recompiler/VBoxRecompiler.c
===================================================================
--- /trunk/src/recompiler/VBoxRecompiler.c	(revision 42406)
+++ /trunk/src/recompiler/VBoxRecompiler.c	(revision 42407)
@@ -138,4 +138,5 @@
 static STAMCOUNTER    gStatRefuseRing1or2;
 static STAMCOUNTER    gStatRefuseCanExecute;
+static STAMCOUNTER    gaStatRefuseStale[6];
 static STAMCOUNTER    gStatREMGDTChange;
 static STAMCOUNTER    gStatREMIDTChange;
@@ -390,4 +391,10 @@
     STAM_REG(pVM, &gStatRefuseRing1or2,     STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES,     "Raw mode refused because of ring 1/2 execution");
     STAM_REG(pVM, &gStatRefuseCanExecute,   STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES,     "Raw mode refused because of cCanExecuteRaw");
+    STAM_REG(pVM, &gaStatRefuseStale[R_ES], STAMTYPE_COUNTER, "/REM/Refuse/StaleES",  STAMUNIT_OCCURENCES,     "Raw mode refused because of stale ES");
+    STAM_REG(pVM, &gaStatRefuseStale[R_CS], STAMTYPE_COUNTER, "/REM/Refuse/StaleCS",  STAMUNIT_OCCURENCES,     "Raw mode refused because of stale CS");
+    STAM_REG(pVM, &gaStatRefuseStale[R_SS], STAMTYPE_COUNTER, "/REM/Refuse/StaleSS",  STAMUNIT_OCCURENCES,     "Raw mode refused because of stale SS");
+    STAM_REG(pVM, &gaStatRefuseStale[R_DS], STAMTYPE_COUNTER, "/REM/Refuse/StaleDS",  STAMUNIT_OCCURENCES,     "Raw mode refused because of stale DS");
+    STAM_REG(pVM, &gaStatRefuseStale[R_FS], STAMTYPE_COUNTER, "/REM/Refuse/StaleFS",  STAMUNIT_OCCURENCES,     "Raw mode refused because of stale FS");
+    STAM_REG(pVM, &gaStatRefuseStale[R_GS], STAMTYPE_COUNTER, "/REM/Refuse/StaleGS",  STAMUNIT_OCCURENCES,     "Raw mode refused because of stale GS");
     STAM_REG(pVM, &gStatFlushTBs,           STAMTYPE_COUNTER, "/REM/FlushTB",         STAMUNIT_OCCURENCES,     "Number of TB flushes");
 
@@ -568,4 +575,10 @@
     STAM_DEREG(pVM, &gStatRefuseRing1or2);
     STAM_DEREG(pVM, &gStatRefuseCanExecute);
+    STAM_DEREG(pVM, &gaStatRefuseStale[0]);
+    STAM_DEREG(pVM, &gaStatRefuseStale[1]);
+    STAM_DEREG(pVM, &gaStatRefuseStale[2]);
+    STAM_DEREG(pVM, &gaStatRefuseStale[3]);
+    STAM_DEREG(pVM, &gaStatRefuseStale[4]);
+    STAM_DEREG(pVM, &gaStatRefuseStale[5]);
     STAM_DEREG(pVM, &gStatFlushTBs);
 
@@ -1664,4 +1677,44 @@
     }
 
+    /*
+     * Stale hidden selectors means raw-mode is unsafe (being very careful).
+     */
+    if (env->segs[R_CS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
+    {
+        Log2(("raw mode refused: stale CS (%#x)\n", env->segs[R_CS].selector));
+        STAM_COUNTER_INC(&gaStatRefuseStale[R_CS]);
+        return EMSTATE_REM;
+    }
+    if (env->segs[R_SS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
+    {
+        Log2(("raw mode refused: stale SS (%#x)\n", env->segs[R_SS].selector));
+        STAM_COUNTER_INC(&gaStatRefuseStale[R_SS]);
+        return EMSTATE_REM;
+    }
+    if (env->segs[R_DS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
+    {
+        Log2(("raw mode refused: stale DS (%#x)\n", env->segs[R_DS].selector));
+        STAM_COUNTER_INC(&gaStatRefuseStale[R_DS]);
+        return EMSTATE_REM;
+    }
+    if (env->segs[R_ES].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
+    {
+        Log2(("raw mode refused: stale ES (%#x)\n", env->segs[R_ES].selector));
+        STAM_COUNTER_INC(&gaStatRefuseStale[R_ES]);
+        return EMSTATE_REM;
+    }
+    if (env->segs[R_FS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
+    {
+        Log2(("raw mode refused: stale FS (%#x)\n", env->segs[R_FS].selector));
+        STAM_COUNTER_INC(&gaStatRefuseStale[R_FS]);
+        return EMSTATE_REM;
+    }
+    if (env->segs[R_GS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
+    {
+        Log2(("raw mode refused: stale GS (%#x)\n", env->segs[R_GS].selector));
+        STAM_COUNTER_INC(&gaStatRefuseStale[R_GS]);
+        return EMSTATE_REM;
+    }
+
 /*    Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));*/
     *piException = EXCP_EXECUTE_RAW;
@@ -2042,5 +2095,4 @@
     register const CPUMCTX *pCtx;
     register unsigned       fFlags;
-    bool                    fHiddenSelRegsValid;
     unsigned                i;
     TRPMEVENT               enmType;
@@ -2054,5 +2106,4 @@
     pVM->rem.s.Env.pVCpu = pVCpu;
     pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
-    fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVCpu); /// @todo move this down and use fFlags.
 
     Assert(!pVM->rem.s.fInREM);
@@ -2277,5 +2328,5 @@
         if (fFlags & CPUM_CHANGED_LDTR)
         {
-            if (fHiddenSelRegsValid || (pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID))
+            if (pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
             {
                 pVM->rem.s.Env.ldt.selector = pCtx->ldtr.Sel;
@@ -2307,130 +2358,58 @@
      * Sync TR unconditionally to make life simpler.
      */
-    pVM->rem.s.Env.tr.selector = pCtx->tr.Sel;
-    pVM->rem.s.Env.tr.base     = pCtx->tr.u64Base;
-    pVM->rem.s.Env.tr.limit    = pCtx->tr.u32Limit;
-    pVM->rem.s.Env.tr.flags    = (pCtx->tr.Attr.u << 8) & 0xFFFFFF;
+    pVM->rem.s.Env.tr.selector    = pCtx->tr.Sel;
+    pVM->rem.s.Env.tr.newselector = 0;
+    pVM->rem.s.Env.tr.fVBoxFlags  = pCtx->tr.fFlags;
+    pVM->rem.s.Env.tr.base        = pCtx->tr.u64Base;
+    pVM->rem.s.Env.tr.limit       = pCtx->tr.u32Limit;
+    pVM->rem.s.Env.tr.flags       = (pCtx->tr.Attr.u << 8) & 0xFFFFFF;
     /* Note! do_interrupt will fault if the busy flag is still set... */
-    pVM->rem.s.Env.tr.flags   &= ~DESC_TSS_BUSY_MASK;
+    pVM->rem.s.Env.tr.flags      &= ~DESC_TSS_BUSY_MASK;
 
     /*
      * Update selector registers.
+     *
      * This must be done *after* we've synced gdt, ldt and crX registers
      * since we're reading the GDT/LDT om sync_seg. This will happen with
      * saved state which takes a quick dip into rawmode for instance.
-     */
-    /*
-     * Stack; Note first check this one as the CPL might have changed. The
-     * wrong CPL can cause QEmu to raise an exception in sync_seg!!
-     */
-
-    if (fHiddenSelRegsValid)
-    {
-        /* The hidden selector registers are valid in the CPU context. */
-        /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
-
-        /* Set current CPL */
-        cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
-
-        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit, (pCtx->cs.Attr.u << 8) & 0xFFFFFF);
-        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss.Sel, pCtx->ss.u64Base, pCtx->ss.u32Limit, (pCtx->ss.Attr.u << 8) & 0xFFFFFF);
-        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds.Sel, pCtx->ds.u64Base, pCtx->ds.u32Limit, (pCtx->ds.Attr.u << 8) & 0xFFFFFF);
-        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es.Sel, pCtx->es.u64Base, pCtx->es.u32Limit, (pCtx->es.Attr.u << 8) & 0xFFFFFF);
-        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs.Sel, pCtx->fs.u64Base, pCtx->fs.u32Limit, (pCtx->fs.Attr.u << 8) & 0xFFFFFF);
-        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs.Sel, pCtx->gs.u64Base, pCtx->gs.u32Limit, (pCtx->gs.Attr.u << 8) & 0xFFFFFF);
-    }
-    else
-    {
-        /* In 'normal' raw mode we don't have access to the hidden selector registers. */
-        /** @todo use hidden registers when possible and make CPUM/someone do the
-         *        reading of lazily maintained hidden registers. */
-        if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss.Sel)
-        {
-            Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss.Sel));
-
-            cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
-            sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss.Sel);
-#ifdef VBOX_WITH_STATISTICS
-            if (pVM->rem.s.Env.segs[R_SS].newselector)
-            {
-                STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
-            }
-#endif
-        }
-        else
-            pVM->rem.s.Env.segs[R_SS].newselector = 0;
-
-        if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es.Sel)
-        {
-            Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es.Sel));
-            sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es.Sel);
-#ifdef VBOX_WITH_STATISTICS
-            if (pVM->rem.s.Env.segs[R_ES].newselector)
-            {
-                STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
-            }
-#endif
-        }
-        else
-            pVM->rem.s.Env.segs[R_ES].newselector = 0;
-
-        if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs.Sel)
-        {
-            Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs.Sel));
-            sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs.Sel);
-#ifdef VBOX_WITH_STATISTICS
-            if (pVM->rem.s.Env.segs[R_CS].newselector)
-            {
-                STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
-            }
-#endif
-        }
-        else
-            pVM->rem.s.Env.segs[R_CS].newselector = 0;
-
-        if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds.Sel)
-        {
-            Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds.Sel));
-            sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds.Sel);
-#ifdef VBOX_WITH_STATISTICS
-            if (pVM->rem.s.Env.segs[R_DS].newselector)
-            {
-                STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
-            }
-#endif
-        }
-        else
-            pVM->rem.s.Env.segs[R_DS].newselector = 0;
-
+     *
+     * CPL/Stack; Note first check this one as the CPL might have changed.
+     * The wrong CPL can cause QEmu to raise an exception in sync_seg!!
+     */
+    cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
+    /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
+#define SYNC_IN_SREG(a_pEnv, a_SReg, a_pRemSReg, a_pVBoxSReg) \
+        do \
+        { \
+            if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, a_pVBoxSReg)) \
+            { \
+                cpu_x86_load_seg_cache(a_pEnv, R_##a_SReg, \
+                                       (a_pVBoxSReg)->Sel, \
+                                       (a_pVBoxSReg)->u64Base, \
+                                       (a_pVBoxSReg)->u32Limit, \
+                                       ((a_pVBoxSReg)->Attr.u << 8) & 0xFFFFFF); \
+                (a_pRemSReg)->fVBoxFlags = (a_pVBoxSReg)->fFlags; \
+            } \
+            /* This only-reload-if-changed stuff is the old approach, we should ditch it. */ \
+            else if ((a_pRemSReg)->selector != (a_pVBoxSReg)->Sel) \
+            { \
+                Log2(("REMR3State: " #a_SReg " changed from %04x to %04x!\n", \
+                      (a_pRemSReg)->selector, (a_pVBoxSReg)->Sel)); \
+                sync_seg(a_pEnv, R_##a_SReg, (a_pVBoxSReg)->Sel); \
+                if ((a_pRemSReg)->newselector) \
+                    STAM_COUNTER_INC(&gStatSelOutOfSync[R_##a_SReg]); \
+            } \
+            else \
+                (a_pRemSReg)->newselector = 0; \
+        } while (0)
+
+    SYNC_IN_SREG(&pVM->rem.s.Env, CS, &pVM->rem.s.Env.segs[R_CS], &pCtx->cs);
+    SYNC_IN_SREG(&pVM->rem.s.Env, SS, &pVM->rem.s.Env.segs[R_SS], &pCtx->ss);
+    SYNC_IN_SREG(&pVM->rem.s.Env, DS, &pVM->rem.s.Env.segs[R_DS], &pCtx->ds);
+    SYNC_IN_SREG(&pVM->rem.s.Env, ES, &pVM->rem.s.Env.segs[R_ES], &pCtx->es);
+    SYNC_IN_SREG(&pVM->rem.s.Env, FS, &pVM->rem.s.Env.segs[R_FS], &pCtx->fs);
+    SYNC_IN_SREG(&pVM->rem.s.Env, GS, &pVM->rem.s.Env.segs[R_GS], &pCtx->gs);
     /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
      * be the same but not the base/limit. */
-        if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs.Sel)
-        {
-            Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs.Sel));
-            sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs.Sel);
-#ifdef VBOX_WITH_STATISTICS
-            if (pVM->rem.s.Env.segs[R_FS].newselector)
-            {
-                STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
-            }
-#endif
-        }
-        else
-            pVM->rem.s.Env.segs[R_FS].newselector = 0;
-
-        if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs.Sel)
-        {
-            Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs.Sel));
-            sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs.Sel);
-#ifdef VBOX_WITH_STATISTICS
-            if (pVM->rem.s.Env.segs[R_GS].newselector)
-            {
-                STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
-            }
-#endif
-        }
-        else
-            pVM->rem.s.Env.segs[R_GS].newselector = 0;
-    }
 
     /*
@@ -2683,6 +2662,6 @@
             /* Qemu and AMD/Intel have different ideas about the busy flag ... */
         ||  pCtx->tr.Attr.u   != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
-                                     ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
-                                     : 0)
+                                  ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
+                                  : 0)
         ||  !(pCtx->tr.fFlags & CPUMSELREG_FLAGS_VALID)
        )
@@ -2905,6 +2884,6 @@
             /* Qemu and AMD/Intel have different ideas about the busy flag ... */
         ||  pCtx->tr.Attr.u   != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
-                                     ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
-                                     : 0)
+                                  ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
+                                  : 0)
         ||  !(pCtx->tr.fFlags & CPUMSELREG_FLAGS_VALID)
        )
Index: /trunk/src/recompiler/target-i386/cpu.h
===================================================================
--- /trunk/src/recompiler/target-i386/cpu.h	(revision 42406)
+++ /trunk/src/recompiler/target-i386/cpu.h	(revision 42407)
@@ -65,4 +65,5 @@
 # include <VBox/vmm/vmm.h>
 # include <VBox/vmm/stam.h>
+# include <VBox/vmm/cpumctx.h>
 #endif /* VBOX */
 
@@ -532,11 +533,12 @@
 typedef struct SegmentCache {
     uint32_t selector;
+#ifdef VBOX
+    /** The new selector is saved here when we are unable to sync it before invoking the recompiled code. */
+    uint16_t newselector;
+    uint16_t fVBoxFlags;
+#endif
     target_ulong base;
     uint32_t limit;
     uint32_t flags;
-#ifdef VBOX
-    /** The new selector is saved here when we are unable to sync it before invoking the recompiled code. */
-    uint32_t newselector;
-#endif
 } SegmentCache;
 
@@ -942,7 +944,12 @@
     sc->base = base;
     sc->limit = limit;
+#ifndef VBOX
     sc->flags = flags;
-#ifdef VBOX
+#else
+    if (flags & DESC_P_MASK)
+        flags |= DESC_A_MASK;           /* Make sure the A bit is set to avoid trouble. */
+    sc->flags = flags;
     sc->newselector = 0;
+    sc->fVBoxFlags  = CPUMSELREG_FLAGS_VALID;
 #endif
 
Index: /trunk/src/recompiler/target-i386/op_helper.c
===================================================================
--- /trunk/src/recompiler/target-i386/op_helper.c	(revision 42406)
+++ /trunk/src/recompiler/target-i386/op_helper.c	(revision 42407)
@@ -254,4 +254,8 @@
     sc->limit = get_seg_limit(e1, e2);
     sc->flags = e2;
+#ifdef VBOX
+    sc->newselector = 0;
+    sc->fVBoxFlags  = CPUMSELREG_FLAGS_VALID;
+#endif
 }
 
@@ -557,4 +561,8 @@
     env->tr.limit = tss_limit;
     env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
+#ifdef VBOX
+    env->tr.fVBoxFlags  = CPUMSELREG_FLAGS_VALID;
+    env->tr.newselector = 0;
+#endif
 
     if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
@@ -596,4 +604,8 @@
     env->ldt.limit = 0;
     env->ldt.flags = 0;
+#ifdef VBOX
+    env->ldt.fVBoxFlags  = CPUMSELREG_FLAGS_VALID;
+    env->ldt.newselector = 0;
+#endif
 
     /* load the LDT */
@@ -1954,4 +1966,8 @@
     env->ldt.limit = ldl_phys(sm_state + 0x7e74);
     env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
+#ifdef VBOX
+    env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
+    env->ldt.newselector = 0;
+#endif
 
     env->idt.base = ldq_phys(sm_state + 0x7e88);
@@ -1962,4 +1978,8 @@
     env->tr.limit = ldl_phys(sm_state + 0x7e94);
     env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
+#ifdef VBOX
+    env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
+    env->tr.newselector = 0;
+#endif
 
     EAX = ldq_phys(sm_state + 0x7ff8);
@@ -2008,4 +2028,8 @@
     env->tr.limit = ldl_phys(sm_state + 0x7f60);
     env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
+#ifdef VBOX
+    env->tr.fVBoxFlags  = CPUMSELREG_FLAGS_VALID;
+    env->tr.newselector = 0;
+#endif
 
     env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
@@ -2013,4 +2037,8 @@
     env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
     env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
+#ifdef VBOX
+    env->ldt.fVBoxFlags  = CPUMSELREG_FLAGS_VALID;
+    env->ldt.newselector = 0;
+#endif
 
     env->gdt.base = ldl_phys(sm_state + 0x7f74);
@@ -2448,4 +2476,8 @@
         env->ldt.base = 0;
         env->ldt.limit = 0;
+#ifdef VBOX
+        env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
+        env->ldt.newselector = 0;
+#endif
     } else {
         if (selector & 0x4)
@@ -2510,4 +2542,8 @@
         env->tr.limit = 0;
         env->tr.flags = 0;
+#ifdef VBOX
+        env->tr.fVBoxFlags  = CPUMSELREG_FLAGS_VALID;
+        env->tr.newselector = 0;
+#endif
     } else {
         if (selector & 0x4)
@@ -5724,5 +5760,5 @@
 
         /* Successful sync. */
-        env1->segs[seg_reg].newselector = 0;
+        Assert(env1->segs[seg_reg].newselector == 0);
     }
     else
@@ -5741,7 +5777,7 @@
                 load_segment(&e1, &e2, selector);
                 cpu_x86_load_seg_cache(env, R_CS, selector,
-                               get_seg_base(e1, e2),
-                               get_seg_limit(e1, e2),
-                               e2);
+                                       get_seg_base(e1, e2),
+                                       get_seg_limit(e1, e2),
+                                       e2);
             }
             else
@@ -5753,5 +5789,5 @@
 
             /* Successful sync. */
-            env1->segs[seg_reg].newselector = 0;
+            Assert(env1->segs[seg_reg].newselector == 0);
         }
         else
