Index: /trunk/include/VBox/vmm/cpum.h
===================================================================
--- /trunk/include/VBox/vmm/cpum.h	(revision 42426)
+++ /trunk/include/VBox/vmm/cpum.h	(revision 42427)
@@ -91,4 +91,5 @@
 VMMDECL(RTSEL)      CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden);
 VMMDECL(RTSEL)      CPUMGetGuestLDTR(PVMCPU pVCpu);
+VMMDECL(RTSEL)      CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit);
 VMMDECL(uint64_t)   CPUMGetGuestCR0(PVMCPU pVCpu);
 VMMDECL(uint64_t)   CPUMGetGuestCR2(PVMCPU pVCpu);
Index: /trunk/include/VBox/vmm/cpumctx.h
===================================================================
--- /trunk/include/VBox/vmm/cpumctx.h	(revision 42426)
+++ /trunk/include/VBox/vmm/cpumctx.h	(revision 42427)
@@ -86,5 +86,5 @@
      && (   (a_pSelReg)->ValidSel == (a_pSelReg)->Sel \
          || (   (a_pVCpu) /*!= NULL*/ \
-             && (a_pSelReg)->ValidSel == ((a_pSelReg)->Sel & X86_SEL_MASK_RPL) \
+             && (a_pSelReg)->ValidSel == ((a_pSelReg)->Sel & X86_SEL_MASK_OFF_RPL) \
              && ((a_pSelReg)->Sel      & X86_SEL_RPL) == 1 \
              && ((a_pSelReg)->ValidSel & X86_SEL_RPL) == 0 \
Index: /trunk/include/VBox/vmm/selm.h
===================================================================
--- /trunk/include/VBox/vmm/selm.h	(revision 42426)
+++ /trunk/include/VBox/vmm/selm.h	(revision 42427)
@@ -80,5 +80,4 @@
 VMMDECL(int)            SELMValidateAndConvertCSAddr(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL SelCPL, RTSEL SelCS,
                                                      PCPUMSELREG pSRegCS, RTGCPTR Addr, PRTGCPTR ppvFlat);
-VMMDECL(int)            SELMGetLDTFromSel(PVM pVM, RTSEL SelLdt, PRTGCPTR ppvLdt, unsigned *pcbLimit);
 #ifdef VBOX_WITH_RAW_MODE
 VMM_INT_DECL(void)      SELMLoadHiddenSelectorReg(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUMSELREG pSReg);
Index: /trunk/include/iprt/x86.h
===================================================================
--- /trunk/include/iprt/x86.h	(revision 42426)
+++ /trunk/include/iprt/x86.h	(revision 42427)
@@ -2955,24 +2955,33 @@
  * The shift used to convert a selector from and to index an index (C).
  */
-#define X86_SEL_SHIFT       3
+#define X86_SEL_SHIFT           3
 
 /**
  * The mask used to mask off the table indicator and RPL of an selector.
  */
-#define X86_SEL_MASK        0xfff8U
+#define X86_SEL_MASK            0xfff8U
 
 /**
  * The mask used to mask off the RPL of an selector.
- */
-#define X86_SEL_MASK_RPL    0xfffcU
+ * This is suitable for checking for NULL selectors, since a NULL selector may carry any RPL value (0 thru 3).
+ */
+#define X86_SEL_MASK_OFF_RPL    0xfffcU
 
 /**
  * The bit indicating that a selector is in the LDT and not in the GDT.
  */
-#define X86_SEL_LDT         0x0004U
+#define X86_SEL_LDT             0x0004U
+
 /**
  * The bit mask for getting the RPL of a selector.
  */
-#define X86_SEL_RPL         0x0003U
+#define X86_SEL_RPL             0x0003U
+
+/**
+ * The mask covering both RPL and LDT.
+ * This is incidentally the same as sizeof(X86DESC) - 1, so good for limit
+ * checks.
+ */
+#define X86_SEL_RPL_LDT         0x0007U
 
 /** @} */
Index: /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp	(revision 42426)
+++ /trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp	(revision 42427)
@@ -116,5 +116,5 @@
     {
         /* Protected mode - get it from the selector descriptor tables. */
-        if (!(pSReg->Sel & X86_SEL_MASK))
+        if (!(pSReg->Sel & X86_SEL_MASK_OFF_RPL))
         {
             Assert(!CPUMIsGuestInLongMode(pVCpu));
@@ -1303,4 +1303,12 @@
 VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
 {
+    return pVCpu->cpum.s.Guest.ldtr.Sel;
+}
+
+
+VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
+{
+    *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
+    *pcbLimit   = pVCpu->cpum.s.Guest.ldtr.u32Limit;
     return pVCpu->cpum.s.Guest.ldtr.Sel;
 }
Index: /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp	(revision 42426)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp	(revision 42427)
@@ -1514,5 +1514,5 @@
     /* Null selectors are not allowed (we're not called for dispatching
        interrupts with SS=0 in long mode). */
-    if (!(NewSS & (X86_SEL_MASK | X86_SEL_LDT)))
+    if (!(NewSS & X86_SEL_MASK_OFF_RPL))
     {
         Log(("iemMiscValidateNewSSandRsp: #x - null selector -> #GP(0)\n", NewSS));
@@ -1863,5 +1863,5 @@
     /* A null CS is bad. */
     RTSEL NewCS = Idte.Gate.u16Sel;
-    if (!(NewCS & (X86_SEL_MASK | X86_SEL_LDT)))
+    if (!(NewCS & X86_SEL_MASK_OFF_RPL))
     {
         Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
@@ -1882,10 +1882,10 @@
     {
         Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
-        return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
+        return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
     }
     if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
     {
         Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
-        return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
+        return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
     }
 
@@ -1899,5 +1899,5 @@
         Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
              u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
-        return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
+        return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
     }
     /** @todo is the RPL of the interrupt/trap gate descriptor checked? */
@@ -1913,5 +1913,5 @@
         Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
              u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
-        return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
+        return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
     }
 
@@ -5816,5 +5816,5 @@
     {
         if (   !pCtx->ldtr.Attr.n.u1Present
-            || (uSel | 0x7U) > pCtx->ldtr.u32Limit )
+            || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
         {
             Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
@@ -5829,5 +5829,5 @@
     else
     {
-        if ((uSel | 0x7U) > pCtx->gdtr.cbGdt)
+        if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
         {
             Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
@@ -5848,6 +5848,6 @@
             || pDesc->Legacy.Gen.u1DescType)
             pDesc->Long.au64[1] = 0;
-        else if ((uint32_t)(uSel & X86_SEL_MASK) + 15 < (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
-            rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
+        else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
+            rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
         else
         {
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h	(revision 42426)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h	(revision 42427)
@@ -818,5 +818,5 @@
 {
     Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
-    Assert((uSel & (X86_SEL_MASK | X86_SEL_LDT)));
+    Assert((uSel & X86_SEL_MASK_OFF_RPL));
 
     if (IEM_IS_LONG_MODE(pIemCpu))
@@ -913,5 +913,5 @@
      * Protected mode. Need to parse the specified descriptor...
      */
-    if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
+    if (!(uSel & X86_SEL_MASK_OFF_RPL))
     {
         Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
@@ -1015,5 +1015,5 @@
     /* commit */
     pCtx->rip = offSeg;
-    pCtx->cs.Sel         = uSel & (X86_SEL_MASK | X86_SEL_LDT);
+    pCtx->cs.Sel         = uSel & X86_SEL_MASK_OFF_RPL;
     pCtx->cs.Sel        |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
     pCtx->cs.ValidSel    = pCtx->cs.Sel;
@@ -1096,5 +1096,5 @@
      * Protected mode. Need to parse the specified descriptor...
      */
-    if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
+    if (!(uSel & X86_SEL_MASK_OFF_RPL))
     {
         Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
@@ -1236,5 +1236,5 @@
     /* commit */
     pCtx->rip = offSeg;
-    pCtx->cs.Sel         = uSel & (X86_SEL_MASK | X86_SEL_LDT);
+    pCtx->cs.Sel         = uSel & X86_SEL_MASK_OFF_RPL;
     pCtx->cs.Sel        |= pIemCpu->uCpl;
     pCtx->cs.ValidSel    = pCtx->cs.Sel;
@@ -1323,5 +1323,5 @@
      * Protected mode is complicated, of course.
      */
-    if (!(uNewCs & (X86_SEL_MASK | X86_SEL_LDT)))
+    if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
     {
         Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
@@ -1417,5 +1417,5 @@
            and read the selector. */
         IEMSELDESC DescSs;
-        if (!(uNewOuterSs & (X86_SEL_MASK | X86_SEL_LDT)))
+        if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
         {
             if (   !DescCs.Legacy.Gen.u1Long
@@ -1961,5 +1961,5 @@
              */
             /* Read the CS descriptor. */
-            if (!(uNewCs & (X86_SEL_MASK | X86_SEL_LDT)))
+            if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
             {
                 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
@@ -2037,5 +2037,5 @@
 
                 /* Read the SS descriptor. */
-                if (!(uNewSS & (X86_SEL_MASK | X86_SEL_LDT)))
+                if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
                 {
                     Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
@@ -2281,5 +2281,5 @@
      * FS and GS.  If not null, then we have to load and parse the descriptor.
      */
-    if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
+    if (!(uSel & X86_SEL_MASK_OFF_RPL))
     {
         if (iSegReg == X86_SREG_SS)
@@ -2659,5 +2659,5 @@
      * Now, loading a NULL selector is easy.
      */
-    if ((uNewLdt & X86_SEL_MASK) == 0)
+    if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
     {
         Log(("lldt %04x: Loading NULL selector.\n",  uNewLdt));
@@ -2689,10 +2689,10 @@
     {
         Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
-        return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
+        return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
     }
     if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
     {
         Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
-        return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
+        return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
     }
     uint64_t u64Base;
@@ -2704,5 +2704,5 @@
         {
             Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
-            return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
+            return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
         }
 
@@ -2711,5 +2711,5 @@
         {
             Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
-            return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
+            return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
         }
     }
@@ -2727,8 +2727,8 @@
 /** @todo check if the actual value is loaded or if the RPL is dropped */
     if (!IEM_VERIFICATION_ENABLED(pIemCpu))
-        CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK);
+        CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK_OFF_RPL);
     else
-        pCtx->ldtr.Sel  = uNewLdt & X86_SEL_MASK;
-    pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK;
+        pCtx->ldtr.Sel  = uNewLdt & X86_SEL_MASK_OFF_RPL;
+    pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
     pCtx->ldtr.fFlags   = CPUMSELREG_FLAGS_VALID;
     pCtx->ldtr.Attr.u   = X86DESC_GET_HID_ATTR(&Desc.Legacy);
@@ -2768,5 +2768,5 @@
         return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewTr);
     }
-    if ((uNewTr & X86_SEL_MASK) == 0)
+    if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
     {
         Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
@@ -2786,5 +2786,5 @@
     {
         Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
-        return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
+        return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
     }
     if (   Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
@@ -2793,5 +2793,5 @@
     {
         Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
-        return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
+        return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
     }
     uint64_t u64Base;
@@ -2803,5 +2803,5 @@
         {
             Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
-            return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
+            return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
         }
 
@@ -2810,5 +2810,5 @@
         {
             Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
-            return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
+            return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
         }
     }
@@ -2848,8 +2848,8 @@
 /** @todo check if the actual value is loaded or if the RPL is dropped */
     if (!IEM_VERIFICATION_ENABLED(pIemCpu))
-        CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK);
+        CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK_OFF_RPL);
     else
-        pCtx->tr.Sel  = uNewTr & X86_SEL_MASK;
-    pCtx->tr.ValidSel = uNewTr & X86_SEL_MASK;
+        pCtx->tr.Sel  = uNewTr & X86_SEL_MASK_OFF_RPL;
+    pCtx->tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
     pCtx->tr.fFlags   = CPUMSELREG_FLAGS_VALID;
     pCtx->tr.Attr.u   = X86DESC_GET_HID_ATTR(&Desc.Legacy);
Index: /trunk/src/VBox/VMM/VMMAll/SELMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/SELMAll.cpp	(revision 42426)
+++ /trunk/src/VBox/VMM/VMMAll/SELMAll.cpp	(revision 42427)
@@ -340,5 +340,5 @@
     {
         if (   !(fFlags & SELMTOFLAT_FLAGS_HYPER)
-            && (unsigned)(Sel & X86_SEL_MASK) >= pVM->selm.s.GuestGdtr.cbGdt)
+            && (Sel | X86_SEL_RPL_LDT) > pVM->selm.s.GuestGdtr.cbGdt)
             return VERR_INVALID_SELECTOR;
         Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
@@ -346,9 +346,9 @@
     else
     {
-        if ((unsigned)(Sel & X86_SEL_MASK) >= pVM->selm.s.cbLdtLimit)
+        if ((Sel | X86_SEL_RPL_LDT) > pVM->selm.s.cbLdtLimit)
             return VERR_INVALID_SELECTOR;
 
         /** @todo handle LDT page(s) not present! */
-        PX86DESC    paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
+        PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
         Desc = paLDT[Sel >> X86_SEL_SHIFT];
     }
@@ -1032,5 +1032,5 @@
     CPUMSELREGHID trHid;
     RTSEL tr = CPUMGetGuestTR(pVCpu, &trHid);
-    if (!(tr & X86_SEL_MASK))
+    if (!(tr & X86_SEL_MASK_OFF_RPL))
         return VERR_SELM_NO_TSS;
 
Index: /trunk/src/VBox/VMM/VMMR3/SELM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/SELM.cpp	(revision 42426)
+++ /trunk/src/VBox/VMM/VMMR3/SELM.cpp	(revision 42427)
@@ -1068,5 +1068,5 @@
      */
     RTSEL SelLdt = CPUMGetGuestLDTR(pVCpu);
-    if ((SelLdt & X86_SEL_MASK) == 0)
+    if (!(SelLdt & X86_SEL_MASK_OFF_RPL))
     {
         /* ldtr = 0 - update hyper LDTR and deregister any active handler. */
@@ -1085,4 +1085,5 @@
      * Get the LDT selector.
      */
+/** @todo this is wrong, use CPUMGetGuestLdtrEx */
     PX86DESC    pDesc    = &pVM->selm.s.paGdtR3[SelLdt >> X86_SEL_SHIFT];
     RTGCPTR     GCPtrLdt = X86DESC_BASE(pDesc);
@@ -1293,6 +1294,6 @@
     for (uint32_t iSReg = 0; iSReg < X86_SREG_COUNT; iSReg++)
     {
-        RTSEL const Sel = paSReg[iSReg].Sel & (X86_SEL_MASK | X86_SEL_LDT);
-        if (Sel & (X86_SEL_MASK | X86_SEL_LDT))
+        RTSEL const Sel = paSReg[iSReg].Sel;
+        if (Sel & X86_SEL_MASK_OFF_RPL)
         {
             /* Get the shadow descriptor entry corresponding to this. */
@@ -1553,12 +1554,13 @@
      *       make sure cbTss is 0.
      */
+/** @todo use the hidden bits, not shadow GDT. */
     CPUMSELREGHID   trHid;
     RTSEL           SelTss   = CPUMGetGuestTR(pVCpu, &trHid);
     RTGCPTR         GCPtrTss = trHid.u64Base;
     uint32_t        cbTss    = trHid.u32Limit;
-    Assert(     (SelTss & X86_SEL_MASK)
-           ||   (cbTss == 0 && GCPtrTss == 0 && trHid.Attr.u == 0 /* TR=0 */)
-           ||   (cbTss == 0xffff && GCPtrTss == 0 && trHid.Attr.n.u1Present && trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY /* RESET */));
-    if (SelTss & X86_SEL_MASK)
+    Assert(   (SelTss & X86_SEL_MASK_OFF_RPL)
+           || (cbTss == 0 && GCPtrTss == 0 && trHid.Attr.u == 0 /* TR=0 */)
+           || (cbTss == 0xffff && GCPtrTss == 0 && trHid.Attr.n.u1Present && trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY /* RESET */));
+    if (SelTss & X86_SEL_MASK_OFF_RPL)
     {
         Assert(!(SelTss & X86_SEL_LDT));
@@ -1793,6 +1795,7 @@
      */
     RTSEL SelLdt = CPUMGetGuestLDTR(pVCpu);
-    if ((SelLdt & X86_SEL_MASK) == 0)
+    if ((SelLdt & X86_SEL_MASK_OFF_RPL) == 0)
         return VINF_SUCCESS;
+    Assert(!(SelLdt & X86_SEL_LDT));
     if (SelLdt > GDTR.cbGdt)
     {
@@ -1886,8 +1889,8 @@
     RTGCPTR         GCPtrTss = trHid.u64Base;
     uint32_t        cbTss    = trHid.u32Limit;
-    Assert(     (SelTss & X86_SEL_MASK)
-           ||   (cbTss == 0 && GCPtrTss == 0 && trHid.Attr.u == 0 /* TR=0 */)
-           ||   (cbTss == 0xffff && GCPtrTss == 0 && trHid.Attr.n.u1Present && trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY /* RESET */));
-    if (SelTss & X86_SEL_MASK)
+    Assert(   (SelTss & X86_SEL_MASK_OFF_RPL)
+           || (cbTss == 0 && GCPtrTss == 0 && trHid.Attr.u == 0 /* TR=0 */)
+           || (cbTss == 0xffff && GCPtrTss == 0 && trHid.Attr.n.u1Present && trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY /* RESET */));
+    if (SelTss & X86_SEL_MASK_OFF_RPL)
     {
         AssertReturn(!(SelTss & X86_SEL_LDT), false);
@@ -2007,57 +2010,4 @@
 
 /**
- * Returns flat address and limit of LDT by LDT selector from guest GDTR.
- *
- * Fully validate selector.
- *
- * @returns VBox status.
- * @param   pVM       Pointer to the VM.
- * @param   SelLdt    LDT selector.
- * @param   ppvLdt    Where to store the flat address of LDT.
- * @param   pcbLimit  Where to store LDT limit.
- */
-VMMDECL(int) SELMGetLDTFromSel(PVM pVM, RTSEL SelLdt, PRTGCPTR ppvLdt, unsigned *pcbLimit)
-{
-    PVMCPU pVCpu = VMMGetCpu(pVM);
-
-    /* Get guest GDTR. */
-    VBOXGDTR GDTR;
-    CPUMGetGuestGDTR(pVCpu, &GDTR);
-
-    /* Check selector TI and GDT limit. */
-    if (   (SelLdt & X86_SEL_LDT)
-        || SelLdt > GDTR.cbGdt)
-        return VERR_INVALID_SELECTOR;
-
-    /* Read descriptor from GC. */
-    X86DESC Desc;
-    int rc = PGMPhysSimpleReadGCPtr(pVCpu, (void *)&Desc, (RTGCPTR)(GDTR.pGdt + (SelLdt & X86_SEL_MASK)), sizeof(Desc));
-    if (RT_FAILURE(rc))
-    {
-        /* fatal */
-        Log(("Can't read LDT descriptor for selector=%04X\n", SelLdt));
-        return VERR_SELECTOR_NOT_PRESENT;
-    }
-
-    /* Check if LDT descriptor is not present. */
-    if (Desc.Gen.u1Present == 0)
-        return VERR_SELECTOR_NOT_PRESENT;
-
-    /* Check LDT descriptor type. */
-    if (    Desc.Gen.u1DescType == 1
-        ||  Desc.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
-        return VERR_INVALID_SELECTOR;
-
-    /* LDT descriptor is ok. */
-    if (ppvLdt)
-    {
-        *ppvLdt = (RTGCPTR)X86DESC_BASE(&Desc);
-        *pcbLimit = X86DESC_LIMIT_G(&Desc);
-    }
-    return VINF_SUCCESS;
-}
-
-
-/**
  * Gets information about a 64-bit selector, SELMR3GetSelectorInfo helper.
  *
@@ -2075,12 +2025,13 @@
      * Read it from the guest descriptor table.
      */
+/** @todo this is bogus wrt the LDT/GDT limit on long selectors. */
     X86DESC64   Desc;
-    VBOXGDTR    Gdtr;
     RTGCPTR     GCPtrDesc;
-    CPUMGetGuestGDTR(pVCpu, &Gdtr);
     if (!(Sel & X86_SEL_LDT))
     {
         /* GDT */
-        if ((unsigned)(Sel & X86_SEL_MASK) + sizeof(X86DESC) - 1 > (unsigned)Gdtr.cbGdt)
+        VBOXGDTR Gdtr;
+        CPUMGetGuestGDTR(pVCpu, &Gdtr);
+        if ((Sel | X86_SEL_RPL_LDT) > Gdtr.cbGdt)
             return VERR_INVALID_SELECTOR;
         GCPtrDesc = Gdtr.pGdt + (Sel & X86_SEL_MASK);
@@ -2088,30 +2039,13 @@
     else
     {
-        /*
-         * LDT - must locate the LDT first.
-         */
-        RTSEL SelLdt = CPUMGetGuestLDTR(pVCpu);
-        if (    (unsigned)(SelLdt & X86_SEL_MASK) < sizeof(X86DESC) /* the first selector is invalid, right? */ /** @todo r=bird: No, I don't think so */
-            ||  (unsigned)(SelLdt & X86_SEL_MASK) + sizeof(X86DESC) - 1 > (unsigned)Gdtr.cbGdt)
+        /* LDT */
+        uint64_t GCPtrBase;
+        uint32_t cbLimit;
+        CPUMGetGuestLdtrEx(pVCpu, &GCPtrBase, &cbLimit);
+        if ((Sel | X86_SEL_RPL_LDT) > cbLimit)
             return VERR_INVALID_SELECTOR;
-        GCPtrDesc = Gdtr.pGdt + (SelLdt & X86_SEL_MASK);
-        int rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, GCPtrDesc, sizeof(Desc));
-        if (RT_FAILURE(rc))
-            return rc;
-
-        /* validate the LDT descriptor. */
-        if (Desc.Gen.u1Present == 0)
-            return VERR_SELECTOR_NOT_PRESENT;
-        if (    Desc.Gen.u1DescType == 1
-            ||  Desc.Gen.u4Type != AMD64_SEL_TYPE_SYS_LDT)
-            return VERR_INVALID_SELECTOR;
-
-        uint32_t cbLimit = X86DESC_LIMIT_G(&Desc);
-        if ((uint32_t)(Sel & X86_SEL_MASK) + sizeof(X86DESC) - 1 > cbLimit)
-            return VERR_INVALID_SELECTOR;
 
         /* calc the descriptor location. */
-        GCPtrDesc = X86DESC64_BASE(&Desc);
-        GCPtrDesc += (Sel & X86_SEL_MASK);
+        GCPtrDesc = GCPtrBase + (Sel & X86_SEL_MASK);
     }
 
@@ -2255,9 +2189,9 @@
     X86DESC Desc;
     if (    !(Sel & X86_SEL_LDT)
-        && (    pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] == (Sel & X86_SEL_MASK)
-            ||  pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] == (Sel & X86_SEL_MASK)
-            ||  pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] == (Sel & X86_SEL_MASK)
-            ||  pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] == (Sel & X86_SEL_MASK)
-            ||  pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] == (Sel & X86_SEL_MASK))
+        && (    pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]         == (Sel & X86_SEL_RPL_LDT)
+            ||  pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]         == (Sel & X86_SEL_RPL_LDT)
+            ||  pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64]       == (Sel & X86_SEL_RPL_LDT)
+            ||  pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]        == (Sel & X86_SEL_RPL_LDT)
+            ||  pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] == (Sel & X86_SEL_RPL_LDT))
        )
     {
@@ -2280,11 +2214,11 @@
         pSelInfo->fFlags = DBGFSELINFO_FLAGS_PROT_MODE;
 
-        VBOXGDTR    Gdtr;
         RTGCPTR     GCPtrDesc;
-        CPUMGetGuestGDTR(pVCpu, &Gdtr);
         if (!(Sel & X86_SEL_LDT))
         {
             /* GDT */
-            if ((unsigned)(Sel & X86_SEL_MASK) + sizeof(X86DESC) - 1 > (unsigned)Gdtr.cbGdt)
+            VBOXGDTR Gdtr;
+            CPUMGetGuestGDTR(pVCpu, &Gdtr);
+            if ((Sel | X86_SEL_RPL_LDT) > Gdtr.cbGdt)
                 return VERR_INVALID_SELECTOR;
             GCPtrDesc = Gdtr.pGdt + (Sel & X86_SEL_MASK);
@@ -2292,30 +2226,13 @@
         else
         {
-            /*
-             * LDT - must locate the LDT first...
-             */
-            RTSEL SelLdt = CPUMGetGuestLDTR(pVCpu);
-            if (    (unsigned)(SelLdt & X86_SEL_MASK) < sizeof(X86DESC) /* the first selector is invalid, right? */ /** @todo r=bird: No, I don't think so */
-                ||  (unsigned)(SelLdt & X86_SEL_MASK) + sizeof(X86DESC) - 1 > (unsigned)Gdtr.cbGdt)
+            /* LDT */
+            uint64_t GCPtrBase;
+            uint32_t cbLimit;
+            CPUMGetGuestLdtrEx(pVCpu, &GCPtrBase, &cbLimit);
+            if ((Sel | X86_SEL_RPL_LDT) > cbLimit)
                 return VERR_INVALID_SELECTOR;
-            GCPtrDesc = Gdtr.pGdt + (SelLdt & X86_SEL_MASK);
-            int rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, GCPtrDesc, sizeof(Desc));
-            if (RT_FAILURE(rc))
-                return rc;
-
-            /* validate the LDT descriptor. */
-            if (Desc.Gen.u1Present == 0)
-                return VERR_SELECTOR_NOT_PRESENT;
-            if (    Desc.Gen.u1DescType == 1
-                ||  Desc.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
-                return VERR_INVALID_SELECTOR;
-
-            uint32_t cbLimit = X86DESC_LIMIT_G(&Desc);
-            if ((uint32_t)(Sel & X86_SEL_MASK) + sizeof(X86DESC) - 1 > cbLimit)
-                return VERR_INVALID_SELECTOR;
 
             /* calc the descriptor location. */
-            GCPtrDesc = X86DESC_BASE(&Desc);
-            GCPtrDesc += (Sel & X86_SEL_MASK);
+            GCPtrDesc = GCPtrBase + (Sel & X86_SEL_MASK);
         }
 
@@ -2416,9 +2333,9 @@
          */
         Desc = pVM->selm.s.paGdtR3[Sel >> X86_SEL_SHIFT];
-        pSelInfo->fFlags =    pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] == (Sel & X86_SEL_MASK)
-                           || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] == (Sel & X86_SEL_MASK)
-                           || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] == (Sel & X86_SEL_MASK)
-                           || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] == (Sel & X86_SEL_MASK)
-                           || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] == (Sel & X86_SEL_MASK)
+        pSelInfo->fFlags =    pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]         == (Sel & X86_SEL_MASK_OFF_RPL)
+                           || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]         == (Sel & X86_SEL_MASK_OFF_RPL)
+                           || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64]       == (Sel & X86_SEL_MASK_OFF_RPL)
+                           || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]        == (Sel & X86_SEL_MASK_OFF_RPL)
+                           || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] == (Sel & X86_SEL_MASK_OFF_RPL)
                          ? DBGFSELINFO_FLAGS_HYPER
                          : 0;
@@ -2666,8 +2583,10 @@
 {
     /** @todo SMP support! */
-    PVMCPU      pVCpu = &pVM->aCpus[0];
-
-    RTSEL SelLdt = CPUMGetGuestLDTR(pVCpu);
-    if (!(SelLdt & X86_SEL_MASK))
+    PVMCPU   pVCpu = &pVM->aCpus[0];
+
+    uint64_t GCPtrLdt;
+    uint32_t cbLdt;
+    RTSEL    SelLdt = CPUMGetGuestLdtrEx(pVCpu, &GCPtrLdt, &cbLdt);
+    if (!(SelLdt & X86_SEL_MASK_OFF_RPL))
     {
         pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x): Null-Selector\n", SelLdt);
@@ -2675,19 +2594,10 @@
     }
 
-    RTGCPTR     GCPtrLdt;
-    unsigned    cbLdt;
-    int rc = SELMGetLDTFromSel(pVM, SelLdt, &GCPtrLdt, &cbLdt);
-    if (RT_FAILURE(rc))
-    {
-        pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x): rc=%Rrc\n", SelLdt, rc);
-        return;
-    }
-
-    pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x GCAddr=%RGv limit=%x):\n", SelLdt, GCPtrLdt, cbLdt);
-    unsigned    cLdts  = (cbLdt + 1) >> X86_SEL_SHIFT;
+    pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x GCAddr=%RX64 limit=%x):\n", SelLdt, GCPtrLdt, cbLdt);
+    unsigned cLdts  = (cbLdt + 1) >> X86_SEL_SHIFT;
     for (unsigned iLdt = 0; iLdt < cLdts; iLdt++, GCPtrLdt += sizeof(X86DESC))
     {
         X86DESC LdtE;
-        rc = PGMPhysSimpleReadGCPtr(pVCpu, &LdtE, GCPtrLdt, sizeof(LdtE));
+        int rc = PGMPhysSimpleReadGCPtr(pVCpu, &LdtE, GCPtrLdt, sizeof(LdtE));
         if (RT_SUCCESS(rc))
         {
Index: /trunk/src/VBox/VMM/VMMRC/SELMRC.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMRC/SELMRC.cpp	(revision 42426)
+++ /trunk/src/VBox/VMM/VMMRC/SELMRC.cpp	(revision 42427)
@@ -95,9 +95,9 @@
      */
     RTSEL   Sel = iGDTEntry << X86_SEL_SHIFT;
-    Assert(   !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] & ~X86_SEL_MASK)
-           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] & ~X86_SEL_MASK)
-           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] & ~X86_SEL_MASK)
-           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] & ~X86_SEL_MASK)
-           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] & ~X86_SEL_MASK));
+    Assert(   !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]         & ~X86_SEL_MASK_OFF_RPL)
+           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]         & ~X86_SEL_MASK_OFF_RPL)
+           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64]       & ~X86_SEL_MASK_OFF_RPL)
+           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]        & ~X86_SEL_MASK_OFF_RPL)
+           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] & ~X86_SEL_MASK_OFF_RPL));
     if (    pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]         == Sel
         ||  pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]         == Sel
@@ -137,5 +137,5 @@
     for (unsigned iSReg = 0; iSReg <= X86_SREG_COUNT; iSReg++)
     {
-        if (Sel == (paSReg[iSReg].Sel & X86_SEL_MASK_RPL))
+        if (Sel == (paSReg[iSReg].Sel & X86_SEL_MASK_OFF_RPL))
         {
             if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &paSReg[iSReg]))
@@ -200,5 +200,5 @@
     for (unsigned iSReg = 0; iSReg <= X86_SREG_COUNT; iSReg++)
     {
-        if (iGDTEntry == (paSReg[iSReg].Sel & X86_SEL_MASK_RPL))
+        if (iGDTEntry == (paSReg[iSReg].Sel & X86_SEL_MASK_OFF_RPL))
         {
             if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &paSReg[iSReg]))
