Changeset 42427 in vbox

Timestamp:
    Jul 26, 2012 11:48:01 PM (12 years ago)
Location:
    trunk
Files:
    10 edited
        include/VBox/vmm/cpum.h (modified) (1 diff)
        include/VBox/vmm/cpumctx.h (modified) (1 diff)
        include/VBox/vmm/selm.h (modified) (1 diff)
        include/iprt/x86.h (modified) (1 diff)
        src/VBox/VMM/VMMAll/CPUMAllRegs.cpp (modified) (2 diffs)
        src/VBox/VMM/VMMAll/IEMAll.cpp (modified) (8 diffs)
        src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h (modified) (21 diffs)
        src/VBox/VMM/VMMAll/SELMAll.cpp (modified) (3 diffs)
        src/VBox/VMM/VMMR3/SELM.cpp (modified) (15 diffs)
        src/VBox/VMM/VMMRC/SELMRC.cpp (modified) (3 diffs)
Legend (unified diff notation below):
    ' ' unmodified
    '+' added
    '-' removed
trunk/include/VBox/vmm/cpum.h (r42420 → r42427)

@@ -91,4 +91,5 @@
 VMMDECL(RTSEL)      CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden);
 VMMDECL(RTSEL)      CPUMGetGuestLDTR(PVMCPU pVCpu);
+VMMDECL(RTSEL)      CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit);
 VMMDECL(uint64_t)   CPUMGetGuestCR0(PVMCPU pVCpu);
 VMMDECL(uint64_t)   CPUMGetGuestCR2(PVMCPU pVCpu);
trunk/include/VBox/vmm/cpumctx.h (r42415 → r42427)

@@ -86,5 +86,5 @@
         && (   (a_pSelReg)->ValidSel == (a_pSelReg)->Sel \
             || (   (a_pVCpu) /*!= NULL*/ \
-                && (a_pSelReg)->ValidSel == ((a_pSelReg)->Sel & X86_SEL_MASK_RPL) \
+                && (a_pSelReg)->ValidSel == ((a_pSelReg)->Sel & X86_SEL_MASK_OFF_RPL) \
                 && ((a_pSelReg)->Sel      & X86_SEL_RPL) == 1 \
                 && ((a_pSelReg)->ValidSel & X86_SEL_RPL) == 0 \
trunk/include/VBox/vmm/selm.h (r42407 → r42427)

@@ -80,5 +80,4 @@
 VMMDECL(int)        SELMValidateAndConvertCSAddr(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL SelCPL, RTSEL SelCS,
                                                  PCPUMSELREG pSRegCS, RTGCPTR Addr, PRTGCPTR ppvFlat);
-VMMDECL(int)        SELMGetLDTFromSel(PVM pVM, RTSEL SelLdt, PRTGCPTR ppvLdt, unsigned *pcbLimit);
 #ifdef VBOX_WITH_RAW_MODE
 VMM_INT_DECL(void)  SELMLoadHiddenSelectorReg(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUMSELREG pSReg);
trunk/include/iprt/x86.h (r42407 → r42427)

@@ -2955,24 +2955,33 @@
  * The shift used to convert a selector from and to index an index (C).
  */
-#define X86_SEL_SHIFT       3
+#define X86_SEL_SHIFT           3
 
 /**
  * The mask used to mask off the table indicator and RPL of an selector.
  */
-#define X86_SEL_MASK        0xfff8U
+#define X86_SEL_MASK            0xfff8U
 
 /**
  * The mask used to mask off the RPL of an selector.
- */
-#define X86_SEL_MASK_RPL    0xfffcU
+ * This is suitable for checking for NULL selectors.
+ */
+#define X86_SEL_MASK_OFF_RPL    0xfffcU
 
 /**
  * The bit indicating that a selector is in the LDT and not in the GDT.
  */
-#define X86_SEL_LDT         0x0004U
+#define X86_SEL_LDT             0x0004U
+
 /**
  * The bit mask for getting the RPL of a selector.
  */
-#define X86_SEL_RPL         0x0003U
+#define X86_SEL_RPL             0x0003U
+
+/**
+ * The mask covering both RPL and LDT.
+ * This is incidentally the same as sizeof(X86DESC) - 1, so good for limit
+ * checks.
+ */
+#define X86_SEL_RPL_LDT         0x0007U
 
 /** @} */
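Editorial note (not part of the changeset): the effect of the rename and the new X86_SEL_RPL_LDT constant can be checked in isolation. The standalone C sketch below uses only the constants defined above, with a made-up example selector. It verifies that the old (X86_SEL_MASK | X86_SEL_LDT) idiom is exactly the new X86_SEL_MASK_OFF_RPL (hence the mass rename in the files that follow), and that OR-ing a selector with X86_SEL_RPL_LDT — which equals sizeof(X86DESC) - 1, as the new comment says — yields the offset of the last byte of its 8-byte descriptor, which is what makes the (Sel | X86_SEL_RPL_LDT) > limit bounds checks in the later diffs work.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Constants exactly as defined in include/iprt/x86.h after this changeset. */
    #define X86_SEL_SHIFT           3
    #define X86_SEL_MASK            0xfff8U
    #define X86_SEL_MASK_OFF_RPL    0xfffcU
    #define X86_SEL_LDT             0x0004U
    #define X86_SEL_RPL             0x0003U
    #define X86_SEL_RPL_LDT         0x0007U

    int main(void)
    {
        /* X86_SEL_RPL_LDT covers both the RPL bits and the TI (LDT) bit. */
        assert(X86_SEL_RPL_LDT == (X86_SEL_RPL | X86_SEL_LDT));

        /* The old (X86_SEL_MASK | X86_SEL_LDT) null-check idiom is exactly
           the new X86_SEL_MASK_OFF_RPL. */
        assert((X86_SEL_MASK | X86_SEL_LDT) == X86_SEL_MASK_OFF_RPL);

        /* Selectors 0..3 are null; 0x0004 (LDT, index 0) is not, because a
           null test must preserve the TI bit. */
        assert((0x0003U & X86_SEL_MASK_OFF_RPL) == 0);
        assert((0x0004U & X86_SEL_MASK_OFF_RPL) != 0);

        /* Example selector (made up): index 5, GDT (TI=0), RPL=3. */
        uint16_t uSel = 0x002b;
        assert((uSel >> X86_SEL_SHIFT) == 5);

        /* (uSel | 7) is the offset of the last byte of the 8-byte descriptor,
           so the descriptor fits in a table iff (uSel | 7) <= limit. */
        assert((uSel | X86_SEL_RPL_LDT) == (uSel & X86_SEL_MASK) + 7);

        printf("selector mask invariants hold\n");
        return 0;
    }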
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp (r42420 → r42427)

@@ -116,5 +116,5 @@
 {
     /* Protected mode - get it from the selector descriptor tables. */
-    if (!(pSReg->Sel & X86_SEL_MASK))
+    if (!(pSReg->Sel & X86_SEL_MASK_OFF_RPL))
     {
         Assert(!CPUMIsGuestInLongMode(pVCpu));
@@ -1303,4 +1303,12 @@
 VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
 {
+    return pVCpu->cpum.s.Guest.ldtr.Sel;
+}
+
+
+VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
+{
+    *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
+    *pcbLimit   = pVCpu->cpum.s.Guest.ldtr.u32Limit;
     return pVCpu->cpum.s.Guest.ldtr.Sel;
 }
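Editorial note (not part of the changeset): CPUMGetGuestLdtrEx returns the LDTR selector and fills in the hidden base and limit in one call, which is what lets later diffs drop the removed SELMGetLDTFromSel. A hedged sketch of a caller follows; only the CPUMGetGuestLdtrEx signature comes from this changeset, the helper name and structure are illustrative.

    #include <VBox/vmm/cpum.h>
    #include <iprt/x86.h>

    /* Illustrative only: returns true and fills in the bounds when the guest
       LDTR holds a non-null selector, mirroring the rewritten callers below.
       exampleGetGuestLdtBounds is my name, not VirtualBox's. */
    static bool exampleGetGuestLdtBounds(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
    {
        RTSEL SelLdt = CPUMGetGuestLdtrEx(pVCpu, pGCPtrBase, pcbLimit);
        return (SelLdt & X86_SEL_MASK_OFF_RPL) != 0;
    }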
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r42407 → r42427)

@@ -1514,5 +1514,5 @@
     /* Null selectors are not allowed (we're not called for dispatching
        interrupts with SS=0 in long mode). */
-    if (!(NewSS & (X86_SEL_MASK | X86_SEL_LDT)))
+    if (!(NewSS & X86_SEL_MASK_OFF_RPL))
     {
         Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #GP(0)\n", NewSS));
@@ -1863,5 +1863,5 @@
     /* A null CS is bad. */
     RTSEL NewCS = Idte.Gate.u16Sel;
-    if (!(NewCS & (X86_SEL_MASK | X86_SEL_LDT)))
+    if (!(NewCS & X86_SEL_MASK_OFF_RPL))
     {
         Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
@@ -1882,9 +1882,9 @@
     {
         Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
-        return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
+        return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
     }
     if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
     {
         Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
-        return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
+        return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
     }
@@ -1899,5 +1899,5 @@
         Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
              u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
-        return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
+        return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
     }
     /** @todo is the RPL of the interrupt/trap gate descriptor checked? */
@@ -1913,4 +1913,4 @@
         Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
              u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
-        return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & (X86_SEL_MASK | X86_SEL_LDT));
+        return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
     }
@@ -5816,5 +5816,5 @@
     {
         if (   !pCtx->ldtr.Attr.n.u1Present
-            || (uSel | 0x7U) > pCtx->ldtr.u32Limit )
+            || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
         {
             Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
@@ -5829,5 +5829,5 @@
     else
     {
-        if ((uSel | 0x7U) > pCtx->gdtr.cbGdt)
+        if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
         {
             Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
@@ -5848,6 +5848,6 @@
         || pDesc->Legacy.Gen.u1DescType)
         pDesc->Long.au64[1] = 0;
-    else if ((uint32_t)(uSel & X86_SEL_MASK) + 15 < (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
-        rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
+    else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
+        rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
     else
     {
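Editorial note (not part of the changeset): the bounds-check rewrites above all rely on the same identity. A descriptor table limit is the offset of the table's last valid byte, so an 8-byte descriptor entry fits iff (uSel | 7) <= limit — the old 0x7U magic number is now spelled X86_SEL_RPL_LDT. The last hunk extends this to the upper 8 bytes of a 16-byte long-mode system descriptor, which start at (uSel | 7) + 1. A standalone C sketch of both checks (the helper names are mine, not VirtualBox's):

    #include <stdbool.h>
    #include <stdint.h>

    #define X86_SEL_RPL_LDT  0x0007U   /* from iprt/x86.h, see above */

    /* An 8-byte descriptor at selector uSel fits in a table whose limit is
       cbLimit iff the offset of its last byte, (uSel | 7), is <= cbLimit. */
    static bool exampleDescInBounds(uint16_t uSel, uint32_t cbLimit)
    {
        return (uint32_t)(uSel | X86_SEL_RPL_LDT) <= cbLimit;
    }

    /* The high half of a 16-byte long-mode system descriptor starts at
       (uSel | 7) + 1 == (uSel & ~7) + 8 and its last byte sits at
       (uSel | 7) + 8, which is exactly what the last hunk above checks. */
    static bool exampleLongDescHighHalfInBounds(uint16_t uSel, uint32_t cbLimit)
    {
        return (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= cbLimit;
    }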
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h (r42407 → r42427)

@@ -818,5 +818,5 @@
 {
     Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
-    Assert((uSel & (X86_SEL_MASK | X86_SEL_LDT)));
+    Assert((uSel & X86_SEL_MASK_OFF_RPL));
 
     if (IEM_IS_LONG_MODE(pIemCpu))
@@ -913,5 +913,5 @@
      * Protected mode. Need to parse the specified descriptor...
      */
-    if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
+    if (!(uSel & X86_SEL_MASK_OFF_RPL))
     {
         Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
@@ -1015,5 +1015,5 @@
     /* commit */
     pCtx->rip         = offSeg;
-    pCtx->cs.Sel      = uSel & (X86_SEL_MASK | X86_SEL_LDT);
+    pCtx->cs.Sel      = uSel & X86_SEL_MASK_OFF_RPL;
     pCtx->cs.Sel     |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
     pCtx->cs.ValidSel = pCtx->cs.Sel;
@@ -1096,5 +1096,5 @@
      * Protected mode. Need to parse the specified descriptor...
      */
-    if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
+    if (!(uSel & X86_SEL_MASK_OFF_RPL))
     {
         Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
@@ -1236,5 +1236,5 @@
     /* commit */
     pCtx->rip         = offSeg;
-    pCtx->cs.Sel      = uSel & (X86_SEL_MASK | X86_SEL_LDT);
+    pCtx->cs.Sel      = uSel & X86_SEL_MASK_OFF_RPL;
     pCtx->cs.Sel     |= pIemCpu->uCpl;
     pCtx->cs.ValidSel = pCtx->cs.Sel;
@@ -1323,5 +1323,5 @@
      * Protected mode is complicated, of course.
      */
-    if (!(uNewCs & (X86_SEL_MASK | X86_SEL_LDT)))
+    if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
     {
         Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
@@ -1417,5 +1417,5 @@
            and read the selector. */
         IEMSELDESC DescSs;
-        if (!(uNewOuterSs & (X86_SEL_MASK | X86_SEL_LDT)))
+        if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
         {
             if (   !DescCs.Legacy.Gen.u1Long
@@ -1961,5 +1961,5 @@
          */
         /* Read the CS descriptor. */
-        if (!(uNewCs & (X86_SEL_MASK | X86_SEL_LDT)))
+        if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
         {
             Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
@@ -2037,5 +2037,5 @@
 
             /* Read the SS descriptor. */
-            if (!(uNewSS & (X86_SEL_MASK | X86_SEL_LDT)))
+            if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
             {
                 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
@@ -2281,5 +2281,5 @@
      * FS and GS.  If not null, then we have to load and parse the descriptor.
      */
-    if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
+    if (!(uSel & X86_SEL_MASK_OFF_RPL))
    {
         if (iSegReg == X86_SREG_SS)
@@ -2659,5 +2659,5 @@
      * Now, loading a NULL selector is easy.
      */
-    if ((uNewLdt & X86_SEL_MASK) == 0)
+    if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
     {
         Log(("lldt %04x: Loading NULL selector.\n",  uNewLdt));
@@ -2689,10 +2689,10 @@
     {
         Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
-        return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
+        return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
     }
     if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
     {
         Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
-        return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
+        return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
     }
     uint64_t u64Base;
@@ -2704,5 +2704,5 @@
         {
             Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
-            return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
+            return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
         }
 
@@ -2711,5 +2711,5 @@
         {
             Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
-            return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
+            return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
         }
     }
@@ -2727,8 +2727,8 @@
     /** @todo check if the actual value is loaded or if the RPL is dropped */
     if (!IEM_VERIFICATION_ENABLED(pIemCpu))
-        CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK);
+        CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK_OFF_RPL);
     else
-        pCtx->ldtr.Sel  = uNewLdt & X86_SEL_MASK;
-    pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK;
+        pCtx->ldtr.Sel  = uNewLdt & X86_SEL_MASK_OFF_RPL;
+    pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
     pCtx->ldtr.fFlags   = CPUMSELREG_FLAGS_VALID;
     pCtx->ldtr.Attr.u   = X86DESC_GET_HID_ATTR(&Desc.Legacy);
@@ -2768,5 +2768,5 @@
         return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewTr);
     }
-    if ((uNewTr & X86_SEL_MASK) == 0)
+    if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
     {
         Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
@@ -2786,5 +2786,5 @@
     {
         Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
-        return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
+        return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
     }
     if (   Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
@@ -2793,5 +2793,5 @@
     {
         Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
-        return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
+        return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
     }
     uint64_t u64Base;
@@ -2803,5 +2803,5 @@
         {
             Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
-            return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
+            return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
         }
 
@@ -2810,5 +2810,5 @@
         {
             Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
-            return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
+            return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
         }
     }
@@ -2848,8 +2848,8 @@
     /** @todo check if the actual value is loaded or if the RPL is dropped */
     if (!IEM_VERIFICATION_ENABLED(pIemCpu))
-        CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK);
+        CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK_OFF_RPL);
     else
-        pCtx->tr.Sel    = uNewTr & X86_SEL_MASK;
-    pCtx->tr.ValidSel   = uNewTr & X86_SEL_MASK;
+        pCtx->tr.Sel    = uNewTr & X86_SEL_MASK_OFF_RPL;
+    pCtx->tr.ValidSel   = uNewTr & X86_SEL_MASK_OFF_RPL;
     pCtx->tr.fFlags     = CPUMSELREG_FLAGS_VALID;
     pCtx->tr.Attr.u     = X86DESC_GET_HID_ATTR(&Desc.Legacy);
trunk/src/VBox/VMM/VMMAll/SELMAll.cpp (r42420 → r42427)

@@ -340,5 +340,5 @@
         {
             if (    !(fFlags & SELMTOFLAT_FLAGS_HYPER)
-                &&  (unsigned)(Sel & X86_SEL_MASK) >= pVM->selm.s.GuestGdtr.cbGdt)
+                &&  (Sel | X86_SEL_RPL_LDT) > pVM->selm.s.GuestGdtr.cbGdt)
                 return VERR_INVALID_SELECTOR;
             Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
@@ -346,9 +346,9 @@
         else
         {
-            if ((unsigned)(Sel & X86_SEL_MASK) >= pVM->selm.s.cbLdtLimit)
+            if ((Sel | X86_SEL_RPL_LDT) > pVM->selm.s.cbLdtLimit)
                 return VERR_INVALID_SELECTOR;
 
             /** @todo handle LDT page(s) not present! */
-            PX86DESC    paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
+            PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
             Desc = paLDT[Sel >> X86_SEL_SHIFT];
         }
@@ -1032,5 +1032,5 @@
     CPUMSELREGHID trHid;
     RTSEL tr = CPUMGetGuestTR(pVCpu, &trHid);
-    if (!(tr & X86_SEL_MASK))
+    if (!(tr & X86_SEL_MASK_OFF_RPL))
         return VERR_SELM_NO_TSS;
 
trunk/src/VBox/VMM/VMMR3/SELM.cpp (r42418 → r42427)

@@ -1068,5 +1068,5 @@
      */
     RTSEL SelLdt = CPUMGetGuestLDTR(pVCpu);
-    if ((SelLdt & X86_SEL_MASK) == 0)
+    if (!(SelLdt & X86_SEL_MASK_OFF_RPL))
     {
         /* ldtr = 0 - update hyper LDTR and deregister any active handler. */
@@ -1085,4 +1085,5 @@
      * Get the LDT selector.
      */
+    /** @todo this is wrong, use CPUMGetGuestLdtrEx */
     PX86DESC    pDesc    = &pVM->selm.s.paGdtR3[SelLdt >> X86_SEL_SHIFT];
     RTGCPTR     GCPtrLdt = X86DESC_BASE(pDesc);
@@ -1293,6 +1294,6 @@
     for (uint32_t iSReg = 0; iSReg < X86_SREG_COUNT; iSReg++)
     {
-        RTSEL const Sel = paSReg[iSReg].Sel & (X86_SEL_MASK | X86_SEL_LDT);
-        if (Sel & (X86_SEL_MASK | X86_SEL_LDT))
+        RTSEL const Sel = paSReg[iSReg].Sel;
+        if (Sel & X86_SEL_MASK_OFF_RPL)
         {
             /* Get the shadow descriptor entry corresponding to this. */
@@ -1553,12 +1554,13 @@
      * make sure cbTss is 0.
      */
+    /** @todo use the hidden bits, not shadow GDT. */
     CPUMSELREGHID trHid;
     RTSEL SelTss = CPUMGetGuestTR(pVCpu, &trHid);
     RTGCPTR GCPtrTss = trHid.u64Base;
     uint32_t cbTss = trHid.u32Limit;
-    Assert(   (SelTss & X86_SEL_MASK)
-           || (cbTss == 0 && GCPtrTss == 0 && trHid.Attr.u == 0 /* TR=0 */)
-           || (cbTss == 0xffff && GCPtrTss == 0 && trHid.Attr.n.u1Present && trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY /* RESET */));
-    if (SelTss & X86_SEL_MASK)
+    Assert(   (SelTss & X86_SEL_MASK_OFF_RPL)
+           || (cbTss == 0 && GCPtrTss == 0 && trHid.Attr.u == 0 /* TR=0 */)
+           || (cbTss == 0xffff && GCPtrTss == 0 && trHid.Attr.n.u1Present && trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY /* RESET */));
+    if (SelTss & X86_SEL_MASK_OFF_RPL)
     {
         Assert(!(SelTss & X86_SEL_LDT));
@@ -1793,6 +1795,7 @@
      */
     RTSEL SelLdt = CPUMGetGuestLDTR(pVCpu);
-    if ((SelLdt & X86_SEL_MASK) == 0)
+    if ((SelLdt & X86_SEL_MASK_OFF_RPL) == 0)
         return VINF_SUCCESS;
+    Assert(!(SelLdt & X86_SEL_LDT));
     if (SelLdt > GDTR.cbGdt)
     {
@@ -1886,8 +1889,8 @@
     RTGCPTR GCPtrTss = trHid.u64Base;
     uint32_t cbTss = trHid.u32Limit;
-    Assert(   (SelTss & X86_SEL_MASK)
-           || (cbTss == 0 && GCPtrTss == 0 && trHid.Attr.u == 0 /* TR=0 */)
-           || (cbTss == 0xffff && GCPtrTss == 0 && trHid.Attr.n.u1Present && trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY /* RESET */));
-    if (SelTss & X86_SEL_MASK)
+    Assert(   (SelTss & X86_SEL_MASK_OFF_RPL)
+           || (cbTss == 0 && GCPtrTss == 0 && trHid.Attr.u == 0 /* TR=0 */)
+           || (cbTss == 0xffff && GCPtrTss == 0 && trHid.Attr.n.u1Present && trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY /* RESET */));
+    if (SelTss & X86_SEL_MASK_OFF_RPL)
     {
         AssertReturn(!(SelTss & X86_SEL_LDT), false);
@@ -2007,57 +2010,4 @@
 
 
 /**
- * Returns flat address and limit of LDT by LDT selector from guest GDTR.
- *
- * Fully validate selector.
- *
- * @returns VBox status.
- * @param   pVM        Pointer to the VM.
- * @param   SelLdt     LDT selector.
- * @param   ppvLdt     Where to store the flat address of LDT.
- * @param   pcbLimit   Where to store LDT limit.
- */
-VMMDECL(int) SELMGetLDTFromSel(PVM pVM, RTSEL SelLdt, PRTGCPTR ppvLdt, unsigned *pcbLimit)
-{
-    PVMCPU pVCpu = VMMGetCpu(pVM);
-
-    /* Get guest GDTR. */
-    VBOXGDTR GDTR;
-    CPUMGetGuestGDTR(pVCpu, &GDTR);
-
-    /* Check selector TI and GDT limit. */
-    if (   (SelLdt & X86_SEL_LDT)
-        || SelLdt > GDTR.cbGdt)
-        return VERR_INVALID_SELECTOR;
-
-    /* Read descriptor from GC. */
-    X86DESC Desc;
-    int rc = PGMPhysSimpleReadGCPtr(pVCpu, (void *)&Desc, (RTGCPTR)(GDTR.pGdt + (SelLdt & X86_SEL_MASK)), sizeof(Desc));
-    if (RT_FAILURE(rc))
-    {
-        /* fatal */
-        Log(("Can't read LDT descriptor for selector=%04X\n", SelLdt));
-        return VERR_SELECTOR_NOT_PRESENT;
-    }
-
-    /* Check if LDT descriptor is not present. */
-    if (Desc.Gen.u1Present == 0)
-        return VERR_SELECTOR_NOT_PRESENT;
-
-    /* Check LDT descriptor type. */
-    if (    Desc.Gen.u1DescType == 1
-        ||  Desc.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
-        return VERR_INVALID_SELECTOR;
-
-    /* LDT descriptor is ok. */
-    if (ppvLdt)
-    {
-        *ppvLdt = (RTGCPTR)X86DESC_BASE(&Desc);
-        *pcbLimit = X86DESC_LIMIT_G(&Desc);
-    }
-    return VINF_SUCCESS;
-}
-
-
-/**
  * Gets information about a 64-bit selector, SELMR3GetSelectorInfo helper.
  *
@@ -2075,12 +2025,13 @@
      * Read it from the guest descriptor table.
      */
+    /** @todo this is bogus wrt the LDT/GDT limit on long selectors. */
     X86DESC64   Desc;
-    VBOXGDTR    Gdtr;
     RTGCPTR     GCPtrDesc;
-    CPUMGetGuestGDTR(pVCpu, &Gdtr);
     if (!(Sel & X86_SEL_LDT))
     {
         /* GDT */
-        if ((unsigned)(Sel & X86_SEL_MASK) + sizeof(X86DESC) - 1 > (unsigned)Gdtr.cbGdt)
+        VBOXGDTR    Gdtr;
+        CPUMGetGuestGDTR(pVCpu, &Gdtr);
+        if ((Sel | X86_SEL_RPL_LDT) > Gdtr.cbGdt)
             return VERR_INVALID_SELECTOR;
         GCPtrDesc = Gdtr.pGdt + (Sel & X86_SEL_MASK);
@@ -2088,29 +2039,12 @@
     else
     {
-        /*
-         * LDT - must locate the LDT first.
-         */
-        RTSEL SelLdt = CPUMGetGuestLDTR(pVCpu);
-        if (    (unsigned)(SelLdt & X86_SEL_MASK) < sizeof(X86DESC) /* the first selector is invalid, right? */ /** @todo r=bird: No, I don't think so */
-            ||  (unsigned)(SelLdt & X86_SEL_MASK) + sizeof(X86DESC) - 1 > (unsigned)Gdtr.cbGdt)
+        /* LDT */
+        uint64_t GCPtrBase;
+        uint32_t cbLimit;
+        CPUMGetGuestLdtrEx(pVCpu, &GCPtrBase, &cbLimit);
+        if ((Sel | X86_SEL_RPL_LDT) > cbLimit)
             return VERR_INVALID_SELECTOR;
-        GCPtrDesc = Gdtr.pGdt + (SelLdt & X86_SEL_MASK);
-        int rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, GCPtrDesc, sizeof(Desc));
-        if (RT_FAILURE(rc))
-            return rc;
-
-        /* validate the LDT descriptor. */
-        if (Desc.Gen.u1Present == 0)
-            return VERR_SELECTOR_NOT_PRESENT;
-        if (    Desc.Gen.u1DescType == 1
-            ||  Desc.Gen.u4Type != AMD64_SEL_TYPE_SYS_LDT)
-            return VERR_INVALID_SELECTOR;
-
-        uint32_t cbLimit = X86DESC_LIMIT_G(&Desc);
-        if ((uint32_t)(Sel & X86_SEL_MASK) + sizeof(X86DESC) - 1 > cbLimit)
-            return VERR_INVALID_SELECTOR;
 
         /* calc the descriptor location. */
-        GCPtrDesc = X86DESC64_BASE(&Desc);
-        GCPtrDesc += (Sel & X86_SEL_MASK);
+        GCPtrDesc = GCPtrBase + (Sel & X86_SEL_MASK);
     }
@@ -2255,9 +2189,9 @@
     X86DESC Desc;
     if (    !(Sel & X86_SEL_LDT)
-        && (    pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]         == (Sel & X86_SEL_MASK)
-            ||  pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]         == (Sel & X86_SEL_MASK)
-            ||  pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64]       == (Sel & X86_SEL_MASK)
-            ||  pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]        == (Sel & X86_SEL_MASK)
-            ||  pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] == (Sel & X86_SEL_MASK))
+        && (    pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]         == (Sel & X86_SEL_RPL_LDT)
+            ||  pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]         == (Sel & X86_SEL_RPL_LDT)
+            ||  pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64]       == (Sel & X86_SEL_RPL_LDT)
+            ||  pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]        == (Sel & X86_SEL_RPL_LDT)
+            ||  pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] == (Sel & X86_SEL_RPL_LDT))
        )
     {
@@ -2280,11 +2214,12 @@
     pSelInfo->fFlags = DBGFSELINFO_FLAGS_PROT_MODE;
 
-    VBOXGDTR    Gdtr;
     RTGCPTR     GCPtrDesc;
-    CPUMGetGuestGDTR(pVCpu, &Gdtr);
     if (!(Sel & X86_SEL_LDT))
     {
         /* GDT */
-        if ((unsigned)(Sel & X86_SEL_MASK) + sizeof(X86DESC) - 1 > (unsigned)Gdtr.cbGdt)
+        VBOXGDTR    Gdtr;
+        CPUMGetGuestGDTR(pVCpu, &Gdtr);
+        if ((Sel | X86_SEL_RPL_LDT) > Gdtr.cbGdt)
             return VERR_INVALID_SELECTOR;
         GCPtrDesc = Gdtr.pGdt + (Sel & X86_SEL_MASK);
@@ -2292,29 +2226,12 @@
     else
     {
-        /*
-         * LDT - must locate the LDT first...
-         */
-        RTSEL SelLdt = CPUMGetGuestLDTR(pVCpu);
-        if (    (unsigned)(SelLdt & X86_SEL_MASK) < sizeof(X86DESC) /* the first selector is invalid, right? */ /** @todo r=bird: No, I don't think so */
-            ||  (unsigned)(SelLdt & X86_SEL_MASK) + sizeof(X86DESC) - 1 > (unsigned)Gdtr.cbGdt)
+        /* LDT */
+        uint64_t GCPtrBase;
+        uint32_t cbLimit;
+        CPUMGetGuestLdtrEx(pVCpu, &GCPtrBase, &cbLimit);
+        if ((Sel | X86_SEL_RPL_LDT) > cbLimit)
            return VERR_INVALID_SELECTOR;
-        GCPtrDesc = Gdtr.pGdt + (SelLdt & X86_SEL_MASK);
-        int rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, GCPtrDesc, sizeof(Desc));
-        if (RT_FAILURE(rc))
-            return rc;
-
-        /* validate the LDT descriptor. */
-        if (Desc.Gen.u1Present == 0)
-            return VERR_SELECTOR_NOT_PRESENT;
-        if (    Desc.Gen.u1DescType == 1
-            ||  Desc.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
-            return VERR_INVALID_SELECTOR;
-
-        uint32_t cbLimit = X86DESC_LIMIT_G(&Desc);
-        if ((uint32_t)(Sel & X86_SEL_MASK) + sizeof(X86DESC) - 1 > cbLimit)
-            return VERR_INVALID_SELECTOR;
 
         /* calc the descriptor location. */
-        GCPtrDesc = X86DESC_BASE(&Desc);
-        GCPtrDesc += (Sel & X86_SEL_MASK);
+        GCPtrDesc = GCPtrBase + (Sel & X86_SEL_MASK);
     }
@@ -2416,9 +2333,9 @@
      */
     Desc = pVM->selm.s.paGdtR3[Sel >> X86_SEL_SHIFT];
-    pSelInfo->fFlags =    pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]         == (Sel & X86_SEL_MASK)
-                       || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]         == (Sel & X86_SEL_MASK)
-                       || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64]       == (Sel & X86_SEL_MASK)
-                       || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]        == (Sel & X86_SEL_MASK)
-                       || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] == (Sel & X86_SEL_MASK)
+    pSelInfo->fFlags =    pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]         == (Sel & X86_SEL_MASK_OFF_RPL)
+                       || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]         == (Sel & X86_SEL_MASK_OFF_RPL)
+                       || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64]       == (Sel & X86_SEL_MASK_OFF_RPL)
+                       || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]        == (Sel & X86_SEL_MASK_OFF_RPL)
+                       || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] == (Sel & X86_SEL_MASK_OFF_RPL)
                      ? DBGFSELINFO_FLAGS_HYPER
                      : 0;
@@ -2666,28 +2583,21 @@
 {
     /** @todo SMP support! */
-    PVMCPU pVCpu = &pVM->aCpus[0];
-
-    RTSEL SelLdt = CPUMGetGuestLDTR(pVCpu);
-    if (!(SelLdt & X86_SEL_MASK))
+    PVMCPU   pVCpu = &pVM->aCpus[0];
+
+    uint64_t GCPtrLdt;
+    uint32_t cbLdt;
+    RTSEL    SelLdt = CPUMGetGuestLdtrEx(pVCpu, &GCPtrLdt, &cbLdt);
+    if (!(SelLdt & X86_SEL_MASK_OFF_RPL))
     {
         pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x): Null-Selector\n", SelLdt);
         return;
     }
 
-    RTGCPTR GCPtrLdt;
-    unsigned cbLdt;
-    int rc = SELMGetLDTFromSel(pVM, SelLdt, &GCPtrLdt, &cbLdt);
-    if (RT_FAILURE(rc))
-    {
-        pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x): rc=%Rrc\n", SelLdt, rc);
-        return;
-    }
-
-    pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x GCAddr=%RGv limit=%x):\n", SelLdt, GCPtrLdt, cbLdt);
-    unsigned cLdts = (cbLdt + 1) >> X86_SEL_SHIFT;
+    pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x GCAddr=%RX64 limit=%x):\n", SelLdt, GCPtrLdt, cbLdt);
+    unsigned cLdts  = (cbLdt + 1) >> X86_SEL_SHIFT;
     for (unsigned iLdt = 0; iLdt < cLdts; iLdt++, GCPtrLdt += sizeof(X86DESC))
     {
         X86DESC LdtE;
-        rc = PGMPhysSimpleReadGCPtr(pVCpu, &LdtE, GCPtrLdt, sizeof(LdtE));
+        int     rc = PGMPhysSimpleReadGCPtr(pVCpu, &LdtE, GCPtrLdt, sizeof(LdtE));
         if (RT_SUCCESS(rc))
         {
trunk/src/VBox/VMM/VMMRC/SELMRC.cpp (r42407 → r42427)

@@ -95,9 +95,9 @@
      */
     RTSEL Sel = iGDTEntry << X86_SEL_SHIFT;
-    Assert(    !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] & ~X86_SEL_MASK)
-           &&  !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] & ~X86_SEL_MASK)
-           &&  !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] & ~X86_SEL_MASK)
-           &&  !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] & ~X86_SEL_MASK)
-           &&  !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] & ~X86_SEL_MASK));
+    Assert(    !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] & ~X86_SEL_MASK_OFF_RPL)
+           &&  !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] & ~X86_SEL_MASK_OFF_RPL)
+           &&  !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] & ~X86_SEL_MASK_OFF_RPL)
+           &&  !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] & ~X86_SEL_MASK_OFF_RPL)
+           &&  !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] & ~X86_SEL_MASK_OFF_RPL));
     if (    pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] == Sel
         ||  pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] == Sel
@@ -137,5 +137,5 @@
     for (unsigned iSReg = 0; iSReg <= X86_SREG_COUNT; iSReg++)
     {
-        if (Sel == (paSReg[iSReg].Sel & X86_SEL_MASK_RPL))
+        if (Sel == (paSReg[iSReg].Sel & X86_SEL_MASK_OFF_RPL))
         {
             if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &paSReg[iSReg]))
@@ -200,5 +200,5 @@
     for (unsigned iSReg = 0; iSReg <= X86_SREG_COUNT; iSReg++)
     {
-        if (iGDTEntry == (paSReg[iSReg].Sel & X86_SEL_MASK_RPL))
+        if (iGDTEntry == (paSReg[iSReg].Sel & X86_SEL_MASK_OFF_RPL))
         {
             if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &paSReg[iSReg]))