Changeset 42407 in vbox
- Timestamp: Jul 26, 2012 11:41:35 AM (12 years ago)
- Location: trunk
- Files: 22 edited
  - include/VBox/vmm/cpum.h (modified) (2 diffs)
  - include/VBox/vmm/cpumctx.h (modified) (1 diff)
  - include/VBox/vmm/selm.h (modified) (4 diffs)
  - include/VBox/vmm/vm.h (modified) (8 diffs)
  - include/iprt/x86.h (modified) (3 diffs)
  - src/VBox/Debugger/DBGCEmulateCodeView.cpp (modified) (2 diffs)
  - src/VBox/VMM/VMMAll/CPUMAllRegs.cpp (modified) (11 diffs)
  - src/VBox/VMM/VMMAll/IEMAll.cpp (modified) (7 diffs)
  - src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h (modified) (26 diffs)
  - src/VBox/VMM/VMMAll/SELMAll.cpp (modified) (12 diffs)
  - src/VBox/VMM/VMMR0/HWACCMR0.cpp (modified) (1 diff)
  - src/VBox/VMM/VMMR0/HWVMXR0.cpp (modified) (2 diffs)
  - src/VBox/VMM/VMMR3/EM.cpp (modified) (1 diff)
  - src/VBox/VMM/VMMR3/EMRaw.cpp (modified) (4 diffs)
  - src/VBox/VMM/VMMR3/SELM.cpp (modified) (23 diffs)
  - src/VBox/VMM/VMMRC/SELMRC.cpp (modified) (7 diffs)
  - src/VBox/VMM/include/EMHandleRCTmpl.h (modified) (1 diff)
  - src/VBox/VMM/include/SELMInternal.h (modified) (3 diffs)
  - src/VBox/VMM/testcase/tstVMStruct.h (modified) (1 diff)
  - src/recompiler/VBoxRecompiler.c (modified) (10 diffs)
  - src/recompiler/target-i386/cpu.h (modified) (3 diffs)
  - src/recompiler/target-i386/op_helper.c (modified) (12 diffs)
trunk/include/VBox/vmm/cpum.h
r42186 r42407 191 191 VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu); 192 192 VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu); 193 VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu); 193 194 194 195 #ifndef VBOX_WITHOUT_UNNAMED_UNIONS … … 252 253 if (!(pCtx->msrEFER & MSR_K6_EFER_LMA)) 253 254 return false; 254 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID( &pCtx->cs))255 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(NULL, &pCtx->cs)) 255 256 return CPUMIsGuestIn64BitCodeSlow(pCtx); 256 257 return pCtx->cs.Attr.n.u1Long; -
trunk/include/VBox/vmm/cpumctx.h
r42337 r42407 81 81 82 82 /** Checks if the hidden parts of the selector register are valid. */ 83 #define CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pSelReg) ( ((a_pSelReg)->fFlags & CPUMSELREG_FLAGS_VALID) \ 84 && (a_pSelReg)->ValidSel == (a_pSelReg)->Sel ) 83 #ifdef VBOX_WITH_RAW_MODE_NOT_R0 84 # define CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSelReg) \ 85 ( ((a_pSelReg)->fFlags & CPUMSELREG_FLAGS_VALID) \ 86 && ( (a_pSelReg)->ValidSel == (a_pSelReg)->Sel \ 87 || ( (a_pVCpu) != NULL \ 88 && (a_pSelReg)->ValidSel == ((a_pSelReg)->Sel & X86_SEL_MASK_RPL) \ 89 && ((a_pSelReg)->Sel & X86_SEL_RPL) == 1 \ 90 && ((a_pSelReg)->ValidSel & X86_SEL_RPL) == 0 \ 91 && CPUMIsGuestInRawMode(a_pVCpu) \ 92 ) \ 93 ) \ 94 ) 95 #else 96 # define CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSelReg) \ 97 ( ((a_pSelReg)->fFlags & CPUMSELREG_FLAGS_VALID) \ 98 && (a_pSelReg)->ValidSel == (a_pSelReg)->Sel ) 99 #endif 85 100 86 101 /** Old type used for the hidden register part. -
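For readability: the key change above is that CPUMSELREG_ARE_HIDDEN_PARTS_VALID() now takes the virtual CPU, so the raw-mode build can accept a selector whose RPL was pushed from 0 to 1 on raw-mode entry as still matching the cached ValidSel. The following is a rough, self-contained restatement of the new predicate, not VirtualBox code: the struct is trimmed down from the real CPUMSELREG, the raw-mode query is passed in as a plain bool, and the function name is made up.

    #include <stdbool.h>
    #include <stdint.h>

    #define X86_SEL_RPL       0x0003U   /* requested privilege level bits        */
    #define X86_SEL_MASK_RPL  0xfffcU   /* selector value with the RPL masked off */
    #define SELREG_FLAGS_VALID 0x0001U  /* stands in for CPUMSELREG_FLAGS_VALID   */

    /* trimmed-down stand-in for CPUMSELREG */
    typedef struct { uint16_t Sel; uint16_t ValidSel; uint32_t fFlags; } SELREGLITE;

    static bool selRegHiddenPartsValid(const SELREGLITE *pSReg, bool fGuestInRawMode)
    {
        if (!(pSReg->fFlags & SELREG_FLAGS_VALID))
            return false;
        if (pSReg->ValidSel == pSReg->Sel)              /* the normal case */
            return true;
        /* Raw-mode only: ring-0 guest code is run at ring 1, so a selector whose
           RPL was bumped from 0 to 1 still matches the cached ring-0 ValidSel. */
        return fGuestInRawMode
            && pSReg->ValidSel == (pSReg->Sel & X86_SEL_MASK_RPL)
            && (pSReg->Sel      & X86_SEL_RPL) == 1
            && (pSReg->ValidSel & X86_SEL_RPL) == 0;
    }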
trunk/include/VBox/vmm/selm.h
r42186 r42407 4 4 5 5 /* 6 * Copyright (C) 2006-201 0Oracle Corporation6 * Copyright (C) 2006-2012 Oracle Corporation 7 7 * 8 8 * This file is part of VirtualBox Open Source Edition (OSE), as … … 39 39 */ 40 40 41 VMMDECL(RTSEL) SELMGetTrap8Selector(PVM pVM);42 VMMDECL(void) SELMSetTrap8EIP(PVM pVM, uint32_t u32EIP);43 VMMDECL(int) SELMGetRing1Stack(PVM pVM, uint32_t *pSS, PRTGCPTR32 pEsp);44 VMMDECL(RTGCPTR) SELMGetGuestTSS(PVM pVM);45 VMMDECL(RTSEL) SELMGetHyperCS(PVM pVM);46 VMMDECL(RTSEL) SELMGetHyperCS64(PVM pVM);47 VMMDECL(RTSEL) SELMGetHyperDS(PVM pVM);48 VMMDECL(RTSEL) SELMGetHyperTSS(PVM pVM);49 VMMDECL(RTSEL) SELMGetHyperTSSTrap08(PVM pVM);50 VMMDECL(RTRCPTR) SELMGetHyperGDT(PVM pVM);51 VMMDECL(int) SELMGetTSSInfo(PVM pVM, PVMCPU pVCpu, PRTGCUINTPTR pGCPtrTss, PRTGCUINTPTR pcbTss, bool *pfCanHaveIOBitmap);52 VMMDECL(RTGCPTR) SELMToFlat(PVM pVM, DISSELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr);53 VMMDECL(RTGCPTR) SELMToFlatBySel(PVM pVM, RTSEL Sel, RTGCPTR Addr);54 VMMDECL(void) SELMShadowCR3Changed(PVM pVM, PVMCPU pVCpu);41 VMMDECL(RTSEL) SELMGetTrap8Selector(PVM pVM); 42 VMMDECL(void) SELMSetTrap8EIP(PVM pVM, uint32_t u32EIP); 43 VMMDECL(int) SELMGetRing1Stack(PVM pVM, uint32_t *pSS, PRTGCPTR32 pEsp); 44 VMMDECL(RTGCPTR) SELMGetGuestTSS(PVM pVM); 45 VMMDECL(RTSEL) SELMGetHyperCS(PVM pVM); 46 VMMDECL(RTSEL) SELMGetHyperCS64(PVM pVM); 47 VMMDECL(RTSEL) SELMGetHyperDS(PVM pVM); 48 VMMDECL(RTSEL) SELMGetHyperTSS(PVM pVM); 49 VMMDECL(RTSEL) SELMGetHyperTSSTrap08(PVM pVM); 50 VMMDECL(RTRCPTR) SELMGetHyperGDT(PVM pVM); 51 VMMDECL(int) SELMGetTSSInfo(PVM pVM, PVMCPU pVCpu, PRTGCUINTPTR pGCPtrTss, PRTGCUINTPTR pcbTss, bool *pfCanHaveIOBitmap); 52 VMMDECL(RTGCPTR) SELMToFlat(PVM pVM, DISSELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr); 53 VMMDECL(RTGCPTR) SELMToFlatBySel(PVM pVM, RTSEL Sel, RTGCPTR Addr); 54 VMMDECL(void) SELMShadowCR3Changed(PVM pVM, PVMCPU pVCpu); 55 55 56 56 /** Flags for SELMToFlatEx(). 
… … 74 74 /** @} */ 75 75 76 VMMDECL(int) SELMToFlatEx(PVMCPU pVCpu, DISSELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr, uint32_t fFlags,77 PRTGCPTR ppvGC);78 VMMDECL(int) SELMToFlatBySelEx(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL Sel, RTGCPTR Addr, uint32_t fFlags,79 PRTGCPTR ppvGC, uint32_t *pcb);80 VMMDECL(int) SELMValidateAndConvertCSAddr(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL SelCPL, RTSEL SelCS, PCPUMSELREG pSRegCS,81 RTGCPTR Addr, PRTGCPTR ppvFlat);82 VMMDECL(int) SELMGetLDTFromSel(PVM pVM, RTSEL SelLdt, PRTGCPTR ppvLdt, unsigned *pcbLimit);76 VMMDECL(int) SELMToFlatEx(PVMCPU pVCpu, DISSELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr, uint32_t fFlags, 77 PRTGCPTR ppvGC); 78 VMMDECL(int) SELMToFlatBySelEx(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL Sel, RTGCPTR Addr, uint32_t fFlags, 79 PRTGCPTR ppvGC, uint32_t *pcb); 80 VMMDECL(int) SELMValidateAndConvertCSAddr(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL SelCPL, RTSEL SelCS, 81 PCPUMSELREG pSRegCS, RTGCPTR Addr, PRTGCPTR ppvFlat); 82 VMMDECL(int) SELMGetLDTFromSel(PVM pVM, RTSEL SelLdt, PRTGCPTR ppvLdt, unsigned *pcbLimit); 83 83 #ifdef VBOX_WITH_RAW_MODE 84 VMM_INT_DECL(void) SELMLoadHiddenSelectorReg(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUMSELREG pSReg);84 VMM_INT_DECL(void) SELMLoadHiddenSelectorReg(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUMSELREG pSReg); 85 85 #endif 86 86 … … 91 91 * @{ 92 92 */ 93 VMMR3DECL(int) SELMR3Init(PVM pVM);94 VMMR3DECL(int) SELMR3InitFinalize(PVM pVM);95 VMMR3DECL(void) SELMR3Relocate(PVM pVM);96 VMMR3DECL(int) SELMR3Term(PVM pVM);97 VMMR3DECL(void) SELMR3Reset(PVM pVM);98 VMMR3DECL( int)SELMR3UpdateFromCPUM(PVM pVM, PVMCPU pVCpu);99 VMMR3DECL(int) SELMR3SyncTSS(PVM pVM, PVMCPU pVCpu);100 VMMR3DECL(int) SELMR3GetSelectorInfo(PVM pVM, PVMCPU pVCpu, RTSEL Sel, PDBGFSELINFO pSelInfo);101 VMMR3DECL(int) SELMR3GetShadowSelectorInfo(PVM pVM, RTSEL Sel, PDBGFSELINFO pSelInfo);102 VMMR3DECL(void) SELMR3DisableMonitoring(PVM pVM);103 VMMR3DECL(void) SELMR3DumpDescriptor(X86DESC Desc, RTSEL Sel, const char *pszMsg);104 VMMR3DECL(void) SELMR3DumpHyperGDT(PVM pVM);105 VMMR3DECL(void) SELMR3DumpHyperLDT(PVM pVM);106 VMMR3DECL(void) SELMR3DumpGuestGDT(PVM pVM);107 VMMR3DECL(void) SELMR3DumpGuestLDT(PVM pVM);108 VMMR3DECL(bool) SELMR3CheckTSS(PVM pVM);109 VMMR3DECL(int) SELMR3DebugCheck(PVM pVM);93 VMMR3DECL(int) SELMR3Init(PVM pVM); 94 VMMR3DECL(int) SELMR3InitFinalize(PVM pVM); 95 VMMR3DECL(void) SELMR3Relocate(PVM pVM); 96 VMMR3DECL(int) SELMR3Term(PVM pVM); 97 VMMR3DECL(void) SELMR3Reset(PVM pVM); 98 VMMR3DECL(VBOXSTRICTRC) SELMR3UpdateFromCPUM(PVM pVM, PVMCPU pVCpu); 99 VMMR3DECL(int) SELMR3SyncTSS(PVM pVM, PVMCPU pVCpu); 100 VMMR3DECL(int) SELMR3GetSelectorInfo(PVM pVM, PVMCPU pVCpu, RTSEL Sel, PDBGFSELINFO pSelInfo); 101 VMMR3DECL(int) SELMR3GetShadowSelectorInfo(PVM pVM, RTSEL Sel, PDBGFSELINFO pSelInfo); 102 VMMR3DECL(void) SELMR3DisableMonitoring(PVM pVM); 103 VMMR3DECL(void) SELMR3DumpDescriptor(X86DESC Desc, RTSEL Sel, const char *pszMsg); 104 VMMR3DECL(void) SELMR3DumpHyperGDT(PVM pVM); 105 VMMR3DECL(void) SELMR3DumpHyperLDT(PVM pVM); 106 VMMR3DECL(void) SELMR3DumpGuestGDT(PVM pVM); 107 VMMR3DECL(void) SELMR3DumpGuestLDT(PVM pVM); 108 VMMR3DECL(bool) SELMR3CheckTSS(PVM pVM); 109 VMMR3DECL(int) SELMR3DebugCheck(PVM pVM); 110 110 /** @def SELMR3_DEBUG_CHECK 111 111 * Invokes SELMR3DebugCheck in stricts builds. */ -
trunk/include/VBox/vmm/vm.h
r41976 r42407 496 496 * @param fFlag The flag to check. 497 497 */ 498 #define VM_FF_ISSET(pVM, fFlag) (((pVM)->fGlobalForcedActions & (fFlag)) == (fFlag)) 498 #define VM_FF_IS_SET(pVM, fFlag) (((pVM)->fGlobalForcedActions & (fFlag)) == (fFlag)) 499 /** @deprecated */ 500 #define VM_FF_ISSET(pVM, fFlag) VM_FF_IS_SET(pVM, fFlag) 499 501 500 502 /** @def VMCPU_FF_ISSET … … 504 506 * @param fFlag The flag to check. 505 507 */ 506 #define VMCPU_FF_ISSET(pVCpu, fFlag) (((pVCpu)->fLocalForcedActions & (fFlag)) == (fFlag)) 508 #define VMCPU_FF_IS_SET(pVCpu, fFlag) (((pVCpu)->fLocalForcedActions & (fFlag)) == (fFlag)) 509 /** @deprecated */ 510 #define VMCPU_FF_ISSET(pVCpu, fFlag) VMCPU_FF_IS_SET(pVCpu, fFlag) 507 511 508 512 /** @def VM_FF_ISPENDING … … 512 516 * @param fFlags The flags to check for. 513 517 */ 514 #define VM_FF_ISPENDING(pVM, fFlags) ((pVM)->fGlobalForcedActions & (fFlags)) 518 #define VM_FF_IS_PENDING(pVM, fFlags) ((pVM)->fGlobalForcedActions & (fFlags)) 519 /** @deprecated */ 520 #define VM_FF_ISPENDING(pVM, fFlags) VM_FF_IS_PENDING(pVM, fFlags) 515 521 516 522 /** @def VM_FF_TESTANDCLEAR … … 522 528 * @param iBit Bit position to check and clear 523 529 */ 524 #define VM_FF_TESTANDCLEAR(pVM, iBit) (ASMAtomicBitTestAndClear(&(pVM)->fGlobalForcedActions, iBit##_BIT)) 530 #define VM_FF_TEST_AND_CLEAR(pVM, iBit) (ASMAtomicBitTestAndClear(&(pVM)->fGlobalForcedActions, iBit##_BIT)) 531 /** @deprecated */ 532 #define VM_FF_TESTANDCLEAR(pVM, iBit) (ASMAtomicBitTestAndClear(&(pVM)->fGlobalForcedActions, iBit##_BIT)) 525 533 526 534 /** @def VMCPU_FF_TESTANDCLEAR … … 532 540 * @param iBit Bit position to check and clear 533 541 */ 534 #define VMCPU_FF_TESTANDCLEAR(pVCpu, iBit) (ASMAtomicBitTestAndClear(&(pVCpu)->fLocalForcedActions, iBit##_BIT)) 542 #define VMCPU_FF_TEST_AND_CLEAR(pVCpu, iBit) (ASMAtomicBitTestAndClear(&(pVCpu)->fLocalForcedActions, iBit##_BIT)) 543 /** @deprecated */ 544 #define VMCPU_FF_TESTANDCLEAR(pVCpu, iBit) (ASMAtomicBitTestAndClear(&(pVCpu)->fLocalForcedActions, iBit##_BIT)) 535 545 536 546 /** @def VMCPU_FF_ISPENDING … … 540 550 * @param fFlags The flags to check for. 541 551 */ 542 #define VMCPU_FF_ISPENDING(pVCpu, fFlags) ((pVCpu)->fLocalForcedActions & (fFlags)) 552 #define VMCPU_FF_IS_PENDING(pVCpu, fFlags) ((pVCpu)->fLocalForcedActions & (fFlags)) 553 /** @deprecated */ 554 #define VMCPU_FF_ISPENDING(pVCpu, fFlags) VMCPU_FF_IS_PENDING(pVCpu, fFlags) 543 555 544 556 /** @def VM_FF_ISPENDING … … 942 954 struct SELM s; 943 955 #endif 944 uint8_t padding[ 576]; /* multiple of 64 */956 uint8_t padding[768]; /* multiple of 64 */ 945 957 } selm; 946 958 … … 1066 1078 1067 1079 /** Padding for aligning the cpu array on a page boundary. */ 1068 uint8_t abAlignment2[ 734];1080 uint8_t abAlignment2[542]; 1069 1081 1070 1082 /* ---- end small stuff ---- */ -
trunk/include/iprt/x86.h
r42337 r42407 2385 2385 * Return the base address of a descriptor. 2386 2386 */ 2387 #define X86DESC_BASE( desc) /*ASM-NOINC*/ \2388 ( ((uint32_t)(( desc).Gen.u8BaseHigh2) << 24) \2389 | ( ( desc).Gen.u8BaseHigh1 << 16) \2390 | ( ( desc).Gen.u16BaseLow ) )2387 #define X86DESC_BASE(a_pDesc) /*ASM-NOINC*/ \ 2388 ( ((uint32_t)((a_pDesc)->Gen.u8BaseHigh2) << 24) \ 2389 | ( (a_pDesc)->Gen.u8BaseHigh1 << 16) \ 2390 | ( (a_pDesc)->Gen.u16BaseLow ) ) 2391 2391 2392 2392 /** @def X86DESC_LIMIT 2393 2393 * Return the limit of a descriptor. 2394 2394 */ 2395 #define X86DESC_LIMIT(desc) /*ASM-NOINC*/ \ 2396 ( ((uint32_t)((desc).Gen.u4LimitHigh) << 16) \ 2397 | ( (desc).Gen.u16LimitLow ) ) 2395 #define X86DESC_LIMIT(a_pDesc) /*ASM-NOINC*/ \ 2396 ( ((uint32_t)((a_pDesc)->Gen.u4LimitHigh) << 16) \ 2397 | ( (a_pDesc)->Gen.u16LimitLow ) ) 2398 2399 /** @def X86DESC_LIMIT_G 2400 * Return the limit of a descriptor with the granularity bit taken into account. 2401 * @returns Selector limit (uint32_t). 2402 * @param a_pDesc Pointer to the descriptor. 2403 */ 2404 #define X86DESC_LIMIT_G(a_pDesc) /*ASM-NOINC*/ \ 2405 ( (a_pDesc)->Gen.u1Granularity \ 2406 ? ( ( ((uint32_t)(a_pDesc)->Gen.u4LimitHigh << 16) | (a_pDesc)->Gen.u16LimitLow ) << 12 ) | UINT32_C(0xfff) \ 2407 : ((uint32_t)(a_pDesc)->Gen.u4LimitHigh << 16) | (a_pDesc)->Gen.u16LimitLow \ 2408 ) 2398 2409 2399 2410 /** @def X86DESC_GET_HID_ATTR 2400 2411 * Get the descriptor attributes for the hidden register. 2401 2412 */ 2402 #define X86DESC_GET_HID_ATTR( desc) /*ASM-NOINC*/ \2403 ( ( desc.u >> (16+16+8)) & UINT32_C(0xf0ff) ) /** @todo do we have a define for 0xf0ff? */2413 #define X86DESC_GET_HID_ATTR(a_pDesc) /*ASM-NOINC*/ \ 2414 ( ((a_pDesc)->u >> (16+16+8)) & UINT32_C(0xf0ff) ) /** @todo do we have a define for 0xf0ff? */ 2404 2415 2405 2416 #ifndef VBOX_FOR_DTRACE_LIB … … 2578 2589 * Return the base of a 64-bit descriptor. 2579 2590 */ 2580 #define X86DESC64_BASE( desc) /*ASM-NOINC*/ \2581 ( ((uint64_t)(( desc).Gen.u32BaseHigh3) << 32) \2582 | ((uint32_t)(( desc).Gen.u8BaseHigh2) << 24) \2583 | ( ( desc).Gen.u8BaseHigh1 << 16) \2584 | ( ( desc).Gen.u16BaseLow ) )2591 #define X86DESC64_BASE(a_pDesc) /*ASM-NOINC*/ \ 2592 ( ((uint64_t)((a_pDesc)->Gen.u32BaseHigh3) << 32) \ 2593 | ((uint32_t)((a_pDesc)->Gen.u8BaseHigh2) << 24) \ 2594 | ( (a_pDesc)->Gen.u8BaseHigh1 << 16) \ 2595 | ( (a_pDesc)->Gen.u16BaseLow ) ) 2585 2596 2586 2597 … … 2947 2958 2948 2959 /** 2949 * The mask used to mask off the table indicator and CPL of an selector.2960 * The mask used to mask off the table indicator and RPL of an selector. 2950 2961 */ 2951 2962 #define X86_SEL_MASK 0xfff8U 2963 2964 /** 2965 * The mask used to mask off the RPL of an selector. 2966 */ 2967 #define X86_SEL_MASK_RPL 0xfffcU 2952 2968 2953 2969 /** -
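The new X86DESC_LIMIT_G() macro folds the granularity bit into the limit computation that callers in this changeset used to open-code (shift by PAGE_SHIFT, OR in the page offset mask). A minimal sketch of that arithmetic, using simplified types and a made-up helper name rather than the real macro:

    #include <stdint.h>

    /* When the granularity bit is set, the 20-bit raw limit is in 4 KiB units,
       so it is shifted up by 12 and the low 12 bits are filled with ones. */
    static uint32_t descLimitWithGranularity(uint32_t u20RawLimit, int fGranularity)
    {
        return fGranularity ? (u20RawLimit << 12) | 0xfffU : u20RawLimit;
    }

    /* Example: raw limit 0xfffff with G=1 yields 0xffffffff (a flat 4 GiB segment);
       with G=0 the same raw limit only covers 0xfffff bytes (1 MiB). */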
trunk/src/VBox/Debugger/DBGCEmulateCodeView.cpp
r42165 r42407 1758 1758 const char *pszGranularity = pDesc->Gen.u1Granularity ? "G" : " "; 1759 1759 const char *pszBig = pDesc->Gen.u1DefBig ? "BIG" : " "; 1760 uint32_t u32Base = X86DESC_BASE(*pDesc); 1761 uint32_t cbLimit = X86DESC_LIMIT(*pDesc); 1762 if (pDesc->Gen.u1Granularity) 1763 cbLimit <<= PAGE_SHIFT; 1760 uint32_t u32Base = X86DESC_BASE(pDesc); 1761 uint32_t cbLimit = X86DESC_LIMIT_G(pDesc); 1764 1762 1765 1763 rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL, "%04x %s Bas=%08x Lim=%08x DPL=%d %s %s %s %s AVL=%d L=%d%s\n", … … 1815 1813 const char *pszLong = pDesc->Gen.u1Long ? "LONG" : " "; 1816 1814 1817 uint64_t u32Base = X86DESC64_BASE( *pDesc);1818 uint32_t cbLimit = X86DESC_LIMIT (*pDesc);1815 uint64_t u32Base = X86DESC64_BASE(pDesc); 1816 uint32_t cbLimit = X86DESC_LIMIT_G(pDesc); 1819 1817 1820 1818 rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL, "%04x %s Bas=%016RX64 Lim=%08x DPL=%d %s %s %s %sAVL=%d R=%d%s\n", -
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
r42193 r42407 65 65 */ 66 66 #if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0) 67 # define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg , a_fIsCS) \67 # define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \ 68 68 do \ 69 69 { \ 70 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_p SReg)) \71 cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg , a_fIsCS); \70 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)) \ 71 cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg); \ 72 72 } while (0) 73 73 #else 74 # define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg , a_fIsCS) \75 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_p SReg));74 # define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \ 75 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)); 76 76 #endif 77 77 … … 85 85 * @param pVCpu The current Virtual CPU. 86 86 * @param pSReg The selector register to lazily load hidden parts of. 87 * @param fIsCS 88 */ 89 static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg, bool fIsCS) 90 { 91 Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg)); 87 */ 88 static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg) 89 { 90 Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)); 92 91 Assert(!HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM))); 92 Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT); 93 93 94 94 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM) … … 96 96 /* V8086 mode - Tightly controlled environment, no question about the limit or flags. */ 97 97 pSReg->Attr.u = 0; 98 pSReg->Attr.n.u4Type = pSReg == &pVCpu->cpum.s.Guest.cs ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC; 98 99 pSReg->Attr.n.u1DescType = 1; /* code/data segment */ 100 pSReg->Attr.n.u2Dpl = 3; 99 101 pSReg->Attr.n.u1Present = 1; 100 pSReg->Attr.n.u4Type = fIsCS ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;101 102 pSReg->u32Limit = 0x0000ffff; 102 103 pSReg->u64Base = (uint32_t)pSReg->Sel << 4; … … 140 141 VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu) 141 142 { 142 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs , true);143 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss , false);143 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs); 144 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss); 144 145 } 145 146 … … 152 153 VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg) 153 154 { 154 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg , pSReg == &pVCpu->cpum.s.Guest.cs);155 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg); 155 156 } 156 157 … … 565 566 VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr) 566 567 { 567 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr; 568 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr; 569 /* The caller will set more hidden bits if it has them. */ 570 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0; 571 pVCpu->cpum.s.Guest.ldtr.fFlags = 0; 568 572 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR; 569 573 return VINF_SUCCESS; … … 2332 2336 if (!CPUMIsGuestInLongMode(pVCpu)) 2333 2337 return false; 2334 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs , true);2338 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs); 2335 2339 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long; 2336 2340 } … … 2349 2353 } 2350 2354 2355 #ifdef VBOX_WITH_RAW_MODE_NOT_R0 2356 /** 2357 * 2358 * @returns @c true if we've entered raw-mode and selectors with RPL=1 are 2359 * really RPL=0, @c false if we've not (RPL=1 really is RPL=1). 
2360 * @param pVCpu The current virtual CPU. 2361 */ 2362 VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu) 2363 { 2364 return pVCpu->cpum.s.fRawEntered; 2365 } 2366 #endif 2351 2367 2352 2368 #ifdef VBOX_WITH_RAW_MODE_NOT_R0 … … 2561 2577 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM) 2562 2578 { 2563 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID( &pVCpu->cpum.s.Guest.ss))2579 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss)) 2564 2580 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl; 2565 2581 else … … 2620 2636 } 2621 2637 2622 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs , true);2638 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs); 2623 2639 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long 2624 2640 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA)) … … 2643 2659 } 2644 2660 2645 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs , true);2661 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs); 2646 2662 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long 2647 2663 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA)) -
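Among the CPUMAllRegs.cpp changes above, the V8086-mode branch of the lazy hidden-selector load now fills in the whole attribute byte (code/data type, DPL 3, present) instead of only the type and present bits, and derives the type from whether the register is CS rather than from a separate fIsCS parameter. A simplified, self-contained sketch of what that branch computes; the structure and function name are made up and much reduced from the real CPUMSELREG:

    #include <stdbool.h>
    #include <stdint.h>

    #define SEL_TYPE_ER_ACC  0xbU   /* execute/read code segment, accessed */
    #define SEL_TYPE_RW_ACC  0x3U   /* read/write data segment, accessed   */

    /* trimmed-down stand-in for CPUMSELREG */
    typedef struct { uint16_t Sel; uint64_t u64Base; uint32_t u32Limit; uint32_t uAttr; } SELREGLITE;

    static void lazyLoadHiddenPartsV86(SELREGLITE *pSReg, bool fIsCs)
    {
        pSReg->u64Base  = (uint32_t)pSReg->Sel << 4;   /* real/V86-style base      */
        pSReg->u32Limit = 0x0000ffff;                  /* always 64 KiB - 1        */
        pSReg->uAttr    = (fIsCs ? SEL_TYPE_ER_ACC : SEL_TYPE_RW_ACC)
                        | (1u << 4)    /* code/data descriptor, not system */
                        | (3u << 5)    /* DPL = 3                          */
                        | (1u << 7);   /* present                          */
    }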
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r42193 r42407 1536 1536 } 1537 1537 1538 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)1539 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )1540 {1541 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));1542 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);1543 }1544 1538 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) 1545 1539 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) ) … … 1914 1908 ? Idte.Gate.u16OffsetLow 1915 1909 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16); 1916 uint32_t cbLimitCS = X86DESC_LIMIT(DescCS.Legacy); 1917 if (DescCS.Legacy.Gen.u1Granularity) 1918 cbLimitCS = (cbLimitCS << PAGE_SHIFT) | PAGE_OFFSET_MASK; 1910 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy); 1919 1911 if (uNewEip > cbLimitCS) 1920 1912 { … … 1951 1943 1952 1944 /* Check that there is sufficient space for the stack frame. */ 1953 uint32_t cbLimitSS = X86DESC_LIMIT(DescSS.Legacy); 1954 if (DescSS.Legacy.Gen.u1Granularity) 1955 cbLimitSS = (cbLimitSS << PAGE_SHIFT) | PAGE_OFFSET_MASK; 1945 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy); 1956 1946 AssertReturn(!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN), VERR_IEM_ASPECT_NOT_IMPLEMENTED); 1957 1947 … … 1972 1962 RTPTRUNION uStackFrame; 1973 1963 rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX, 1974 uNewEsp - cbStackFrame + X86DESC_BASE( DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */1964 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */ 1975 1965 if (rcStrict != VINF_SUCCESS) 1976 1966 return rcStrict; … … 2016 2006 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID; 2017 2007 pCtx->ss.u32Limit = cbLimitSS; 2018 pCtx->ss.u64Base = X86DESC_BASE( DescSS.Legacy);2019 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR( DescSS.Legacy);2008 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy); 2009 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy); 2020 2010 pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */ 2021 2011 pIemCpu->uCpl = uNewCpl; … … 2064 2054 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID; 2065 2055 pCtx->cs.u32Limit = cbLimitCS; 2066 pCtx->cs.u64Base = X86DESC_BASE( DescCS.Legacy);2067 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR( DescCS.Legacy);2056 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy); 2057 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy); 2068 2058 2069 2059 pCtx->rip = uNewEip; … … 2682 2672 } 2683 2673 #ifdef VBOX_WITH_RAW_MODE_NOT_R0 2684 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID( pSReg))2674 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg)) 2685 2675 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg); 2686 2676 #else 2687 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID( pSReg));2677 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg)); 2688 2678 #endif 2689 2679 return pSReg; -
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
r41906 r42407 986 986 here, but that is ruled out by offSeg being 32-bit, right?) */ 987 987 uint64_t u64Base; 988 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy); 989 if (Desc.Legacy.Gen.u1Granularity) 990 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK; 988 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy); 991 989 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) 992 990 u64Base = 0; … … 998 996 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel); 999 997 } 1000 u64Base = X86DESC_BASE( Desc.Legacy);998 u64Base = X86DESC_BASE(&Desc.Legacy); 1001 999 } 1002 1000 … … 1021 1019 pCtx->cs.ValidSel = pCtx->cs.Sel; 1022 1020 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID; 1023 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR( Desc.Legacy);1021 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy); 1024 1022 pCtx->cs.u32Limit = cbLimit; 1025 1023 pCtx->cs.u64Base = u64Base; … … 1180 1178 /* Limit / canonical check. */ 1181 1179 uint64_t u64Base; 1182 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy); 1183 if (Desc.Legacy.Gen.u1Granularity) 1184 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK; 1185 1180 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy); 1186 1181 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) 1187 1182 { … … 1200 1195 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel); 1201 1196 } 1202 u64Base = X86DESC_BASE( Desc.Legacy);1197 u64Base = X86DESC_BASE(&Desc.Legacy); 1203 1198 } 1204 1199 … … 1245 1240 pCtx->cs.ValidSel = pCtx->cs.Sel; 1246 1241 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID; 1247 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR( Desc.Legacy);1242 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy); 1248 1243 pCtx->cs.u32Limit = cbLimit; 1249 1244 pCtx->cs.u64Base = u64Base; … … 1485 1480 1486 1481 /* Calc SS limit.*/ 1487 uint32_t cbLimitSs = X86DESC_LIMIT(DescSs.Legacy); 1488 if (DescSs.Legacy.Gen.u1Granularity) 1489 cbLimitSs = (cbLimitSs << PAGE_SHIFT) | PAGE_OFFSET_MASK; 1490 1482 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy); 1491 1483 1492 1484 /* Is RIP canonical or within CS.limit? */ 1493 1485 uint64_t u64Base; 1494 uint32_t cbLimitCs = X86DESC_LIMIT(DescCs.Legacy); 1495 if (DescCs.Legacy.Gen.u1Granularity) 1496 cbLimitCs = (cbLimitCs << PAGE_SHIFT) | PAGE_OFFSET_MASK; 1486 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy); 1497 1487 1498 1488 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) … … 1513 1503 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs); 1514 1504 } 1515 u64Base = X86DESC_BASE( DescCs.Legacy);1505 u64Base = X86DESC_BASE(&DescCs.Legacy); 1516 1506 } 1517 1507 … … 1553 1543 pCtx->cs.ValidSel = uNewCs; 1554 1544 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID; 1555 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR( DescCs.Legacy);1545 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy); 1556 1546 pCtx->cs.u32Limit = cbLimitCs; 1557 1547 pCtx->cs.u64Base = u64Base; … … 1560 1550 pCtx->ss.ValidSel = uNewOuterSs; 1561 1551 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID; 1562 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR( DescSs.Legacy);1552 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy); 1563 1553 pCtx->ss.u32Limit = cbLimitSs; 1564 1554 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) 1565 1555 pCtx->ss.u64Base = 0; 1566 1556 else 1567 pCtx->ss.u64Base = X86DESC_BASE( DescSs.Legacy);1557 pCtx->ss.u64Base = X86DESC_BASE(&DescSs.Legacy); 1568 1558 1569 1559 pIemCpu->uCpl = (uNewCs & X86_SEL_RPL); … … 1588 1578 /* Limit / canonical check. 
*/ 1589 1579 uint64_t u64Base; 1590 uint32_t cbLimitCs = X86DESC_LIMIT(DescCs.Legacy); 1591 if (DescCs.Legacy.Gen.u1Granularity) 1592 cbLimitCs = (cbLimitCs << PAGE_SHIFT) | PAGE_OFFSET_MASK; 1580 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy); 1593 1581 1594 1582 if (pIemCpu->enmCpuMode == IEMMODE_64BIT) … … 1608 1596 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs); 1609 1597 } 1610 u64Base = X86DESC_BASE( DescCs.Legacy);1598 u64Base = X86DESC_BASE(&DescCs.Legacy); 1611 1599 } 1612 1600 … … 1638 1626 pCtx->cs.ValidSel = uNewCs; 1639 1627 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID; 1640 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR( DescCs.Legacy);1628 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy); 1641 1629 pCtx->cs.u32Limit = cbLimitCs; 1642 1630 pCtx->cs.u64Base = u64Base; … … 2019 2007 } 2020 2008 2021 uint32_t cbLimitCS = X86DESC_LIMIT(DescCS.Legacy); 2022 if (DescCS.Legacy.Gen.u1Granularity) 2023 cbLimitCS = (cbLimitCS << PAGE_SHIFT) | PAGE_OFFSET_MASK; 2009 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy); 2024 2010 2025 2011 /* … … 2100 2086 } 2101 2087 2102 uint32_t cbLimitSs = X86DESC_LIMIT(DescSS.Legacy); 2103 if (DescSS.Legacy.Gen.u1Granularity) 2104 cbLimitSs = (cbLimitSs << PAGE_SHIFT) | PAGE_OFFSET_MASK; 2088 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy); 2105 2089 2106 2090 /* Check EIP. */ … … 2135 2119 pCtx->cs.ValidSel = uNewCs; 2136 2120 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID; 2137 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR( DescCS.Legacy);2121 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy); 2138 2122 pCtx->cs.u32Limit = cbLimitCS; 2139 pCtx->cs.u64Base = X86DESC_BASE( DescCS.Legacy);2123 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy); 2140 2124 pCtx->rsp = uNewESP; 2141 2125 pCtx->ss.Sel = uNewSS; 2142 2126 pCtx->ss.ValidSel = uNewSS; 2143 2127 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID; 2144 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR( DescSS.Legacy);2128 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy); 2145 2129 pCtx->ss.u32Limit = cbLimitSs; 2146 pCtx->ss.u64Base = X86DESC_BASE( DescSS.Legacy);2130 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy); 2147 2131 2148 2132 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF … … 2193 2177 pCtx->cs.ValidSel = uNewCs; 2194 2178 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID; 2195 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR( DescCS.Legacy);2179 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy); 2196 2180 pCtx->cs.u32Limit = cbLimitCS; 2197 pCtx->cs.u64Base = X86DESC_BASE( DescCS.Legacy);2181 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy); 2198 2182 pCtx->rsp = uNewRsp; 2199 2183 … … 2357 2341 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */ 2358 2342 { 2359 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)2360 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )2361 {2362 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));2363 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);2364 }2365 2343 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) 2366 2344 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) ) … … 2423 2401 2424 2402 /* The base and limit. 
*/ 2403 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy); 2425 2404 uint64_t u64Base; 2426 uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);2427 if (Desc.Legacy.Gen.u1Granularity)2428 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;2429 2430 2405 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT 2431 2406 && iSegReg < X86_SREG_FS) 2432 2407 u64Base = 0; 2433 2408 else 2434 u64Base = X86DESC_BASE( Desc.Legacy);2409 u64Base = X86DESC_BASE(&Desc.Legacy); 2435 2410 2436 2411 /* … … 2448 2423 /* commit */ 2449 2424 *pSel = uSel; 2450 pHid->Attr.u = X86DESC_GET_HID_ATTR( Desc.Legacy);2425 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy); 2451 2426 pHid->u32Limit = cbLimit; 2452 2427 pHid->u64Base = u64Base; … … 2723 2698 uint64_t u64Base; 2724 2699 if (!IEM_IS_LONG_MODE(pIemCpu)) 2725 u64Base = X86DESC_BASE( Desc.Legacy);2700 u64Base = X86DESC_BASE(&Desc.Legacy); 2726 2701 else 2727 2702 { … … 2732 2707 } 2733 2708 2734 u64Base = X86DESC64_BASE( Desc.Long);2709 u64Base = X86DESC64_BASE(&Desc.Long); 2735 2710 if (!IEM_IS_CANONICAL(u64Base)) 2736 2711 { … … 2757 2732 pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK; 2758 2733 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID; 2759 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR( Desc.Legacy);2760 pCtx->ldtr.u32Limit = X86DESC_LIMIT (Desc.Legacy);2734 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy); 2735 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy); 2761 2736 pCtx->ldtr.u64Base = u64Base; 2762 2737 … … 2822 2797 uint64_t u64Base; 2823 2798 if (!IEM_IS_LONG_MODE(pIemCpu)) 2824 u64Base = X86DESC_BASE( Desc.Legacy);2799 u64Base = X86DESC_BASE(&Desc.Legacy); 2825 2800 else 2826 2801 { … … 2831 2806 } 2832 2807 2833 u64Base = X86DESC64_BASE( Desc.Long);2808 u64Base = X86DESC64_BASE(&Desc.Long); 2834 2809 if (!IEM_IS_CANONICAL(u64Base)) 2835 2810 { … … 2878 2853 pCtx->tr.ValidSel = uNewTr & X86_SEL_MASK; 2879 2854 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID; 2880 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR( Desc.Legacy);2881 pCtx->tr.u32Limit = X86DESC_LIMIT (Desc.Legacy);2855 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy); 2856 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy); 2882 2857 pCtx->tr.u64Base = u64Base; 2883 2858 -
trunk/src/VBox/VMM/VMMAll/SELMAll.cpp
r42186 r42407 31 31 #include <VBox/param.h> 32 32 #include <iprt/assert.h> 33 #include <VBox/log.h>34 33 #include <VBox/vmm/vmm.h> 35 34 #include <iprt/x86.h> 35 36 37 /******************************************************************************* 38 * Global Variables * 39 *******************************************************************************/ 40 #if defined(LOG_ENABLED) && defined(VBOX_WITH_RAW_MODE_NOT_R0) 41 /** Segment register names. */ 42 static char const g_aszSRegNms[X86_SREG_COUNT][4] = { "ES", "CS", "SS", "DS", "FS", "GS" }; 43 #endif 36 44 37 45 … … 65 73 } 66 74 67 return (RTGCPTR)(((RTGCUINTPTR)Addr + X86DESC_BASE( Desc)) & 0xffffffff);75 return (RTGCPTR)(((RTGCUINTPTR)Addr + X86DESC_BASE(&Desc)) & 0xffffffff); 68 76 } 69 77 #endif /* VBOX_WITH_RAW_MODE_NOT_R0 */ … … 105 113 #ifdef VBOX_WITH_RAW_MODE_NOT_R0 106 114 /** @todo when we're in 16 bits mode, we should cut off the address as well?? */ 107 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(p SReg))115 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)) 108 116 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg); 109 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID( &pCtxCore->cs))117 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs)) 110 118 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs); 111 119 #else 112 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(p SReg));113 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID( &pCtxCore->cs));120 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)); 121 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs)); 114 122 #endif 115 123 … … 168 176 if (ppvGC) 169 177 { 170 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(p SReg))178 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)) 171 179 *ppvGC = pSReg->u64Base + uFlat; 172 180 else … … 178 186 179 187 #ifdef VBOX_WITH_RAW_MODE_NOT_R0 180 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(p SReg))188 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)) 181 189 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg); 182 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID( &pCtxCore->cs))190 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs)) 183 191 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs); 184 192 #else 185 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(p SReg));186 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID( &pCtxCore->cs));193 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)); 194 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs)); 187 195 #endif 188 196 … … 347 355 348 356 /* calc limit. */ 349 uint32_t u32Limit = X86DESC_LIMIT(Desc); 350 if (Desc.Gen.u1Granularity) 351 u32Limit = (u32Limit << PAGE_SHIFT) | PAGE_OFFSET_MASK; 357 uint32_t u32Limit = X86DESC_LIMIT_G(&Desc); 352 358 353 359 /* calc address assuming straight stuff. */ 354 RTGCPTR pvFlat = Addr + X86DESC_BASE( Desc);360 RTGCPTR pvFlat = Addr + X86DESC_BASE(&Desc); 355 361 356 362 /* Cut the address to 32 bits. */ … … 475 481 476 482 #ifdef VBOX_WITH_RAW_MODE_NOT_R0 483 484 static void selLoadHiddenSelectorRegFromGuestTable(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUMSELREG pSReg, 485 RTGCPTR GCPtrDesc, RTSEL const Sel, uint32_t const iSReg) 486 { 487 /* 488 * Try read the entry. 489 */ 490 X86DESC GstDesc; 491 int rc = PGMPhysReadGCPtr(pVCpu, &GstDesc, GCPtrDesc, sizeof(GstDesc)); 492 if (RT_FAILURE(rc)) 493 { 494 Log(("SELMLoadHiddenSelectorReg: Error reading descriptor %s=%#x: %Rrc\n", g_aszSRegNms[iSReg], Sel, rc)); 495 STAM_REL_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelReadErrors); 496 return; 497 } 498 499 /* 500 * Validate it and load it. 
501 */ 502 if (!selmIsGstDescGoodForSReg(pVCpu, pSReg, &GstDesc, iSReg, CPUMGetGuestCPL(pVCpu))) 503 { 504 Log(("SELMLoadHiddenSelectorReg: Guest table entry is no good (%s=%#x): %.8Rhxs\n", g_aszSRegNms[iSReg], Sel, &GstDesc)); 505 STAM_REL_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelGstNoGood); 506 return; 507 } 508 509 selmLoadHiddenSRegFromGuestDesc(pVCpu, pSReg, &GstDesc); 510 Log(("SELMLoadHiddenSelectorReg: loaded %s=%#x:{b=%llx, l=%x, a=%x, vs=%x} (gst)\n", 511 g_aszSRegNms[iSReg], Sel, pSReg->u64Base, pSReg->u32Limit, pSReg->Attr.u, pSReg->ValidSel)); 512 STAM_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelGst); 513 } 514 515 477 516 /** 478 517 * CPUM helper that loads the hidden selector register from the descriptor table … … 500 539 Assert(pVM->cCpus == 1); 501 540 502 RTSEL const Sel = pSReg->Sel; 503 504 /** @todo Consider loading these from the shadow tables when possible? */ 505 /* 506 * Calculate descriptor table entry address. 507 */ 508 RTGCPTR GCPtrDesc; 541 542 /* 543 * Get the shadow descriptor table entry and validate it. 544 * Should something go amiss, try the guest table. 545 */ 546 RTSEL const Sel = pSReg->Sel; 547 uint32_t const iSReg = pSReg - CPUMCTX_FIRST_SREG(pCtx); Assert(iSReg < X86_SREG_COUNT); 548 PCX86DESC pShwDesc; 509 549 if (!(Sel & X86_SEL_LDT)) 510 550 { 511 if ((Sel & X86_SEL_MASK) >= pCtx->gdtr.cbGdt) 551 /** @todo this shall not happen, we shall check for these things when executing 552 * LGDT */ 553 AssertReturnVoid((Sel | X86_SEL_RPL | X86_SEL_LDT) <= pCtx->gdtr.cbGdt); 554 555 pShwDesc = &pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT]; 556 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT) 557 || !selmIsShwDescGoodForSReg(pSReg, pShwDesc, iSReg, CPUMGetGuestCPL(pVCpu))) 512 558 { 513 AssertFailed(); /** @todo count these. */559 selLoadHiddenSelectorRegFromGuestTable(pVCpu, pCtx, pSReg, pCtx->gdtr.pGdt + (Sel & X86_SEL_MASK), Sel, iSReg); 514 560 return; 515 561 } 516 GCPtrDesc = pCtx->gdtr.pGdt + (Sel & X86_SEL_MASK);517 /** @todo Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT]; for cases518 * where we don't change it too much. */519 562 } 520 563 else 521 564 { 522 if ((Sel & X86_SEL_MASK) >= pCtx->ldtr.u32Limit) 565 /** @todo this shall not happen, we shall check for these things when executing 566 * LLDT */ 567 AssertReturnVoid((Sel | X86_SEL_RPL | X86_SEL_LDT) <= pCtx->ldtr.u32Limit); 568 569 pShwDesc = (PCX86DESC)((uintptr_t)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper + (Sel & X86_SEL_MASK)); 570 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT) 571 || !selmIsShwDescGoodForSReg(pSReg, pShwDesc, iSReg, CPUMGetGuestCPL(pVCpu))) 523 572 { 524 AssertFailed(); /** @todo count these. */573 selLoadHiddenSelectorRegFromGuestTable(pVCpu, pCtx, pSReg, pCtx->ldtr.u64Base + (Sel & X86_SEL_MASK), Sel, iSReg); 525 574 return; 526 575 } 527 GCPtrDesc = pCtx->ldtr.u64Base + (Sel & X86_SEL_MASK); 528 } 529 530 /* 531 * Try read the entry. 532 */ 533 X86DESC Desc; 534 int rc = PGMPhysReadGCPtr(pVCpu, &Desc, GCPtrDesc, sizeof(Desc)); 535 if (RT_FAILURE(rc)) 536 { 537 //RT_ZERO(Desc); 538 //if (!(Sel & X86_SEL_LDT)) 539 // Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT]; 540 //if (!Desc.Gen.u1Present) 541 { 542 AssertFailed(); /** @todo count these. */ 543 return; 544 } 545 } 546 547 /* 548 * Digest it and store the result. 549 */ 550 if ( !Desc.Gen.u1Present 551 || !Desc.Gen.u1DescType) 552 { 553 AssertFailed(); /** @todo count these. 
*/ 554 return; 555 } 556 557 uint32_t u32Limit = X86DESC_LIMIT(Desc); 558 if (Desc.Gen.u1Granularity) 559 u32Limit = (u32Limit << PAGE_SHIFT) | PAGE_OFFSET_MASK; 560 pSReg->u32Limit = u32Limit; 561 562 pSReg->u64Base = X86DESC_BASE(Desc); 563 pSReg->Attr.u = X86DESC_GET_HID_ATTR(Desc); 564 pSReg->fFlags = CPUMSELREG_FLAGS_VALID; 565 pSReg->ValidSel = Sel; 566 } 567 #endif /* VBOX_WITH_RAW_MODE */ 568 576 } 577 578 /* 579 * All fine, load it. 580 */ 581 selmLoadHiddenSRegFromShadowDesc(pSReg, pShwDesc); 582 STAM_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelShw); 583 Log(("SELMLoadHiddenSelectorReg: loaded %s=%#x:{b=%llx, l=%x, a=%x, vs=%x} (shw)\n", 584 g_aszSRegNms[iSReg], Sel, pSReg->u64Base, pSReg->u32Limit, pSReg->Attr.u, pSReg->ValidSel)); 585 } 586 587 #endif /* VBOX_WITH_RAW_MODE_NOT_R0 */ 569 588 570 589 /** … … 583 602 { 584 603 RTGCUINTPTR uFlat = Addr & 0xffff; 585 if (!pSReg || !CPUMSELREG_ARE_HIDDEN_PARTS_VALID(p SReg))604 if (!pSReg || !CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)) 586 605 uFlat += (RTGCUINTPTR)SelCS << 4; 587 606 else … … 646 665 * Limit check. 647 666 */ 648 uint32_t u32Limit = X86DESC_LIMIT(Desc); 649 if (Desc.Gen.u1Granularity) 650 u32Limit = (u32Limit << PAGE_SHIFT) | PAGE_OFFSET_MASK; 667 uint32_t u32Limit = X86DESC_LIMIT_G(&Desc); 651 668 if ((RTGCUINTPTR)Addr <= u32Limit) 652 669 { 653 *ppvFlat = (RTGCPTR)((RTGCUINTPTR)Addr + X86DESC_BASE( Desc));670 *ppvFlat = (RTGCPTR)((RTGCUINTPTR)Addr + X86DESC_BASE(&Desc)); 654 671 /* Cut the address to 32 bits. */ 655 672 *ppvFlat &= 0xffffffff; … … 765 782 return selmValidateAndConvertCSAddrRawMode(pVCpu->CTX_SUFF(pVM), pVCpu, SelCPL, SelCS, Addr, ppvFlat, NULL); 766 783 767 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(p SRegCS))784 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSRegCS)) 768 785 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSRegCS); 769 786 … … 775 792 SelCS &= ~X86_SEL_RPL; 776 793 #else 777 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(p SRegCS));794 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSRegCS)); 778 795 Assert(pSRegCS->Sel == SelCS); 779 796 #endif -
trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp
r42165 r42407 1861 1861 * Limit and Base and format the output. 1862 1862 */ 1863 uint32_t u32Limit = X86DESC_LIMIT(*pDesc); 1864 if (pDesc->Gen.u1Granularity) 1865 u32Limit = u32Limit << PAGE_SHIFT | PAGE_OFFSET_MASK; 1863 uint32_t u32Limit = X86DESC_LIMIT_G(pDesc); 1866 1864 1867 1865 # if HC_ARCH_BITS == 64 1868 uint64_t u32Base = X86DESC64_BASE(*pDesc);1866 uint64_t u32Base = X86DESC64_BASE(pDesc); 1869 1867 1870 1868 Log(("%s %04x - %RX64 %RX64 - base=%RX64 limit=%08x dpl=%d %s\n", pszMsg, 1871 1869 Sel, pDesc->au64[0], pDesc->au64[1], u32Base, u32Limit, pDesc->Gen.u2Dpl, szMsg)); 1872 1870 # else 1873 uint32_t u32Base = X86DESC_BASE(*pDesc);1871 uint32_t u32Base = X86DESC_BASE(pDesc); 1874 1872 1875 1873 Log(("%s %04x - %08x %08x - base=%08x limit=%08x dpl=%d %s\n", pszMsg, -
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
r42394 r42407 1247 1247 if (VMX_IS_64BIT_HOST_MODE()) 1248 1248 { 1249 uint64_t trBase64 = X86DESC64_BASE( *(PX86DESC64)pDesc);1249 uint64_t trBase64 = X86DESC64_BASE((PX86DESC64)pDesc); 1250 1250 rc = VMXWriteVMCS64(VMX_VMCS_HOST_TR_BASE, trBase64); 1251 1251 Log2(("VMX_VMCS_HOST_TR_BASE %RX64\n", trBase64)); … … 1256 1256 { 1257 1257 #if HC_ARCH_BITS == 64 1258 trBase = X86DESC64_BASE( *pDesc);1258 trBase = X86DESC64_BASE(pDesc); 1259 1259 #else 1260 trBase = X86DESC_BASE( *pDesc);1260 trBase = X86DESC_BASE(pDesc); 1261 1261 #endif 1262 1262 rc = VMXWriteVMCS(VMX_VMCS_HOST_TR_BASE, trBase); -
trunk/src/VBox/VMM/VMMR3/EM.cpp
r41939 r42407 1313 1313 } 1314 1314 1315 /* 1316 * Stale hidden selectors means raw-mode is unsafe (being very careful). 1317 */ 1318 if (pCtx->cs.fFlags & CPUMSELREG_FLAGS_STALE) 1319 { 1320 Log2(("raw mode refused: stale CS\n")); 1321 return EMSTATE_REM; 1322 } 1323 if (pCtx->ss.fFlags & CPUMSELREG_FLAGS_STALE) 1324 { 1325 Log2(("raw mode refused: stale SS\n")); 1326 return EMSTATE_REM; 1327 } 1328 if (pCtx->ds.fFlags & CPUMSELREG_FLAGS_STALE) 1329 { 1330 Log2(("raw mode refused: stale DS\n")); 1331 return EMSTATE_REM; 1332 } 1333 if (pCtx->es.fFlags & CPUMSELREG_FLAGS_STALE) 1334 { 1335 Log2(("raw mode refused: stale ES\n")); 1336 return EMSTATE_REM; 1337 } 1338 if (pCtx->fs.fFlags & CPUMSELREG_FLAGS_STALE) 1339 { 1340 Log2(("raw mode refused: stale FS\n")); 1341 return EMSTATE_REM; 1342 } 1343 if (pCtx->gs.fFlags & CPUMSELREG_FLAGS_STALE) 1344 { 1345 Log2(("raw mode refused: stale GS\n")); 1346 return EMSTATE_REM; 1347 } 1348 1315 1349 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/ 1316 1350 return EMSTATE_RAW; -
trunk/src/VBox/VMM/VMMR3/EMRaw.cpp
r42186 r42407 927 927 DBGFR3InfoLog(pVM, "cpumguest", "PRIV"); 928 928 #endif 929 AssertMsgFailed(("FATAL ERROR: executing random instruction inside generated patch jump %08 X\n", pCtx->eip));929 AssertMsgFailed(("FATAL ERROR: executing random instruction inside generated patch jump %08x\n", pCtx->eip)); 930 930 return VERR_EM_RAW_PATCH_CONFLICT; 931 931 } … … 1226 1226 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT)) 1227 1227 { 1228 int rc= SELMR3UpdateFromCPUM(pVM, pVCpu);1229 if ( RT_FAILURE(rc))1230 return rc;1228 VBOXSTRICTRC rcStrict = SELMR3UpdateFromCPUM(pVM, pVCpu); 1229 if (rcStrict != VINF_SUCCESS) 1230 return VBOXSTRICTRC_TODO(rcStrict); 1231 1231 } 1232 1232 … … 1428 1428 PPATMGCSTATE pGCState = PATMR3QueryGCStateHC(pVM); 1429 1429 if (pCtx->eflags.Bits.u1VM) 1430 Log(("RV86: %04 X:%08XIF=%d VMFlags=%x\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));1430 Log(("RV86: %04x:%08x IF=%d VMFlags=%x\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags)); 1431 1431 else if ((pCtx->ss.Sel & X86_SEL_RPL) == 1) 1432 1432 { 1433 1433 bool fCSAMScanned = CSAMIsPageScanned(pVM, (RTGCPTR)pCtx->eip); 1434 Log(("RR0: %08 X ESP=%08XIF=%d VMFlags=%x PIF=%d CPL=%d (Scanned=%d)\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, pGCState->fPIF, (pCtx->ss.Sel & X86_SEL_RPL), fCSAMScanned));1434 Log(("RR0: %08x ESP=%08x IF=%d VMFlags=%x PIF=%d CPL=%d (Scanned=%d)\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, pGCState->fPIF, (pCtx->ss.Sel & X86_SEL_RPL), fCSAMScanned)); 1435 1435 } 1436 1436 else if ((pCtx->ss.Sel & X86_SEL_RPL) == 3) 1437 Log(("RR3: %08 X ESP=%08XIF=%d VMFlags=%x\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));1437 Log(("RR3: %08x ESP=%08x IF=%d VMFlags=%x\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags)); 1438 1438 #endif /* LOG_ENABLED */ 1439 1439 … … 1462 1462 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatRAWTail, d); 1463 1463 1464 LogFlow(("RR 0-E: %08X ESP=%08X IF=%d VMFlags=%x PIF=%d CPL=%d\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, pGCState->fPIF, (pCtx->ss.Sel & X86_SEL_RPL)));1464 LogFlow(("RR%u-E: %08x ESP=%08x IF=%d VMFlags=%x PIF=%d\n", (pCtx->ss.Sel & X86_SEL_RPL), pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, pGCState->fPIF)); 1465 1465 LogFlow(("VMMR3RawRunGC returned %Rrc\n", rc)); 1466 1466 -
trunk/src/VBox/VMM/VMMR3/SELM.cpp
r42372 r42407 112 112 113 113 114 /******************************************************************************* 115 * Global Variables * 116 *******************************************************************************/ 117 #ifdef LOG_ENABLED 118 /** Segment register names. */ 119 static char const g_aszSRegNms[X86_SREG_COUNT][4] = { "ES", "CS", "SS", "DS", "FS", "GS" }; 120 #endif 121 114 122 115 123 /** … … 207 215 STAM_REL_REG(pVM, &pVM->selm.s.StatHyperSelsChanged, STAMTYPE_COUNTER, "/SELM/HyperSels/Changed", STAMUNIT_OCCURENCES, "The number of times we had to relocate our hypervisor selectors."); 208 216 STAM_REL_REG(pVM, &pVM->selm.s.StatScanForHyperSels, STAMTYPE_COUNTER, "/SELM/HyperSels/Scan", STAMUNIT_OCCURENCES, "The number of times we had find free hypervisor selectors."); 217 218 STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_ES], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleES", STAMUNIT_OCCURENCES, "Stale ES was detected in UpdateFromCPUM."); 219 STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_CS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleCS", STAMUNIT_OCCURENCES, "Stale CS was detected in UpdateFromCPUM."); 220 STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_SS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleSS", STAMUNIT_OCCURENCES, "Stale SS was detected in UpdateFromCPUM."); 221 STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_DS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleDS", STAMUNIT_OCCURENCES, "Stale DS was detected in UpdateFromCPUM."); 222 STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_FS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleFS", STAMUNIT_OCCURENCES, "Stale FS was detected in UpdateFromCPUM."); 223 STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_GS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleGS", STAMUNIT_OCCURENCES, "Stale GS was detected in UpdateFromCPUM."); 224 225 STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_ES], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleES", STAMUNIT_OCCURENCES, "Already stale ES in UpdateFromCPUM."); 226 STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_CS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleCS", STAMUNIT_OCCURENCES, "Already stale CS in UpdateFromCPUM."); 227 STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_SS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleSS", STAMUNIT_OCCURENCES, "Already stale SS in UpdateFromCPUM."); 228 STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_DS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleDS", STAMUNIT_OCCURENCES, "Already stale DS in UpdateFromCPUM."); 229 STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_FS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleFS", STAMUNIT_OCCURENCES, "Already stale FS in UpdateFromCPUM."); 230 STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_GS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleGS", STAMUNIT_OCCURENCES, "Already stale GS in UpdateFromCPUM."); 231 232 STAM_REL_REG(pVM, &pVM->selm.s.StatStaleToUnstaleSReg, STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/StaleToUnstale", STAMUNIT_OCCURENCES, "Transitions from stale to unstale UpdateFromCPUM."); 233 234 STAM_REG( pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_ES], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedES", STAMUNIT_OCCURENCES, "Updated hidden ES values in UpdateFromCPUM."); 235 STAM_REG( pVM, 
&pVM->selm.s.aStatUpdatedSReg[X86_SREG_CS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedCS", STAMUNIT_OCCURENCES, "Updated hidden CS values in UpdateFromCPUM."); 236 STAM_REG( pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_SS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedSS", STAMUNIT_OCCURENCES, "Updated hidden SS values in UpdateFromCPUM."); 237 STAM_REG( pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_DS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedDS", STAMUNIT_OCCURENCES, "Updated hidden DS values in UpdateFromCPUM."); 238 STAM_REG( pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_FS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedFS", STAMUNIT_OCCURENCES, "Updated hidden FS values in UpdateFromCPUM."); 239 STAM_REG( pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_GS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedGS", STAMUNIT_OCCURENCES, "Updated hidden GS values in UpdateFromCPUM."); 240 241 STAM_REG( pVM, &pVM->selm.s.StatLoadHidSelGst, STAMTYPE_COUNTER, "/SELM/LoadHidSel/LoadedGuest", STAMUNIT_OCCURENCES, "SELMLoadHiddenSelectorReg: Loaded from guest tables."); 242 STAM_REG( pVM, &pVM->selm.s.StatLoadHidSelShw, STAMTYPE_COUNTER, "/SELM/LoadHidSel/LoadedShadow", STAMUNIT_OCCURENCES, "SELMLoadHiddenSelectorReg: Loaded from shadow tables."); 243 STAM_REL_REG(pVM, &pVM->selm.s.StatLoadHidSelReadErrors, STAMTYPE_COUNTER, "/SELM/LoadHidSel/GstReadErrors", STAMUNIT_OCCURENCES, "SELMLoadHiddenSelectorReg: Guest table read errors."); 244 STAM_REL_REG(pVM, &pVM->selm.s.StatLoadHidSelGstNoGood, STAMTYPE_COUNTER, "/SELM/LoadHidSel/NoGoodGuest", STAMUNIT_OCCURENCES, "SELMLoadHiddenSelectorReg: No good guest table entry."); 209 245 210 246 /* … … 774 810 775 811 812 /** 813 * Updates (syncs) the shadow GDT. 814 * 815 * @returns VBox status code. 816 * @param pVM The VM handle. 817 * @param pVCpu The current virtual CPU. 818 */ 776 819 static int selmR3UpdateShadowGdt(PVM pVM, PVMCPU pVCpu) 777 820 { … … 829 872 { 830 873 if (pu8DstInvalid != pu8Dst) 831 memset(pu8DstInvalid, 0, pu8Dst - pu8DstInvalid);874 RT_BZERO(pu8DstInvalid, pu8Dst - pu8DstInvalid); 832 875 GCPtrSrc += cb; 833 876 pu8Dst += cb; … … 854 897 /* If any GDTEs was invalidated, zero them. */ 855 898 if (cbEffLimit < pVM->selm.s.cbEffGuestGdtLimit) 856 memset(pu8DstInvalid + cbEffLimit + 1, 0, pVM->selm.s.cbEffGuestGdtLimit - cbEffLimit);899 RT_BZERO(pu8DstInvalid + cbEffLimit + 1, pVM->selm.s.cbEffGuestGdtLimit - cbEffLimit); 857 900 } 858 901 … … 916 959 { 917 960 if (pGDTE->Gen.u1Present) 918 { 919 /* 920 * Code and data selectors are generally 1:1, with the 921 * 'little' adjustment we do for DPL 0 selectors. 922 */ 923 if (pGDTE->Gen.u1DescType) 924 { 925 /* 926 * Hack for A-bit against Trap E on read-only GDT. 927 */ 928 /** @todo Fix this by loading ds and cs before turning off WP. */ 929 pGDTE->Gen.u4Type |= X86_SEL_TYPE_ACCESSED; 930 931 /* 932 * All DPL 0 code and data segments are squeezed into DPL 1. 933 * 934 * We're skipping conforming segments here because those 935 * cannot give us any trouble. 936 */ 937 if ( pGDTE->Gen.u2Dpl == 0 938 && (pGDTE->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) 939 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) ) 940 pGDTE->Gen.u2Dpl = 1; 941 } 942 else 943 { 944 /* 945 * System type selectors are marked not present. 946 * Recompiler or special handling is required for these. 947 */ 948 /** @todo what about interrupt gates and rawr0? */ 949 pGDTE->Gen.u1Present = 0; 950 } 951 } 961 selmGuestToShadowDesc(pGDTE); 952 962 953 963 /* Next GDT entry. 
*/ … … 990 1000 { 991 1001 if (pVM->selm.s.GuestGdtr.cbGdt > GDTR.cbGdt) 992 memset(pGDTE, 0, pVM->selm.s.GuestGdtr.cbGdt - GDTR.cbGdt);1002 RT_BZERO(pGDTE, pVM->selm.s.GuestGdtr.cbGdt - GDTR.cbGdt); 993 1003 } 994 1004 … … 1068 1078 pVM->selm.s.GCPtrGuestLdt = RTRCPTR_MAX; 1069 1079 } 1080 pVM->selm.s.cbLdtLimit = 0; 1070 1081 return VINF_SUCCESS; 1071 1082 } … … 1075 1086 */ 1076 1087 PX86DESC pDesc = &pVM->selm.s.paGdtR3[SelLdt >> X86_SEL_SHIFT]; 1077 RTGCPTR GCPtrLdt = X86DESC_BASE(*pDesc); 1078 unsigned cbLdt = X86DESC_LIMIT(*pDesc); 1079 if (pDesc->Gen.u1Granularity) 1080 cbLdt = (cbLdt << PAGE_SHIFT) | PAGE_OFFSET_MASK; 1088 RTGCPTR GCPtrLdt = X86DESC_BASE(pDesc); 1089 uint32_t cbLdt = X86DESC_LIMIT_G(pDesc); 1081 1090 1082 1091 /* … … 1228 1237 { 1229 1238 if (pLDTE->Gen.u1Present) 1230 { 1231 /* 1232 * Code and data selectors are generally 1:1, with the 1233 * 'little' adjustment we do for DPL 0 selectors. 1234 */ 1235 if (pLDTE->Gen.u1DescType) 1236 { 1237 /* 1238 * Hack for A-bit against Trap E on read-only GDT. 1239 */ 1240 /** @todo Fix this by loading ds and cs before turning off WP. */ 1241 if (!(pLDTE->Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 1242 pLDTE->Gen.u4Type |= X86_SEL_TYPE_ACCESSED; 1243 1244 /* 1245 * All DPL 0 code and data segments are squeezed into DPL 1. 1246 * 1247 * We're skipping conforming segments here because those 1248 * cannot give us any trouble. 1249 */ 1250 if ( pLDTE->Gen.u2Dpl == 0 1251 && (pLDTE->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) 1252 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) ) 1253 pLDTE->Gen.u2Dpl = 1; 1254 } 1255 else 1256 { 1257 /* 1258 * System type selectors are marked not present. 1259 * Recompiler or special handling is required for these. 1260 */ 1261 /** @todo what about interrupt gates and rawr0? */ 1262 pLDTE->Gen.u1Present = 0; 1263 } 1264 } 1239 selmGuestToShadowDesc(pLDTE); 1265 1240 1266 1241 /* Next LDT entry. */ … … 1270 1245 else 1271 1246 { 1247 RT_BZERO(pShadowLDT, cbChunk); 1272 1248 AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc=%Rrc\n", rc)); 1273 1249 rc = PGMMapSetPage(pVM, GCPtrShadowLDT & PAGE_BASE_GC_MASK, PAGE_SIZE, 0); … … 1289 1265 1290 1266 /** 1267 * Checks and updates segment selector registers. 1268 * 1269 * @returns VBox strict status code. 1270 * @retval VINF_EM_RESCHEDULE_REM if a stale register was found. 1271 * 1272 * @param pVM The VM handle. 1273 * @param pVCpu The current virtual CPU. 1274 */ 1275 static VBOXSTRICTRC selmR3UpdateSegmentRegisters(PVM pVM, PVMCPU pVCpu) 1276 { 1277 Assert(CPUMIsGuestInProtectedMode(pVCpu)); 1278 1279 /* 1280 * No stale selectors in V8086 mode. 1281 */ 1282 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu); 1283 if (pCtx->eflags.Bits.u1VM) 1284 return VINF_SUCCESS; 1285 1286 /* 1287 * Check for stale selectors and load hidden register bits where they 1288 * are missing. 1289 */ 1290 uint32_t uCpl = CPUMGetGuestCPL(pVCpu); 1291 VBOXSTRICTRC rcStrict = VINF_SUCCESS; 1292 PCPUMSELREG paSReg = CPUMCTX_FIRST_SREG(pCtx); 1293 for (uint32_t iSReg = 0; iSReg < X86_SREG_COUNT; iSReg++) 1294 { 1295 RTSEL const Sel = paSReg[iSReg].Sel & (X86_SEL_MASK | X86_SEL_LDT); 1296 if (Sel & (X86_SEL_MASK | X86_SEL_LDT)) 1297 { 1298 /* Get the shadow descriptor entry corresponding to this. 
*/ 1299 static X86DESC const s_NotPresentDesc = { { 0 } }; 1300 PCX86DESC pDesc; 1301 if (!(Sel & X86_SEL_LDT)) 1302 { 1303 if ((Sel | (sizeof(*pDesc) - 1)) <= pCtx->gdtr.cbGdt) 1304 pDesc = &pVM->selm.s.paGdtR3[Sel >> X86_SEL_SHIFT]; 1305 else 1306 pDesc = &s_NotPresentDesc; 1307 } 1308 else 1309 { 1310 if ((Sel | (sizeof(*pDesc) - 1)) <= pVM->selm.s.cbLdtLimit) 1311 pDesc = &((PCX86DESC)((uintptr_t)pVM->selm.s.pvLdtR3 + pVM->selm.s.offLdtHyper))[Sel >> X86_SEL_SHIFT]; 1312 else 1313 pDesc = &s_NotPresentDesc; 1314 } 1315 1316 /* Check the segment register. */ 1317 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &paSReg[iSReg])) 1318 { 1319 if (!(paSReg[iSReg].fFlags & CPUMSELREG_FLAGS_STALE)) 1320 { 1321 /* Did it go stale? */ 1322 if (selmIsSRegStale32(&paSReg[iSReg], pDesc, iSReg)) 1323 { 1324 Log2(("SELM: Detected stale %s=%#x (was valid)\n", g_aszSRegNms[iSReg], Sel)); 1325 STAM_REL_COUNTER_INC(&pVM->selm.s.aStatDetectedStaleSReg[iSReg]); 1326 paSReg[iSReg].fFlags |= CPUMSELREG_FLAGS_STALE; 1327 rcStrict = VINF_EM_RESCHEDULE_REM; 1328 } 1329 } 1330 else 1331 { 1332 /* Did it stop being stale? I.e. did the guest change it things 1333 back to the way they were? */ 1334 if (!selmIsSRegStale32(&paSReg[iSReg], pDesc, iSReg)) 1335 { 1336 STAM_REL_COUNTER_INC(&pVM->selm.s.StatStaleToUnstaleSReg); 1337 paSReg[iSReg].fFlags &= CPUMSELREG_FLAGS_STALE; 1338 } 1339 else 1340 { 1341 Log2(("SELM: Already stale %s=%#x\n", g_aszSRegNms[iSReg], Sel)); 1342 STAM_REL_COUNTER_INC(&pVM->selm.s.aStatAlreadyStaleSReg[iSReg]); 1343 rcStrict = VINF_EM_RESCHEDULE_REM; 1344 } 1345 } 1346 } 1347 /* Load the hidden registers if it's a valid descriptor for the 1348 current segment register. */ 1349 else if (selmIsShwDescGoodForSReg(&paSReg[iSReg], pDesc, iSReg, uCpl)) 1350 { 1351 selmLoadHiddenSRegFromShadowDesc(&paSReg[iSReg], pDesc); 1352 STAM_COUNTER_INC(&pVM->selm.s.aStatUpdatedSReg[iSReg]); 1353 } 1354 /* It's stale. */ 1355 else 1356 { 1357 Log2(("SELM: Detected stale %s=%#x (wasn't valid)\n", g_aszSRegNms[iSReg], Sel)); 1358 STAM_REL_COUNTER_INC(&pVM->selm.s.aStatDetectedStaleSReg[iSReg]); 1359 paSReg[iSReg].fFlags = CPUMSELREG_FLAGS_STALE; 1360 rcStrict = VINF_EM_RESCHEDULE_REM; 1361 } 1362 } 1363 /* else: 0 selector, ignore. */ 1364 } 1365 1366 return rcStrict; 1367 } 1368 1369 1370 /** 1291 1371 * Updates the Guest GDT & LDT virtualization based on current CPU state. 1292 1372 * … … 1295 1375 * @param pVCpu Pointer to the VMCPU. 1296 1376 */ 1297 VMMR3DECL( int) SELMR3UpdateFromCPUM(PVM pVM, PVMCPU pVCpu)1377 VMMR3DECL(VBOXSTRICTRC) SELMR3UpdateFromCPUM(PVM pVM, PVMCPU pVCpu) 1298 1378 { 1299 1379 if (pVM->selm.s.fDisableMonitoring) … … 1341 1421 } 1342 1422 1343 #if 0 1344 /* 1345 * Check for stale selectors and load hidden register bits where they 1346 * are missing. 1347 */ 1348 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu); 1349 #endif 1350 rc = VINF_SUCCESS; 1423 /* 1424 * Check selector registers. 
1425 */ 1426 VBOXSTRICTRC rcStrict = selmR3UpdateSegmentRegisters(pVM, pVCpu); 1351 1427 1352 1428 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a); 1353 return rc ;1429 return rcStrict; 1354 1430 } 1355 1431 … … 1731 1807 return rc; 1732 1808 } 1733 RTGCPTR GCPtrLDTEGuest = X86DESC_BASE(LDTDesc); 1734 unsigned cbLdt = X86DESC_LIMIT(LDTDesc); 1735 if (LDTDesc.Gen.u1Granularity) 1736 cbLdt = (cbLdt << PAGE_SHIFT) | PAGE_OFFSET_MASK; 1809 RTGCPTR GCPtrLDTEGuest = X86DESC_BASE(&LDTDesc); 1810 uint32_t cbLdt = X86DESC_LIMIT_G(&LDTDesc); 1737 1811 1738 1812 /* … … 1978 2052 if (ppvLdt) 1979 2053 { 1980 *ppvLdt = (RTGCPTR)X86DESC_BASE( Desc);1981 *pcbLimit = X86DESC_LIMIT (Desc);2054 *ppvLdt = (RTGCPTR)X86DESC_BASE(&Desc); 2055 *pcbLimit = X86DESC_LIMIT_G(&Desc); 1982 2056 } 1983 2057 return VINF_SUCCESS; … … 2033 2107 return VERR_INVALID_SELECTOR; 2034 2108 2035 uint32_t cbLimit = X86DESC_LIMIT(Desc); 2036 if (Desc.Gen.u1Granularity) 2037 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK; 2109 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc); 2038 2110 if ((uint32_t)(Sel & X86_SEL_MASK) + sizeof(X86DESC) - 1 > cbLimit) 2039 2111 return VERR_INVALID_SELECTOR; 2040 2112 2041 2113 /* calc the descriptor location. */ 2042 GCPtrDesc = X86DESC64_BASE( Desc);2114 GCPtrDesc = X86DESC64_BASE(&Desc); 2043 2115 GCPtrDesc += (Sel & X86_SEL_MASK); 2044 2116 } … … 2080 2152 else 2081 2153 { 2082 pSelInfo->cbLimit = X86DESC_LIMIT(Desc); 2083 if (Desc.Gen.u1Granularity) 2084 pSelInfo->cbLimit = (pSelInfo->cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK; 2085 pSelInfo->GCPtrBase = X86DESC_BASE(Desc); 2154 pSelInfo->cbLimit = X86DESC_LIMIT_G(&Desc); 2155 pSelInfo->GCPtrBase = X86DESC_BASE(&Desc); 2086 2156 } 2087 2157 pSelInfo->SelGate = 0; … … 2093 2163 /* Note. LDT descriptors are weird in long mode, we ignore the footnote 2094 2164 in the AMD manual here as a simplification. 
*/ 2095 pSelInfo->GCPtrBase = X86DESC64_BASE(Desc); 2096 pSelInfo->cbLimit = X86DESC_LIMIT(Desc); 2097 if (Desc.Gen.u1Granularity) 2098 pSelInfo->cbLimit = (pSelInfo->cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK; 2099 pSelInfo->SelGate = 0; 2165 pSelInfo->GCPtrBase = X86DESC64_BASE(&Desc); 2166 pSelInfo->cbLimit = X86DESC_LIMIT_G(&Desc); 2167 pSelInfo->SelGate = 0; 2100 2168 } 2101 2169 else if ( Desc.Gen.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE … … 2103 2171 || Desc.Gen.u4Type == AMD64_SEL_TYPE_SYS_INT_GATE) 2104 2172 { 2105 pSelInfo->cbLimit = X86DESC64_BASE( Desc);2173 pSelInfo->cbLimit = X86DESC64_BASE(&Desc); 2106 2174 pSelInfo->GCPtrBase = Desc.Gate.u16OffsetLow 2107 2175 | ((uint32_t)Desc.Gate.u16OffsetHigh << 16) … … 2140 2208 || !(pDesc->Gen.u4Type & 4)) 2141 2209 { 2142 pSelInfo->cbLimit = X86DESC_LIMIT(*pDesc); 2143 if (pDesc->Gen.u1Granularity) 2144 pSelInfo->cbLimit = (pSelInfo->cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK; 2145 pSelInfo->GCPtrBase = X86DESC_BASE(*pDesc); 2146 pSelInfo->SelGate = 0; 2210 pSelInfo->cbLimit = X86DESC_LIMIT_G(pDesc); 2211 pSelInfo->GCPtrBase = X86DESC_BASE(pDesc); 2212 pSelInfo->SelGate = 0; 2147 2213 } 2148 2214 else if (pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_UNDEFINED4) … … 2245 2311 return VERR_INVALID_SELECTOR; 2246 2312 2247 unsigned cbLimit = X86DESC_LIMIT(Desc); 2248 if (Desc.Gen.u1Granularity) 2249 cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK; 2250 if ((unsigned)(Sel & X86_SEL_MASK) + sizeof(X86DESC) - 1 > cbLimit) 2313 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc); 2314 if ((uint32_t)(Sel & X86_SEL_MASK) + sizeof(X86DESC) - 1 > cbLimit) 2251 2315 return VERR_INVALID_SELECTOR; 2252 2316 2253 2317 /* calc the descriptor location. */ 2254 GCPtrDesc = X86DESC_BASE( Desc);2318 GCPtrDesc = X86DESC_BASE(&Desc); 2255 2319 GCPtrDesc += (Sel & X86_SEL_MASK); 2256 2320 } … … 2466 2530 * Limit and Base and format the output. 2467 2531 */ 2468 uint32_t u32Limit = X86DESC_LIMIT(Desc); 2469 if (Desc.Gen.u1Granularity) 2470 u32Limit = u32Limit << PAGE_SHIFT | PAGE_OFFSET_MASK; 2471 uint32_t u32Base = X86DESC_BASE(Desc); 2532 uint32_t u32Limit = X86DESC_LIMIT_G(&Desc); 2533 uint32_t u32Base = X86DESC_BASE(&Desc); 2472 2534 2473 2535 RTStrPrintf(pszOutput, cchOutput, "%04x - %08x %08x - base=%08x limit=%08x dpl=%d %s", -
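A recurring mechanical change in the SELM.cpp hunks above is replacing the open-coded limit computation (X86DESC_LIMIT() followed by the granularity shift with PAGE_SHIFT/PAGE_OFFSET_MASK) with a single X86DESC_LIMIT_G() call taking a descriptor pointer. As a rough, self-contained illustration of what that helper presumably folds together (its actual definition lives in iprt/x86.h and is not reproduced here):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for an X86DESC_LIMIT_G()-style helper: the raw 20-bit limit
       plus the 4K-granularity expansion the old call sites open-coded. */
    static uint32_t desc_limit_g(uint32_t u20RawLimit, int fGranularity)
    {
        uint32_t cbLimit = u20RawLimit & 0xfffff;   /* 20-bit limit field */
        if (fGranularity)                           /* G=1: limit counted in 4K pages */
            cbLimit = (cbLimit << 12) | 0xfff;
        return cbLimit;
    }

    int main(void)
    {
        printf("byte granular: %#x\n", desc_limit_g(0x0ffff, 0)); /* 0xffff     */
        printf("page granular: %#x\n", desc_limit_g(0xfffff, 1)); /* 0xffffffff */
        return 0;
    }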
trunk/src/VBox/VMM/VMMRC/SELMRC.cpp
r41965 r42407 36 36 37 37 38 /******************************************************************************* 39 * Global Variables * 40 *******************************************************************************/ 41 #ifdef LOG_ENABLED 42 /** Segment register names. */ 43 static char const g_aszSRegNms[X86_SREG_COUNT][4] = { "ES", "CS", "SS", "DS", "FS", "GS" }; 44 #endif 45 46 38 47 /** 39 48 * Synchronizes one GDT entry (guest -> shadow). 40 49 * 41 * @returns VBox status code (appropriate for trap handling and GC return). 42 * @param pVM Pointer to the VM. 50 * @returns VBox strict status code (appropriate for trap handling and GC 51 * return). 52 * @retval VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT 53 * @retval VINF_SELM_SYNC_GDT 54 * @retval VINF_EM_RESCHEDULE_REM 55 * 56 * @param pVM Pointer to the VM. 57 * @param pVCpu The current virtual CPU. 43 58 * @param pRegFrame Trap register frame. 44 59 * @param iGDTEntry The GDT entry to sync. 45 * /46 static int selmGCSyncGDTEntry(PVM pVM, PCPUMCTXCORE pRegFrame, unsigned iGDTEntry) 47 { 48 PVMCPU pVCpu = VMMGetCpu0(pVM); 49 60 * 61 * @remarks Caller checks that this isn't the LDT entry! 62 */ 63 static VBOXSTRICTRC selmRCSyncGDTEntry(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, unsigned iGDTEntry) 64 { 50 65 Log2(("GDT %04X LDTR=%04X\n", iGDTEntry, CPUMGetGuestLDTR(pVCpu))); 51 66 … … 57 72 unsigned offEntry = iGDTEntry * sizeof(X86DESC); 58 73 if ( iGDTEntry >= SELM_GDT_ELEMENTS 59 || offEntry >GdtrGuest.cbGdt)60 return VINF_ EM_RAW_EMULATE_INSTR_GDT_FAULT;74 || offEntry > GdtrGuest.cbGdt) 75 return VINF_SUCCESS; /* ignore */ 61 76 62 77 /* … … 66 81 int rc = MMGCRamRead(pVM, &Desc, (uint8_t *)(uintptr_t)GdtrGuest.pGdt + offEntry, sizeof(X86DESC)); 67 82 if (RT_FAILURE(rc)) 68 return VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT; 83 { 84 rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, (uintptr_t)GdtrGuest.pGdt + offEntry, sizeof(X86DESC)); 85 if (RT_FAILURE(rc)) 86 { 87 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT); 88 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* paranoia */ 89 return VINF_EM_RESCHEDULE_REM; 90 } 91 } 69 92 70 93 /* … … 85 108 if (Desc.Gen.u1Present) 86 109 { 87 Log(("selmGCSyncGDTEntry: Sel=%d Desc=%.8Rhxs: detected conflict!!\n", Sel, &Desc)); 88 return VINF_SELM_SYNC_GDT; 89 } 90 Log(("selmGCSyncGDTEntry: Sel=%d Desc=%.8Rhxs: potential conflict (still not present)!\n", Sel, &Desc)); 110 Log(("selmRCSyncGDTEntry: Sel=%d Desc=%.8Rhxs: detected conflict!!\n", Sel, &Desc)); 111 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT); 112 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); 113 return VINF_SELM_SYNC_GDT; /** @todo this status code is ignored, unfortunately. */ 114 } 115 Log(("selmRCSyncGDTEntry: Sel=%d Desc=%.8Rhxs: potential conflict (still not present)!\n", Sel, &Desc)); 91 116 92 117 /* Note: we can't continue below or else we'll change the shadow descriptor!! */ … … 96 121 97 122 /* 98 * Code and data selectors are generally 1:1, with the 99 * 'little' adjustment we do for DPL 0 selectors. 100 */ 101 PX86DESC pShadowDescr = &pVM->selm.s.paGdtRC[iGDTEntry]; 102 if (Desc.Gen.u1DescType) 103 { 104 /* 105 * Hack for A-bit against Trap E on read-only GDT. 106 */ 107 /** @todo Fix this by loading ds and cs before turning off WP. */ 108 Desc.Gen.u4Type |= X86_SEL_TYPE_ACCESSED; 109 110 /* 111 * All DPL 0 code and data segments are squeezed into DPL 1. 112 * 113 * We're skipping conforming segments here because those 114 * cannot give us any trouble. 
115 */ 116 if ( Desc.Gen.u2Dpl == 0 117 && (Desc.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) 118 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) ) 119 Desc.Gen.u2Dpl = 1; 120 } 121 else 122 { 123 /* 124 * System type selectors are marked not present. 125 * Recompiler or special handling is required for these. 126 */ 127 /** @todo what about interrupt gates and rawr0? */ 128 Desc.Gen.u1Present = 0; 129 } 130 //Log(("O: base=%08X limit=%08X attr=%04X\n", X86DESC_BASE(*pShadowDescr)), X86DESC_LIMIT(*pShadowDescr), (pShadowDescr->au32[1] >> 8) & 0xFFFF )); 123 * Convert the guest selector to a shadow selector and update the shadow GDT. 124 */ 125 selmGuestToShadowDesc(&Desc); 126 PX86DESC pShwDescr = &pVM->selm.s.paGdtRC[iGDTEntry]; 127 //Log(("O: base=%08X limit=%08X attr=%04X\n", X86DESC_BASE(*pShwDescr)), X86DESC_LIMIT(*pShwDescr), (pShwDescr->au32[1] >> 8) & 0xFFFF )); 131 128 //Log(("N: base=%08X limit=%08X attr=%04X\n", X86DESC_BASE(Desc)), X86DESC_LIMIT(Desc), (Desc.au32[1] >> 8) & 0xFFFF )); 132 *pSh adowDescr = Desc;129 *pShwDescr = Desc; 133 130 134 131 /* 135 132 * Detect and mark stale registers. 136 133 */ 137 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu); 138 PCPUMSELREG paSRegCtx = &pCtx->es; 139 PCPUMSELREG paSRegFrm = &pRegFrame->es; 140 for (unsigned i = 0; i <= X86_SREG_GS; i++) 141 if (Sel == (paSRegFrm[i].Sel & X86_SEL_MASK)) 142 { 143 /** @todo we clear the valid flag here, maybe we shouldn't... but that would 144 * require implementing handling of stale registers in raw-mode. 145 * Tricky, at least for SS and CS. */ 146 paSRegFrm[i].fFlags = CPUMSELREG_FLAGS_STALE; 147 paSRegCtx[i].fFlags = CPUMSELREG_FLAGS_STALE; 148 } 149 150 /* 151 * Check if we change the LDT selector. 152 */ 153 if (Sel == CPUMGetGuestLDTR(pVCpu)) /** @todo this isn't correct in two(+) ways! 1. It shouldn't be done until the LDTR is reloaded. 2. It caused the next instruction to be emulated. 
*/ 154 { 155 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT); 156 return VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT; 157 } 158 159 #ifdef LOG_ENABLED 160 if (Sel == (pRegFrame->cs.Sel & X86_SEL_MASK)) 161 Log(("GDT write to selector in CS register %04X\n", pRegFrame->cs.Sel)); 162 else if (Sel == (pRegFrame->ds.Sel & X86_SEL_MASK)) 163 Log(("GDT write to selector in DS register %04X\n", pRegFrame->ds.Sel)); 164 else if (Sel == (pRegFrame->es.Sel & X86_SEL_MASK)) 165 Log(("GDT write to selector in ES register %04X\n", pRegFrame->es.Sel)); 166 else if (Sel == (pRegFrame->fs.Sel & X86_SEL_MASK)) 167 Log(("GDT write to selector in FS register %04X\n", pRegFrame->fs.Sel)); 168 else if (Sel == (pRegFrame->gs.Sel & X86_SEL_MASK)) 169 Log(("GDT write to selector in GS register %04X\n", pRegFrame->gs.Sel)); 170 else if (Sel == (pRegFrame->ss.Sel & X86_SEL_MASK)) 171 Log(("GDT write to selector in SS register %04X\n", pRegFrame->ss.Sel)); 172 #endif 173 174 return VINF_SUCCESS; 175 } 134 VBOXSTRICTRC rcStrict = VINF_SUCCESS; 135 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu); Assert(CPUMCTX2CORE(pCtx) == pRegFrame); 136 PCPUMSELREG paSReg = CPUMCTX_FIRST_SREG(pCtx); 137 for (unsigned iSReg = 0; iSReg <= X86_SREG_COUNT; iSReg++) 138 { 139 if (Sel == (paSReg[iSReg].Sel & X86_SEL_MASK_RPL)) 140 { 141 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &paSReg[iSReg])) 142 { 143 if (selmIsSRegStale32(&paSReg[iSReg], &Desc, iSReg)) 144 { 145 Log(("GDT write to selector in %s register %04X (now stale)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel)); 146 paSReg[iSReg].fFlags |= CPUMSELREG_FLAGS_STALE; 147 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* paranoia */ 148 rcStrict = VINF_EM_RESCHEDULE_REM; 149 } 150 else if (paSReg[iSReg].fFlags & CPUMSELREG_FLAGS_STALE) 151 { 152 Log(("GDT write to selector in %s register %04X (no longer stale)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel)); 153 paSReg[iSReg].fFlags &= ~CPUMSELREG_FLAGS_STALE; 154 } 155 else 156 Log(("GDT write to selector in %s register %04X (no important change)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel)); 157 } 158 else 159 Log(("GDT write to selector in %s register %04X (out of sync)\n", paSReg[iSReg].Sel)); 160 } 161 } 162 163 /** @todo Detect stale LDTR as well? */ 164 165 return rcStrict; 166 } 167 168 169 /** 170 * Synchronizes any segment registers refering to the given GDT entry. 171 * 172 * This is called before any changes performed and shadowed, so it's possible to 173 * look in both the shadow and guest descriptor table entries for hidden 174 * register content. 175 * 176 * @param pVM Pointer to the VM. 177 * @param pVCpu The current virtual CPU. 178 * @param pRegFrame Trap register frame. 179 * @param iGDTEntry The GDT entry to sync. 180 */ 181 static void selmRCSyncGDTSegRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, unsigned iGDTEntry) 182 { 183 /* 184 * Validate the offset. 185 */ 186 VBOXGDTR GdtrGuest; 187 CPUMGetGuestGDTR(pVCpu, &GdtrGuest); 188 unsigned offEntry = iGDTEntry * sizeof(X86DESC); 189 if ( iGDTEntry >= SELM_GDT_ELEMENTS 190 || offEntry > GdtrGuest.cbGdt) 191 return; 192 193 /* 194 * Sync outdated segment registers using this entry. 
195 */ 196 PCX86DESC pDesc = &pVM->selm.s.CTX_SUFF(paGdt)[iGDTEntry]; 197 uint32_t uCpl = CPUMGetGuestCPL(pVCpu); 198 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu); Assert(CPUMCTX2CORE(pCtx) == pRegFrame); 199 PCPUMSELREG paSReg = CPUMCTX_FIRST_SREG(pCtx); 200 for (unsigned iSReg = 0; iSReg <= X86_SREG_COUNT; iSReg++) 201 { 202 if (iGDTEntry == (paSReg[iSReg].Sel & X86_SEL_MASK_RPL)) 203 { 204 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &paSReg[iSReg])) 205 { 206 if (selmIsShwDescGoodForSReg(&paSReg[iSReg], pDesc, iSReg, uCpl)) 207 { 208 selmLoadHiddenSRegFromShadowDesc(&paSReg[iSReg], pDesc); 209 Log(("selmRCSyncGDTSegRegs: Updated %s\n", g_aszSRegNms[iSReg])); 210 } 211 else 212 Log(("selmRCSyncGDTSegRegs: Bad shadow descriptor %#x (for %s): %.8Rhxs \n", 213 iGDTEntry, g_aszSRegNms[iSReg], pDesc)); 214 } 215 } 216 } 217 218 } 219 176 220 177 221 … … 195 239 196 240 /* 197 * First check if this is the LDT entry. 198 * LDT updates are problems since an invalid LDT entry will cause trouble during worldswitch. 199 */ 200 int rc; 201 if (CPUMGetGuestLDTR(pVCpu) / sizeof(X86DESC) == offRange / sizeof(X86DESC)) 202 { 203 Log(("LDTR selector change -> fall back to HC!!\n")); 204 rc = VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT; 205 /** @todo We're not handling changed to the selectors in LDTR and TR correctly at all. 206 * We should ignore any changes to those and sync them only when they are loaded by the guest! */ 207 } 208 else 209 { 210 /* 211 * Attempt to emulate the instruction and sync the affected entries. 212 */ 213 /** @todo should check if any affected selectors are loaded. */ 214 uint32_t cb; 215 rc = EMInterpretInstructionEx(pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb); 216 if (RT_SUCCESS(rc) && cb) 217 { 218 unsigned iGDTE1 = offRange / sizeof(X86DESC); 219 int rc2 = selmGCSyncGDTEntry(pVM, pRegFrame, iGDTE1); 220 if (rc2 == VINF_SUCCESS) 221 { 222 Assert(cb); 223 unsigned iGDTE2 = (offRange + cb - 1) / sizeof(X86DESC); 241 * Check if any selectors might be affected. 242 */ 243 unsigned const iGDTE1 = offRange >> X86_SEL_SHIFT; 244 selmRCSyncGDTSegRegs(pVM, pVCpu, pRegFrame, iGDTE1); 245 if (((offRange + 8) >> X86_SEL_SHIFT) != iGDTE1) 246 selmRCSyncGDTSegRegs(pVM, pVCpu, pRegFrame, iGDTE1 + 1); 247 248 /* 249 * Attempt to emulate the instruction and sync the affected entries. 250 */ 251 uint32_t cb; 252 int rc = EMInterpretInstructionEx(pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb); 253 if (RT_SUCCESS(rc) && cb) 254 { 255 /* Check if the LDT was in any way affected. Do not sync the 256 shadow GDT if that's the case or we might have trouble in 257 the world switcher (or so they say). */ 258 unsigned const iLdt = CPUMGetGuestLDTR(pVCpu) >> X86_SEL_SHIFT; 259 unsigned const iGDTE2 = (offRange + cb - 1) >> X86_SEL_SHIFT; 260 if ( iGDTE1 == iLdt 261 || iGDTE2 == iLdt) 262 { 263 Log(("LDTR selector change -> fall back to HC!!\n")); 264 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT); 265 rc = VINF_SELM_SYNC_GDT; 266 /** @todo Implement correct stale LDT handling. */ 267 } 268 else 269 { 270 /* Sync the shadow GDT and continue provided the update didn't 271 cause any segment registers to go stale in any way. 
*/ 272 int rc2 = selmRCSyncGDTEntry(pVM, pVCpu, pRegFrame, iGDTE1); 273 if (rc2 == VINF_SUCCESS || rc2 == VINF_EM_RESCHEDULE_REM) 274 { 275 if (rc == VINF_SUCCESS) 276 rc = rc2; 277 224 278 if (iGDTE1 != iGDTE2) 225 rc2 = selmGCSyncGDTEntry(pVM, pRegFrame, iGDTE2); 226 if (rc2 == VINF_SUCCESS) 279 { 280 rc2 = selmRCSyncGDTEntry(pVM, pVCpu, pRegFrame, iGDTE2); 281 if (rc == VINF_SUCCESS) 282 rc = rc2; 283 } 284 285 if (rc2 == VINF_SUCCESS || rc2 == VINF_EM_RESCHEDULE_REM) 227 286 { 228 287 STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled); … … 230 289 } 231 290 } 291 292 /* sync failed, return to ring-3 and resync the GDT. */ 232 293 if (rc == VINF_SUCCESS || RT_FAILURE(rc2)) 233 294 rc = rc2; 234 295 } 235 else 236 { 237 Assert(RT_FAILURE(rc)); 238 if (rc == VERR_EM_INTERPRETER) 239 rc = VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT; 240 } 241 } 242 if ( rc != VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT 243 && rc != VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT) 244 { 245 /* Not necessary when we need to go back to the host context to sync the LDT or TSS. */ 296 } 297 else 298 { 299 Assert(RT_FAILURE(rc)); 300 if (rc == VERR_EM_INTERPRETER) 301 rc = VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT; 246 302 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT); 247 303 } 304 248 305 STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTUnhandled); 249 306 return rc; -
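The reworked SELMRC.cpp write handler above first syncs any segment registers referring to the GDT entries the faulting write might touch, then emulates the instruction and shadows the entries actually covered (iGDTE1 .. iGDTE2). A minimal stand-alone sketch of that offset-to-entry mapping, assuming only that descriptors are 8 bytes (X86_SEL_SHIFT == 3):

    #include <stdint.h>
    #include <stdio.h>

    #define SEL_SHIFT 3   /* sizeof(X86DESC) == 8 */

    /* Which descriptor entries does a cb-byte guest write at offset off cover? */
    static void gdt_entries_touched(uint32_t off, uint32_t cb,
                                    unsigned *piFirst, unsigned *piLast)
    {
        *piFirst = off >> SEL_SHIFT;
        *piLast  = (off + cb - 1) >> SEL_SHIFT;
    }

    int main(void)
    {
        unsigned iFirst, iLast;
        gdt_entries_touched(0x23, 4, &iFirst, &iLast);
        printf("write at 0x23, 4 bytes -> entries %u..%u\n", iFirst, iLast); /* 4..4 */
        gdt_entries_touched(0x3c, 8, &iFirst, &iLast);
        printf("write at 0x3c, 8 bytes -> entries %u..%u\n", iFirst, iLast); /* 7..8 */
        return 0;
    }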
trunk/src/VBox/VMM/include/EMHandleRCTmpl.h
r42186 r42407 284 284 rc = VINF_EM_RESCHEDULE_REM; 285 285 break; 286 287 /* 288 * Conflict in GDT, resync and continue. 289 */ 290 case VINF_SELM_SYNC_GDT: 291 AssertMsg(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT), ("VINF_SELM_SYNC_GDT without VMCPU_FF_SELM_SYNC_GDT!\n")); 292 rc = VINF_SUCCESS; 293 break; 286 294 #endif 287 295 -
trunk/src/VBox/VMM/include/SELMInternal.h
r35346 r42407 23 23 #include <VBox/vmm/stam.h> 24 24 #include <VBox/vmm/cpum.h> 25 #include <VBox/log.h> 26 #include <iprt/x86.h> 25 27 26 28 … … 173 175 /** The number of times we had find free hypervisor selectors. */ 174 176 STAMCOUNTER StatScanForHyperSels; 177 /** Counts the times we detected state selectors in SELMR3UpdateFromCPUM. */ 178 STAMCOUNTER aStatDetectedStaleSReg[X86_SREG_COUNT]; 179 /** Counts the times we were called with already state selectors in 180 * SELMR3UpdateFromCPUM. */ 181 STAMCOUNTER aStatAlreadyStaleSReg[X86_SREG_COUNT]; 182 /** Counts the times we found a stale selector becomming valid again. */ 183 STAMCOUNTER StatStaleToUnstaleSReg; 184 #ifdef VBOX_WITH_STATISTICS 185 /** Times we updated hidden selector registers in CPUMR3UpdateFromCPUM. */ 186 STAMCOUNTER aStatUpdatedSReg[X86_SREG_COUNT]; 187 STAMCOUNTER StatLoadHidSelGst; 188 STAMCOUNTER StatLoadHidSelShw; 189 #endif 190 STAMCOUNTER StatLoadHidSelReadErrors; 191 STAMCOUNTER StatLoadHidSelGstNoGood; 175 192 } SELM, *PSELM; 176 193 … … 189 206 RT_C_DECLS_END 190 207 208 209 #ifdef VBOX_WITH_RAW_MODE_NOT_R0 210 211 /** 212 * Checks if a shadow descriptor table entry is good for the given segment 213 * register. 214 * 215 * @returns @c true if good, @c false if not. 216 * @param pSReg The segment register. 217 * @param pShwDesc The shadow descriptor table entry. 218 * @param iSReg The segment register index (X86_SREG_XXX). 219 * @param uCpl The CPL. 220 */ 221 DECLINLINE(bool) selmIsShwDescGoodForSReg(PCCPUMSELREG pSReg, PCX86DESC pShwDesc, uint32_t iSReg, uint32_t uCpl) 222 { 223 /* 224 * See iemMiscValidateNewSS, iemCImpl_LoadSReg and intel+amd manuals. 225 */ 226 227 if (!pShwDesc->Gen.u1Present) 228 { 229 Log(("selmIsShwDescGoodForSReg: Not present\n")); 230 return false; 231 } 232 233 if (!pShwDesc->Gen.u1DescType) 234 { 235 Log(("selmIsShwDescGoodForSReg: System descriptor\n")); 236 return false; 237 } 238 239 if (iSReg == X86_SREG_SS) 240 { 241 if ((pShwDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE) 242 { 243 Log(("selmIsShwDescGoodForSReg: Stack must be writable\n")); 244 return false; 245 } 246 if (uCpl > (unsigned)pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available) 247 { 248 Log(("selmIsShwDescGoodForSReg: CPL(%d) > DPL(%d)\n", uCpl, pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available)); 249 return false; 250 } 251 } 252 else 253 { 254 if (iSReg == X86_SREG_CS) 255 { 256 if (!(pShwDesc->Gen.u4Type & X86_SEL_TYPE_CODE)) 257 { 258 Log(("selmIsShwDescGoodForSReg: CS needs code segment\n")); 259 return false; 260 } 261 } 262 else if ((pShwDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE) 263 { 264 Log(("selmIsShwDescGoodForSReg: iSReg=%u execute only\n", iSReg)); 265 return false; 266 } 267 268 if ( (pShwDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) 269 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) 270 && ( ( (pSReg->Sel & X86_SEL_RPL) > (unsigned)pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available 271 && (pSReg->Sel & X86_SEL_RPL) != pShwDesc->Gen.u1Available ) 272 || uCpl > (unsigned)pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available ) ) 273 { 274 Log(("selmIsShwDescGoodForSReg: iSReg=%u DPL=%u CPL=%u RPL=%u\n", iSReg, 275 pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available, uCpl, pSReg->Sel & X86_SEL_RPL)); 276 return false; 277 } 278 } 279 280 return true; 281 } 282 283 284 /** 285 * Checks if a guest descriptor table entry is good for the given segment 286 * register. 287 * 288 * @returns @c true if good, @c false if not. 
289 * @param pVCpu The current virtual CPU. 290 * @param pSReg The segment register. 291 * @param pGstDesc The guest descriptor table entry. 292 * @param iSReg The segment register index (X86_SREG_XXX). 293 * @param uCpl The CPL. 294 */ 295 DECLINLINE(bool) selmIsGstDescGoodForSReg(PVMCPU pVCpu, PCCPUMSELREG pSReg, PCX86DESC pGstDesc, uint32_t iSReg, uint32_t uCpl) 296 { 297 /* 298 * See iemMiscValidateNewSS, iemCImpl_LoadSReg and intel+amd manuals. 299 */ 300 301 if (!pGstDesc->Gen.u1Present) 302 { 303 Log(("selmIsGstDescGoodForSReg: Not present\n")); 304 return false; 305 } 306 307 if (!pGstDesc->Gen.u1DescType) 308 { 309 Log(("selmIsGstDescGoodForSReg: System descriptor\n")); 310 return false; 311 } 312 313 if (iSReg == X86_SREG_SS) 314 { 315 if ((pGstDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE) 316 { 317 Log(("selmIsGstDescGoodForSReg: Stack must be writable\n")); 318 return false; 319 } 320 if (uCpl > pGstDesc->Gen.u2Dpl) 321 { 322 Log(("selmIsGstDescGoodForSReg: CPL(%d) > DPL(%d)\n", uCpl, pGstDesc->Gen.u2Dpl)); 323 return false; 324 } 325 } 326 else 327 { 328 if (iSReg == X86_SREG_CS) 329 { 330 if (!(pGstDesc->Gen.u4Type & X86_SEL_TYPE_CODE)) 331 { 332 Log(("selmIsGstDescGoodForSReg: CS needs code segment\n")); 333 return false; 334 } 335 } 336 else if ((pGstDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE) 337 { 338 Log(("selmIsGstDescGoodForSReg: iSReg=%u execute only\n", iSReg)); 339 return false; 340 } 341 342 if ( (pGstDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) 343 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) 344 && ( ( (pSReg->Sel & X86_SEL_RPL) > pGstDesc->Gen.u2Dpl 345 && ( (pSReg->Sel & X86_SEL_RPL) != 1 346 || !CPUMIsGuestInRawMode(pVCpu) ) ) 347 || uCpl > (unsigned)pGstDesc->Gen.u2Dpl 348 ) 349 ) 350 { 351 Log(("selmIsGstDescGoodForSReg: iSReg=%u DPL=%u CPL=%u RPL=%u InRawMode=%u\n", iSReg, 352 pGstDesc->Gen.u2Dpl, uCpl, pSReg->Sel & X86_SEL_RPL, CPUMIsGuestInRawMode(pVCpu))); 353 return false; 354 } 355 } 356 357 return true; 358 } 359 360 361 /** 362 * Converts a guest GDT or LDT entry to a shadow table entry. 363 * 364 * @param pDesc Guest entry on input, shadow entry on return. 365 */ 366 DECL_FORCE_INLINE(void) selmGuestToShadowDesc(PX86DESC pDesc) 367 { 368 /* 369 * Code and data selectors are generally 1:1, with the 370 * 'little' adjustment we do for DPL 0 selectors. 371 */ 372 if (pDesc->Gen.u1DescType) 373 { 374 /* 375 * Hack for A-bit against Trap E on read-only GDT. 376 */ 377 /** @todo Fix this by loading ds and cs before turning off WP. */ 378 pDesc->Gen.u4Type |= X86_SEL_TYPE_ACCESSED; 379 380 /* 381 * All DPL 0 code and data segments are squeezed into DPL 1. 382 * 383 * We're skipping conforming segments here because those 384 * cannot give us any trouble. 385 */ 386 if ( pDesc->Gen.u2Dpl == 0 387 && (pDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) 388 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) ) 389 { 390 pDesc->Gen.u2Dpl = 1; 391 pDesc->Gen.u1Available = 1; 392 } 393 else 394 pDesc->Gen.u1Available = 0; 395 } 396 else 397 { 398 /* 399 * System type selectors are marked not present. 400 * Recompiler or special handling is required for these. 401 */ 402 /** @todo what about interrupt gates and rawr0? */ 403 pDesc->Gen.u1Present = 0; 404 } 405 } 406 407 408 /** 409 * Checks if a segment register is stale given the shadow descriptor table 410 * entry. 411 * 412 * @returns @c true if stale, @c false if not. 413 * @param pSReg The segment register. 
414 * @param pShwDesc The shadow descriptor entry. 415 * @param iSReg The segment register number (X86_SREG_XXX). 416 */ 417 DECLINLINE(bool) selmIsSRegStale32(PCCPUMSELREG pSReg, PCX86DESC pShwDesc, uint32_t iSReg) 418 { 419 if ( pSReg->Attr.n.u1Present != pShwDesc->Gen.u1Present 420 || pSReg->Attr.n.u4Type != pShwDesc->Gen.u4Type 421 || pSReg->Attr.n.u1DescType != pShwDesc->Gen.u1DescType 422 || pSReg->Attr.n.u1DefBig != pShwDesc->Gen.u1DefBig 423 || pSReg->Attr.n.u1Granularity != pShwDesc->Gen.u1Granularity 424 || pSReg->Attr.n.u2Dpl != pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available) 425 { 426 Log(("selmIsSRegStale32: Attributes changed (%#x -> %#x)\n", pSReg->Attr.u, X86DESC_GET_HID_ATTR(pShwDesc))); 427 return true; 428 } 429 430 if (pSReg->u64Base != X86DESC_BASE(pShwDesc)) 431 { 432 Log(("selmIsSRegStale32: base changed (%#llx -> %#llx)\n", pSReg->u64Base, X86DESC_BASE(pShwDesc))); 433 return true; 434 } 435 436 if (pSReg->u32Limit != X86DESC_LIMIT_G(pShwDesc)) 437 { 438 Log(("selmIsSRegStale32: limit changed (%#x -> %#x)\n", pSReg->u32Limit, X86DESC_LIMIT_G(pShwDesc))); 439 return true; 440 } 441 442 return false; 443 } 444 445 446 /** 447 * Loads the hidden bits of a selector register from a shadow descriptor table 448 * entry. 449 * 450 * @param pSReg The segment register in question. 451 * @param pShwDesc The shadow descriptor table entry. 452 */ 453 DECLINLINE(void) selmLoadHiddenSRegFromShadowDesc(PCPUMSELREG pSReg, PCX86DESC pShwDesc) 454 { 455 pSReg->Attr.u = X86DESC_GET_HID_ATTR(pShwDesc); 456 pSReg->Attr.n.u2Dpl -= pSReg->Attr.n.u1Available; 457 Assert(pSReg->Attr.n.u4Type & X86_SEL_TYPE_ACCESSED); 458 pSReg->u32Limit = X86DESC_LIMIT_G(pShwDesc); 459 pSReg->u64Base = X86DESC_BASE(pShwDesc); 460 pSReg->ValidSel = pSReg->Sel; 461 if (pSReg->Attr.n.u1Available) 462 pSReg->ValidSel &= ~(RTSEL)1; 463 pSReg->fFlags = CPUMSELREG_FLAGS_VALID; 464 } 465 466 467 /** 468 * Loads the hidden bits of a selector register from a guest descriptor table 469 * entry. 470 * 471 * @param pVCpu The current virtual CPU. 472 * @param pSReg The segment register in question. 473 * @param pGstDesc The guest descriptor table entry. 474 */ 475 DECLINLINE(void) selmLoadHiddenSRegFromGuestDesc(PVMCPU pVCpu, PCPUMSELREG pSReg, PCX86DESC pGstDesc) 476 { 477 pSReg->Attr.u = X86DESC_GET_HID_ATTR(pGstDesc); 478 pSReg->Attr.n.u4Type |= X86_SEL_TYPE_ACCESSED; 479 pSReg->u32Limit = X86DESC_LIMIT_G(pGstDesc); 480 pSReg->u64Base = X86DESC_BASE(pGstDesc); 481 pSReg->ValidSel = pSReg->Sel; 482 if ((pSReg->ValidSel & 1) && CPUMIsGuestInRawMode(pVCpu)) 483 pSReg->ValidSel &= ~(RTSEL)1; 484 pSReg->fFlags = CPUMSELREG_FLAGS_VALID; 485 } 486 487 #endif /* VBOX_WITH_RAW_MODE_NOT_R0 */ 488 191 489 /** @} */ 192 490 -
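The new SELMInternal.h inline helpers above all rely on the same convention: selmGuestToShadowDesc() raises guest DPL 0 code/data segments to DPL 1 in the shadow table and records that adjustment in the descriptor's available (AVL) bit, so selmIsSRegStale32(), selmIsShwDescGoodForSReg() and selmLoadHiddenSRegFromShadowDesc() can recover the guest-visible DPL as shadow DPL minus AVL. A tiny stand-alone illustration of that arithmetic (assumed semantics, not the actual VBox structures):

    #include <stdio.h>

    struct shw_attr { unsigned dpl : 2; unsigned avl : 1; };

    /* Undo the DPL 0 -> 1 squeeze: the AVL bit marks adjusted entries. */
    static unsigned guest_dpl_from_shadow(struct shw_attr a)
    {
        return a.dpl - a.avl;
    }

    int main(void)
    {
        struct shw_attr ring0 = { 1, 1 };   /* guest DPL 0 squeezed to 1, AVL set */
        struct shw_attr ring3 = { 3, 0 };   /* guest DPL 3 left untouched         */
        printf("guest DPLs: %u and %u\n",
               guest_dpl_from_shadow(ring0), guest_dpl_from_shadow(ring3));
        return 0;
    }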
trunk/src/VBox/VMM/testcase/tstVMStruct.h
r41976 r42407 994 994 GEN_CHECK_OFF(SELM, fGDTRangeRegistered); 995 995 GEN_CHECK_OFF(SELM, StatUpdateFromCPUM); 996 GEN_CHECK_OFF(SELM, StatStaleToUnstaleSReg); 997 GEN_CHECK_OFF(SELM, StatLoadHidSelGstNoGood); 996 998 997 999 GEN_CHECK_SIZE(TM); -
trunk/src/recompiler/VBoxRecompiler.c
r41906 r42407 138 138 static STAMCOUNTER gStatRefuseRing1or2; 139 139 static STAMCOUNTER gStatRefuseCanExecute; 140 static STAMCOUNTER gaStatRefuseStale[6]; 140 141 static STAMCOUNTER gStatREMGDTChange; 141 142 static STAMCOUNTER gStatREMIDTChange; … … 390 391 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution"); 391 392 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw"); 393 STAM_REG(pVM, &gaStatRefuseStale[R_ES], STAMTYPE_COUNTER, "/REM/Refuse/StaleES", STAMUNIT_OCCURENCES, "Raw mode refused because of stale ES"); 394 STAM_REG(pVM, &gaStatRefuseStale[R_CS], STAMTYPE_COUNTER, "/REM/Refuse/StaleCS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale CS"); 395 STAM_REG(pVM, &gaStatRefuseStale[R_SS], STAMTYPE_COUNTER, "/REM/Refuse/StaleSS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale SS"); 396 STAM_REG(pVM, &gaStatRefuseStale[R_DS], STAMTYPE_COUNTER, "/REM/Refuse/StaleDS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale DS"); 397 STAM_REG(pVM, &gaStatRefuseStale[R_FS], STAMTYPE_COUNTER, "/REM/Refuse/StaleFS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale FS"); 398 STAM_REG(pVM, &gaStatRefuseStale[R_GS], STAMTYPE_COUNTER, "/REM/Refuse/StaleGS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale GS"); 392 399 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes"); 393 400 … … 568 575 STAM_DEREG(pVM, &gStatRefuseRing1or2); 569 576 STAM_DEREG(pVM, &gStatRefuseCanExecute); 577 STAM_DEREG(pVM, &gaStatRefuseStale[0]); 578 STAM_DEREG(pVM, &gaStatRefuseStale[1]); 579 STAM_DEREG(pVM, &gaStatRefuseStale[2]); 580 STAM_DEREG(pVM, &gaStatRefuseStale[3]); 581 STAM_DEREG(pVM, &gaStatRefuseStale[4]); 582 STAM_DEREG(pVM, &gaStatRefuseStale[5]); 570 583 STAM_DEREG(pVM, &gStatFlushTBs); 571 584 … … 1664 1677 } 1665 1678 1679 /* 1680 * Stale hidden selectors means raw-mode is unsafe (being very careful). 
1681 */ 1682 if (env->segs[R_CS].fVBoxFlags & CPUMSELREG_FLAGS_STALE) 1683 { 1684 Log2(("raw mode refused: stale CS (%#x)\n", env->segs[R_CS].selector)); 1685 STAM_COUNTER_INC(&gaStatRefuseStale[R_CS]); 1686 return EMSTATE_REM; 1687 } 1688 if (env->segs[R_SS].fVBoxFlags & CPUMSELREG_FLAGS_STALE) 1689 { 1690 Log2(("raw mode refused: stale SS (%#x)\n", env->segs[R_SS].selector)); 1691 STAM_COUNTER_INC(&gaStatRefuseStale[R_SS]); 1692 return EMSTATE_REM; 1693 } 1694 if (env->segs[R_DS].fVBoxFlags & CPUMSELREG_FLAGS_STALE) 1695 { 1696 Log2(("raw mode refused: stale DS (%#x)\n", env->segs[R_DS].selector)); 1697 STAM_COUNTER_INC(&gaStatRefuseStale[R_DS]); 1698 return EMSTATE_REM; 1699 } 1700 if (env->segs[R_ES].fVBoxFlags & CPUMSELREG_FLAGS_STALE) 1701 { 1702 Log2(("raw mode refused: stale ES (%#x)\n", env->segs[R_ES].selector)); 1703 STAM_COUNTER_INC(&gaStatRefuseStale[R_ES]); 1704 return EMSTATE_REM; 1705 } 1706 if (env->segs[R_FS].fVBoxFlags & CPUMSELREG_FLAGS_STALE) 1707 { 1708 Log2(("raw mode refused: stale FS (%#x)\n", env->segs[R_FS].selector)); 1709 STAM_COUNTER_INC(&gaStatRefuseStale[R_FS]); 1710 return EMSTATE_REM; 1711 } 1712 if (env->segs[R_GS].fVBoxFlags & CPUMSELREG_FLAGS_STALE) 1713 { 1714 Log2(("raw mode refused: stale GS (%#x)\n", env->segs[R_GS].selector)); 1715 STAM_COUNTER_INC(&gaStatRefuseStale[R_GS]); 1716 return EMSTATE_REM; 1717 } 1718 1666 1719 /* Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));*/ 1667 1720 *piException = EXCP_EXECUTE_RAW; … … 2042 2095 register const CPUMCTX *pCtx; 2043 2096 register unsigned fFlags; 2044 bool fHiddenSelRegsValid;2045 2097 unsigned i; 2046 2098 TRPMEVENT enmType; … … 2054 2106 pVM->rem.s.Env.pVCpu = pVCpu; 2055 2107 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu); 2056 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVCpu); /// @todo move this down and use fFlags.2057 2108 2058 2109 Assert(!pVM->rem.s.fInREM); … … 2277 2328 if (fFlags & CPUM_CHANGED_LDTR) 2278 2329 { 2279 if ( fHiddenSelRegsValid || (pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID))2330 if (pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID) 2280 2331 { 2281 2332 pVM->rem.s.Env.ldt.selector = pCtx->ldtr.Sel; … … 2307 2358 * Sync TR unconditionally to make life simpler. 2308 2359 */ 2309 pVM->rem.s.Env.tr.selector = pCtx->tr.Sel; 2310 pVM->rem.s.Env.tr.base = pCtx->tr.u64Base; 2311 pVM->rem.s.Env.tr.limit = pCtx->tr.u32Limit; 2312 pVM->rem.s.Env.tr.flags = (pCtx->tr.Attr.u << 8) & 0xFFFFFF; 2360 pVM->rem.s.Env.tr.selector = pCtx->tr.Sel; 2361 pVM->rem.s.Env.tr.newselector = 0; 2362 pVM->rem.s.Env.tr.fVBoxFlags = pCtx->tr.fFlags; 2363 pVM->rem.s.Env.tr.base = pCtx->tr.u64Base; 2364 pVM->rem.s.Env.tr.limit = pCtx->tr.u32Limit; 2365 pVM->rem.s.Env.tr.flags = (pCtx->tr.Attr.u << 8) & 0xFFFFFF; 2313 2366 /* Note! do_interrupt will fault if the busy flag is still set... */ 2314 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;2367 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK; 2315 2368 2316 2369 /* 2317 2370 * Update selector registers. 2371 * 2318 2372 * This must be done *after* we've synced gdt, ldt and crX registers 2319 2373 * since we're reading the GDT/LDT om sync_seg. This will happen with 2320 2374 * saved state which takes a quick dip into rawmode for instance. 2321 */ 2322 /* 2323 * Stack; Note first check this one as the CPL might have changed. The 2324 * wrong CPL can cause QEmu to raise an exception in sync_seg!! 2325 */ 2326 2327 if (fHiddenSelRegsValid) 2328 { 2329 /* The hidden selector registers are valid in the CPU context. */ 2330 /* Note! 
QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */ 2331 2332 /* Set current CPL */ 2333 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl); 2334 2335 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit, (pCtx->cs.Attr.u << 8) & 0xFFFFFF); 2336 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss.Sel, pCtx->ss.u64Base, pCtx->ss.u32Limit, (pCtx->ss.Attr.u << 8) & 0xFFFFFF); 2337 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds.Sel, pCtx->ds.u64Base, pCtx->ds.u32Limit, (pCtx->ds.Attr.u << 8) & 0xFFFFFF); 2338 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es.Sel, pCtx->es.u64Base, pCtx->es.u32Limit, (pCtx->es.Attr.u << 8) & 0xFFFFFF); 2339 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs.Sel, pCtx->fs.u64Base, pCtx->fs.u32Limit, (pCtx->fs.Attr.u << 8) & 0xFFFFFF); 2340 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs.Sel, pCtx->gs.u64Base, pCtx->gs.u32Limit, (pCtx->gs.Attr.u << 8) & 0xFFFFFF); 2341 } 2342 else 2343 { 2344 /* In 'normal' raw mode we don't have access to the hidden selector registers. */ 2345 /** @todo use hidden registers when possible and make CPUM/someone do the 2346 * reading of lazily maintained hidden registers. */ 2347 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss.Sel) 2348 { 2349 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss.Sel)); 2350 2351 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl); 2352 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss.Sel); 2353 #ifdef VBOX_WITH_STATISTICS 2354 if (pVM->rem.s.Env.segs[R_SS].newselector) 2355 { 2356 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]); 2357 } 2358 #endif 2359 } 2360 else 2361 pVM->rem.s.Env.segs[R_SS].newselector = 0; 2362 2363 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es.Sel) 2364 { 2365 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es.Sel)); 2366 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es.Sel); 2367 #ifdef VBOX_WITH_STATISTICS 2368 if (pVM->rem.s.Env.segs[R_ES].newselector) 2369 { 2370 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]); 2371 } 2372 #endif 2373 } 2374 else 2375 pVM->rem.s.Env.segs[R_ES].newselector = 0; 2376 2377 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs.Sel) 2378 { 2379 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs.Sel)); 2380 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs.Sel); 2381 #ifdef VBOX_WITH_STATISTICS 2382 if (pVM->rem.s.Env.segs[R_CS].newselector) 2383 { 2384 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]); 2385 } 2386 #endif 2387 } 2388 else 2389 pVM->rem.s.Env.segs[R_CS].newselector = 0; 2390 2391 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds.Sel) 2392 { 2393 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds.Sel)); 2394 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds.Sel); 2395 #ifdef VBOX_WITH_STATISTICS 2396 if (pVM->rem.s.Env.segs[R_DS].newselector) 2397 { 2398 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]); 2399 } 2400 #endif 2401 } 2402 else 2403 pVM->rem.s.Env.segs[R_DS].newselector = 0; 2404 2375 * 2376 * CPL/Stack; Note first check this one as the CPL might have changed. 2377 * The wrong CPL can cause QEmu to raise an exception in sync_seg!! 2378 */ 2379 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl); 2380 /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! 
*/ 2381 #define SYNC_IN_SREG(a_pEnv, a_SReg, a_pRemSReg, a_pVBoxSReg) \ 2382 do \ 2383 { \ 2384 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, a_pVBoxSReg)) \ 2385 { \ 2386 cpu_x86_load_seg_cache(a_pEnv, R_##a_SReg, \ 2387 (a_pVBoxSReg)->Sel, \ 2388 (a_pVBoxSReg)->u64Base, \ 2389 (a_pVBoxSReg)->u32Limit, \ 2390 ((a_pVBoxSReg)->Attr.u << 8) & 0xFFFFFF); \ 2391 (a_pRemSReg)->fVBoxFlags = (a_pVBoxSReg)->fFlags; \ 2392 } \ 2393 /* This only-reload-if-changed stuff is the old approach, we should ditch it. */ \ 2394 else if ((a_pRemSReg)->selector != (a_pVBoxSReg)->Sel) \ 2395 { \ 2396 Log2(("REMR3State: " #a_SReg " changed from %04x to %04x!\n", \ 2397 (a_pRemSReg)->selector, (a_pVBoxSReg)->Sel)); \ 2398 sync_seg(a_pEnv, R_##a_SReg, (a_pVBoxSReg)->Sel); \ 2399 if ((a_pRemSReg)->newselector) \ 2400 STAM_COUNTER_INC(&gStatSelOutOfSync[R_##a_SReg]); \ 2401 } \ 2402 else \ 2403 (a_pRemSReg)->newselector = 0; \ 2404 } while (0) 2405 2406 SYNC_IN_SREG(&pVM->rem.s.Env, CS, &pVM->rem.s.Env.segs[R_CS], &pCtx->cs); 2407 SYNC_IN_SREG(&pVM->rem.s.Env, SS, &pVM->rem.s.Env.segs[R_SS], &pCtx->ss); 2408 SYNC_IN_SREG(&pVM->rem.s.Env, DS, &pVM->rem.s.Env.segs[R_DS], &pCtx->ds); 2409 SYNC_IN_SREG(&pVM->rem.s.Env, ES, &pVM->rem.s.Env.segs[R_ES], &pCtx->es); 2410 SYNC_IN_SREG(&pVM->rem.s.Env, FS, &pVM->rem.s.Env.segs[R_FS], &pCtx->fs); 2411 SYNC_IN_SREG(&pVM->rem.s.Env, GS, &pVM->rem.s.Env.segs[R_GS], &pCtx->gs); 2405 2412 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might 2406 2413 * be the same but not the base/limit. */ 2407 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs.Sel)2408 {2409 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs.Sel));2410 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs.Sel);2411 #ifdef VBOX_WITH_STATISTICS2412 if (pVM->rem.s.Env.segs[R_FS].newselector)2413 {2414 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);2415 }2416 #endif2417 }2418 else2419 pVM->rem.s.Env.segs[R_FS].newselector = 0;2420 2421 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs.Sel)2422 {2423 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs.Sel));2424 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs.Sel);2425 #ifdef VBOX_WITH_STATISTICS2426 if (pVM->rem.s.Env.segs[R_GS].newselector)2427 {2428 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);2429 }2430 #endif2431 }2432 else2433 pVM->rem.s.Env.segs[R_GS].newselector = 0;2434 }2435 2414 2436 2415 /* … … 2683 2662 /* Qemu and AMD/Intel have different ideas about the busy flag ... */ 2684 2663 || pCtx->tr.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF 2685 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 82686 : 0)2664 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 2665 : 0) 2687 2666 || !(pCtx->tr.fFlags & CPUMSELREG_FLAGS_VALID) 2688 2667 ) … … 2905 2884 /* Qemu and AMD/Intel have different ideas about the busy flag ... */ 2906 2885 || pCtx->tr.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF 2907 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 82908 : 0)2886 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 2887 : 0) 2909 2888 || !(pCtx->tr.fFlags & CPUMSELREG_FLAGS_VALID) 2910 2889 ) -
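The SYNC_IN_SREG macro above loads QEMU's segment cache with ((Attr.u << 8) & 0xFFFFFF), and the TR comparison further down goes the other way with a right shift by 8 and a 0xF0FF mask that drops the limit bits sharing the dword. The shift reflects the differing layouts: VBox keeps the attribute bits right-aligned in Attr.u, while QEMU's SegmentCache.flags keeps them where they sit in the descriptor's second dword. A small round-trip sketch (layout assumption inferred from this diff only):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t vbox_attr_to_qemu_flags(uint32_t uAttr)
    {
        return (uAttr << 8) & 0xFFFFFF;   /* as done when loading the REM seg cache */
    }

    static uint32_t qemu_flags_to_vbox_attr(uint32_t fFlags)
    {
        return (fFlags >> 8) & 0xF0FF;    /* roughly the TR comparison on the way back */
    }

    int main(void)
    {
        uint32_t uAttr = 0xc093;          /* present, DPL 0, accessed r/w data, G and D/B set */
        uint32_t fQemu = vbox_attr_to_qemu_flags(uAttr);
        printf("attr %#x -> flags %#x -> attr %#x\n",
               uAttr, fQemu, qemu_flags_to_vbox_attr(fQemu));
        return 0;
    }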
trunk/src/recompiler/target-i386/cpu.h
r38319 r42407 65 65 # include <VBox/vmm/vmm.h> 66 66 # include <VBox/vmm/stam.h> 67 # include <VBox/vmm/cpumctx.h> 67 68 #endif /* VBOX */ 68 69 … … 532 533 typedef struct SegmentCache { 533 534 uint32_t selector; 535 #ifdef VBOX 536 /** The new selector is saved here when we are unable to sync it before invoking the recompiled code. */ 537 uint16_t newselector; 538 uint16_t fVBoxFlags; 539 #endif 534 540 target_ulong base; 535 541 uint32_t limit; 536 542 uint32_t flags; 537 #ifdef VBOX538 /** The new selector is saved here when we are unable to sync it before invoking the recompiled code. */539 uint32_t newselector;540 #endif541 543 } SegmentCache; 542 544 … … 942 944 sc->base = base; 943 945 sc->limit = limit; 946 #ifndef VBOX 944 947 sc->flags = flags; 945 #ifdef VBOX 948 #else 949 if (flags & DESC_P_MASK) 950 flags |= DESC_A_MASK; /* Make sure the A bit is set to avoid trouble. */ 951 sc->flags = flags; 946 952 sc->newselector = 0; 953 sc->fVBoxFlags = CPUMSELREG_FLAGS_VALID; 947 954 #endif 948 955 -
trunk/src/recompiler/target-i386/op_helper.c
r39969 r42407 254 254 sc->limit = get_seg_limit(e1, e2); 255 255 sc->flags = e2; 256 #ifdef VBOX 257 sc->newselector = 0; 258 sc->fVBoxFlags = CPUMSELREG_FLAGS_VALID; 259 #endif 256 260 } 257 261 … … 557 561 env->tr.limit = tss_limit; 558 562 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK; 563 #ifdef VBOX 564 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID; 565 env->tr.newselector = 0; 566 #endif 559 567 560 568 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) { … … 596 604 env->ldt.limit = 0; 597 605 env->ldt.flags = 0; 606 #ifdef VBOX 607 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID; 608 env->ldt.newselector = 0; 609 #endif 598 610 599 611 /* load the LDT */ … … 1954 1966 env->ldt.limit = ldl_phys(sm_state + 0x7e74); 1955 1967 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8; 1968 #ifdef VBOX 1969 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID; 1970 env->ldt.newselector = 0; 1971 #endif 1956 1972 1957 1973 env->idt.base = ldq_phys(sm_state + 0x7e88); … … 1962 1978 env->tr.limit = ldl_phys(sm_state + 0x7e94); 1963 1979 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8; 1980 #ifdef VBOX 1981 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID; 1982 env->tr.newselector = 0; 1983 #endif 1964 1984 1965 1985 EAX = ldq_phys(sm_state + 0x7ff8); … … 2008 2028 env->tr.limit = ldl_phys(sm_state + 0x7f60); 2009 2029 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8; 2030 #ifdef VBOX 2031 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID; 2032 env->tr.newselector = 0; 2033 #endif 2010 2034 2011 2035 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff; … … 2013 2037 env->ldt.limit = ldl_phys(sm_state + 0x7f7c); 2014 2038 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8; 2039 #ifdef VBOX 2040 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID; 2041 env->ldt.newselector = 0; 2042 #endif 2015 2043 2016 2044 env->gdt.base = ldl_phys(sm_state + 0x7f74); … … 2448 2476 env->ldt.base = 0; 2449 2477 env->ldt.limit = 0; 2478 #ifdef VBOX 2479 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID; 2480 env->ldt.newselector = 0; 2481 #endif 2450 2482 } else { 2451 2483 if (selector & 0x4) … … 2510 2542 env->tr.limit = 0; 2511 2543 env->tr.flags = 0; 2544 #ifdef VBOX 2545 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID; 2546 env->tr.newselector = 0; 2547 #endif 2512 2548 } else { 2513 2549 if (selector & 0x4) … … 5724 5760 5725 5761 /* Successful sync. */ 5726 env1->segs[seg_reg].newselector = 0;5762 Assert(env1->segs[seg_reg].newselector == 0); 5727 5763 } 5728 5764 else … … 5741 5777 load_segment(&e1, &e2, selector); 5742 5778 cpu_x86_load_seg_cache(env, R_CS, selector, 5743 get_seg_base(e1, e2),5744 get_seg_limit(e1, e2),5745 e2);5779 get_seg_base(e1, e2), 5780 get_seg_limit(e1, e2), 5781 e2); 5746 5782 } 5747 5783 else … … 5753 5789 5754 5790 /* Successful sync. */ 5755 env1->segs[seg_reg].newselector = 0;5791 Assert(env1->segs[seg_reg].newselector == 0); 5756 5792 } 5757 5793 else
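The cpu.h hunk above narrows QEMU's existing SegmentCache.newselector field ("saved here when we are unable to sync it before invoking the recompiled code") to 16 bits and adds fVBoxFlags beside it, while the op_helper.c hunk turns the old "clear newselector on successful sync" into an assertion that it is already clear. A toy model of that deferred-load bookkeeping, under the assumption that a failed load parks the selector and a later successful load must leave nothing pending (not the actual sync_seg logic):

    #include <stdint.h>
    #include <stdio.h>
    #include <assert.h>

    typedef struct {
        uint16_t selector;     /* selector currently loaded into the cache  */
        uint16_t newselector;  /* selector we could not resolve yet, if any */
    } toy_seg_cache;

    static int toy_load(toy_seg_cache *sc, uint16_t sel, int fTablesReadable)
    {
        if (!fTablesReadable) {        /* descriptor tables unreachable: defer */
            sc->newselector = sel;
            return -1;
        }
        sc->selector    = sel;         /* resolved for real                    */
        sc->newselector = 0;           /* nothing may remain pending           */
        return 0;
    }

    int main(void)
    {
        toy_seg_cache ds = { 0, 0 };
        if (toy_load(&ds, 0x23, 0) != 0)          /* first attempt deferred   */
            toy_load(&ds, ds.newselector, 1);     /* retried once tables map  */
        assert(ds.newselector == 0);              /* mirrors the new Assert() */
        printf("ds=%#x\n", ds.selector);
        return 0;
    }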