Changeset 65933 in vbox
- Timestamp: Mar 3, 2017 1:21:40 PM
- Location: trunk
- Files: 7 edited
  - include/VBox/vmm/cpum.h (modified) (9 diffs)
  - include/VBox/vmm/hm_svm.h (modified) (2 diffs)
  - include/VBox/vmm/iem.h (modified) (1 diff)
  - src/VBox/VMM/VMMAll/IEMAll.cpp (modified) (3 diffs)
  - src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h (modified) (4 diffs)
  - src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h (modified) (3 diffs)
  - src/VBox/VMM/VMMR0/HMSVMR0.cpp (modified) (10 diffs)
trunk/include/VBox/vmm/cpum.h
Diff r65907 -> r65933 (9 diffs)

Doxygen tidy-up: the "@param pCtx Current CPU context" lines gain a trailing period in
CPUMIsGuestInRealModeEx, CPUMIsGuestInRealOrV86ModeEx, CPUMIsGuestInV86ModeEx,
CPUMIsGuestInPagedProtectedModeEx, CPUMIsGuestInLongModeEx, CPUMIsGuestIn64BitCodeEx,
CPUMIsGuestPagingEnabledEx and CPUMIsGuestInPAEModeEx.

One new inline helper is added after CPUMIsGuestInPAEModeEx:

    /**
     * Tests if the guest has AMD SVM enabled or not.
     *
     * @returns true if SVM is enabled, otherwise false.
     * @param   pCtx    Current CPU context.
     */
    DECLINLINE(bool) CPUMIsGuestSvmEnabled(PCPUMCTX pCtx)
    {
        return RT_BOOL(pCtx->msrEFER & MSR_K6_EFER_SVME);
    }
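The helper boils down to testing the SVME bit in the guest's EFER MSR. Below is a minimal,
standalone sketch of the same check; the DEMOCTX struct and main() are illustrative
stand-ins, while MSR_K6_EFER_SVME corresponds to the architectural EFER.SVME bit (bit 12):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MSR_K6_EFER_SVME  (UINT64_C(1) << 12)   /* EFER.SVME, architectural bit 12 */

    typedef struct { uint64_t msrEFER; } DEMOCTX;   /* stand-in for CPUMCTX */

    /* Mirrors CPUMIsGuestSvmEnabled(): SVM instructions only become legal
     * once the guest has set EFER.SVME. */
    static bool demoIsGuestSvmEnabled(const DEMOCTX *pCtx)
    {
        return (pCtx->msrEFER & MSR_K6_EFER_SVME) != 0;
    }

    int main(void)
    {
        DEMOCTX Ctx = { 0 };
        printf("SVME clear: %d\n", demoIsGuestSvmEnabled(&Ctx));   /* prints 0 */
        Ctx.msrEFER |= MSR_K6_EFER_SVME;
        printf("SVME set:   %d\n", demoIsGuestSvmEnabled(&Ctx));   /* prints 1 */
        return 0;
    }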
trunk/include/VBox/vmm/hm_svm.h
Diff r65904 -> r65933 (2 diffs)

A const pointer typedef is added next to PSVMVMCB:

    /** Pointer to a const SVMVMCB structure. */
    typedef const SVMVMCB *PCSVMVMCB;

The segment-attribute conversion and segment-register copy macros (moved here from
HMSVMR0.cpp so IEM can use them too) are added after the IN_RING0 block:

    /**
     * Segment attribute conversion between CPU and AMD-V VMCB format.
     *
     * The CPU format of the segment attribute is described in X86DESCATTRBITS,
     * which is 16 bits (i.e. includes 4 bits of the segment limit).
     *
     * In the AMD-V VMCB format the segment attribute is a compact 12 bits
     * (strictly only the attribute bits and nothing else); the upper 4 bits
     * are unused.
     */
    #define HMSVM_CPU_2_VMCB_SEG_ATTR(a)   ( ((a) & 0xff) | (((a) & 0xf000) >> 4) )
    #define HMSVM_VMCB_2_CPU_SEG_ATTR(a)   ( ((a) & 0xff) | (((a) & 0x0f00) << 4) )

    /** @def HMSVM_SEG_REG_COPY_TO_VMCB
     * Copies the specified segment register to a VMCB from a virtual CPU context.
     *
     * @param   a_pCtx    The virtual-CPU context.
     * @param   a_pVmcb   The VMCB.
     * @param   REG       The segment register in the VMCB struct (CS, DS, FS etc.).
     * @param   reg       The segment register in the virtual CPU struct (cs, ds, fs etc.).
     */
    #define HMSVM_SEG_REG_COPY_TO_VMCB(a_pCtx, a_pVmcb, REG, reg) \
        do \
        { \
            Assert((a_pCtx)->reg.fFlags & CPUMSELREG_FLAGS_VALID); \
            Assert((a_pCtx)->reg.ValidSel == (a_pCtx)->reg.Sel); \
            (a_pVmcb)->guest.REG.u16Sel   = (a_pCtx)->reg.Sel; \
            (a_pVmcb)->guest.REG.u32Limit = (a_pCtx)->reg.u32Limit; \
            (a_pVmcb)->guest.REG.u64Base  = (a_pCtx)->reg.u64Base; \
            (a_pVmcb)->guest.REG.u16Attr  = HMSVM_CPU_2_VMCB_SEG_ATTR((a_pCtx)->reg.Attr.u); \
        } while (0)

    /** @def HMSVM_SEG_REG_COPY_FROM_VMCB
     * Copies the specified segment register from the VMCB to a virtual CPU context.
     *
     * @param   a_pCtx    The virtual-CPU context.
     * @param   a_pVmcb   The VMCB.
     * @param   REG       The segment register in the VMCB struct (CS, DS, FS etc.).
     * @param   reg       The segment register in the virtual CPU struct (cs, ds, fs etc.).
     */
    #define HMSVM_SEG_REG_COPY_FROM_VMCB(a_pCtx, a_pVmcb, REG, reg) \
        do \
        { \
            (a_pCtx)->reg.Sel      = (a_pVmcb)->guest.REG.u16Sel; \
            (a_pCtx)->reg.ValidSel = (a_pVmcb)->guest.REG.u16Sel; \
            (a_pCtx)->reg.fFlags   = CPUMSELREG_FLAGS_VALID; \
            (a_pCtx)->reg.u32Limit = (a_pVmcb)->guest.REG.u32Limit; \
            (a_pCtx)->reg.u64Base  = (a_pVmcb)->guest.REG.u64Base; \
            (a_pCtx)->reg.Attr.u   = HMSVM_VMCB_2_CPU_SEG_ATTR((a_pVmcb)->guest.REG.u16Attr); \
        } while (0)
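The two conversion macros simply drop the 4 segment-limit bits (bits 8-11 of the CPU
format) and pack the AVL/L/D/G nibble down next to the access byte. A small standalone
check of that behaviour follows; the macro definitions are copied verbatim from above,
while the example attribute value 0xAF9B (a typical 64-bit code segment) is chosen
purely for illustration:

    #include <assert.h>
    #include <stdint.h>

    /* Copied from hm_svm.h above. */
    #define HMSVM_CPU_2_VMCB_SEG_ATTR(a)   ( ((a) & 0xff) | (((a) & 0xf000) >> 4) )
    #define HMSVM_VMCB_2_CPU_SEG_ATTR(a)   ( ((a) & 0xff) | (((a) & 0x0f00) << 4) )

    int main(void)
    {
        /* Access byte 0x9B (present, DPL 0, code, accessed), G=1/L=1 in the
         * top nibble, and limit bits 19:16 set to 0xF. */
        uint16_t const uCpuAttr  = 0xAF9B;
        uint16_t const uVmcbAttr = HMSVM_CPU_2_VMCB_SEG_ATTR(uCpuAttr);

        assert(uVmcbAttr == 0x0A9B);   /* compact 12-bit VMCB form, limit bits gone */
        assert(HMSVM_VMCB_2_CPU_SEG_ATTR(uVmcbAttr) == 0xA09B); /* round trip restores
                                                                   everything except the
                                                                   limit nibble, which is
                                                                   not an attribute bit */
        return 0;
    }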
trunk/include/VBox/vmm/iem.h
Diff r65904 -> r65933 (1 diff)

Two new HM/EM interface declarations are added next to IEMExecDecodedClgi and
IEMExecDecodedStgi:

    VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr);
    VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr);
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
Diff r65919 -> r65933 (3 diffs)

Two macros are added to the VBOX_WITH_NESTED_HWVIRT block, ahead of the existing
IEM_IS_SVM_CTRL_INTERCEPT_SET check:

    /**
     * Check the common SVM instruction preconditions.
     */
    #define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
        do { \
            if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
            { \
                Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
                return iemRaiseUndefinedOpcode(pVCpu); \
            } \
            if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
            { \
                Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
                return iemRaiseUndefinedOpcode(pVCpu); \
            } \
            if (pVCpu->iem.s.uCpl != 0) \
            { \
                Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
                return iemRaiseGeneralProtectionFault0(pVCpu); \
            } \
        } while (0)

    /**
     * Check if SVM is enabled.
     */
    #define IEM_IS_SVM_ENABLED(a_pVCpu)   (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))

An existing decoder helper that rejects real and v8086 mode now raises
IEMOP_RAISE_INVALID_OPCODE() instead of IEMOP_RAISE_INVALID_LOCK_PREFIX(), and a
disabled (#if 0) IEMOP_HLP_NEEDS_SVM_ENABLED helper is parked next to it for later use.

Two new HM/EM interface functions are added at the end of the nested-hwvirt section,
matching the declarations in iem.h:

    /**
     * Interface for HM and EM to emulate the VMLOAD instruction.
     *
     * @returns Strict VBox status code.
     * @param   pVCpu      The cross context virtual CPU structure of the calling EMT.
     * @param   cbInstr    The instruction length in bytes.
     * @thread  EMT(pVCpu)
     */
    VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
    {
        IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);

        iemInitExec(pVCpu, false /*fBypassHandlers*/);
        VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
        return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    }

IEMExecDecodedVmsave is identical apart from the names: it asserts the same instruction
length and defers to iemCImpl_vmsave.
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
Diff r65913 -> r65933 (4 diffs)

New C implementations of VMLOAD, VMSAVE and INVLPGA are added ahead of iemCImpl_clgi:

    /**
     * Implements 'VMLOAD'.
     */
    IEM_CIMPL_DEF_0(iemCImpl_vmload)
    {
        PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
        IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);
    #ifndef IN_RC
        if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
        {
            Log(("vmload: Guest intercept -> VMexit\n"));
            HMNstGstSvmVmExit(pVCpu, SVM_EXIT_VMLOAD);
            return VINF_EM_RESCHEDULE;
        }
    #endif

        RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
        if (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
        {
            Log(("vmload: VMCB physaddr (%#RGp) not 4K aligned -> #GP(0)\n", GCPhysVmcb));
            return iemRaiseGeneralProtectionFault0(pVCpu);
        }

        void *pvVmcb;
        PGMPAGEMAPLOCK PgLockVmcb;
        VBOXSTRICTRC rcStrict = iemMemPageMap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_R, &pvVmcb, &PgLockVmcb);
        if (rcStrict == VINF_SUCCESS)
        {
            PCSVMVMCB pVmcb = (PCSVMVMCB)pvVmcb;
            HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcb, FS, fs);
            HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcb, GS, gs);
            HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcb, TR, tr);
            HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcb, LDTR, ldtr);

            pCtx->msrKERNELGSBASE = pVmcb->guest.u64KernelGSBase;
            pCtx->msrSTAR         = pVmcb->guest.u64STAR;
            pCtx->msrLSTAR        = pVmcb->guest.u64LSTAR;
            pCtx->msrCSTAR        = pVmcb->guest.u64CSTAR;
            pCtx->msrSFMASK       = pVmcb->guest.u64SFMASK;

            pCtx->SysEnter.cs     = pVmcb->guest.u64SysEnterCS;
            pCtx->SysEnter.esp    = pVmcb->guest.u64SysEnterESP;
            pCtx->SysEnter.eip    = pVmcb->guest.u64SysEnterEIP;

            iemMemPageUnmap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_R, pvVmcb, &PgLockVmcb);
            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
        }
        return rcStrict;
    }

iemCImpl_vmsave is the mirror image: it performs the same checks and intercept test
(SVM_CTRL_INTERCEPT_VMSAVE / SVM_EXIT_VMSAVE), maps the VMCB page for read/write access
(IEM_ACCESS_DATA_RW), and copies FS, GS, TR, LDTR (via HMSVM_SEG_REG_COPY_TO_VMCB) as
well as KernelGSBase, STAR, LSTAR, CSTAR, SFMASK and the SYSENTER CS/ESP/EIP values from
the guest context into the VMCB.

    /**
     * Implements 'INVLPGA'.
     */
    IEM_CIMPL_DEF_0(iemCImpl_invlpga)
    {
        PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
        IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, invlpga);
    #ifndef IN_RC
        if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
        {
            Log2(("invlpga: Guest intercept -> VMexit\n"));
            HMNstGstSvmVmExit(pVCpu, SVM_EXIT_INVLPGA);
            return VINF_EM_RESCHEDULE;
        }
    #endif

        RTGCPTR const GCPtrPage = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
        /** @todo PGM needs virtual ASID support. */
    #if 0
        uint32_t const uAsid = pCtx->ecx;
    #endif
        PGMInvalidatePage(pVCpu, GCPtrPage);
        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
        return VINF_SUCCESS;
    }

In iemCImpl_clgi and iemCImpl_stgi the hand-rolled EFER.SVME, real/v8086-mode and CPL
checks are replaced by IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, clgi) and
IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, stgi) respectively, and the clgi intercept message is
now logged with Log() instead of Log2().
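Both VMLOAD and VMSAVE take the VMCB's physical address implicitly in RAX (EAX outside
64-bit address size) and fault with #GP(0) if it is not 4K aligned. Below is a small
standalone sketch of just that address-size selection and alignment test; the DEMO types
and main() are illustrative, and X86_PAGE_4K_OFFSET_MASK is spelled out as 0xfff (the low
12 bits of a 4K page):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define X86_PAGE_4K_OFFSET_MASK  UINT64_C(0xfff)   /* low 12 bits of a 4K page */

    typedef enum { DEMO_ADDRMODE_32BIT, DEMO_ADDRMODE_64BIT } DEMOADDRMODE;

    /* Returns true if the VMCB address VMLOAD/VMSAVE would use is acceptable,
     * mirroring the effective-address-size selection and alignment check above. */
    static bool demoVmcbAddrOk(DEMOADDRMODE enmMode, uint64_t uRax, uint64_t *pGCPhysVmcb)
    {
        uint64_t const GCPhysVmcb = enmMode == DEMO_ADDRMODE_64BIT ? uRax : (uint32_t)uRax;
        *pGCPhysVmcb = GCPhysVmcb;
        return (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK) == 0;   /* otherwise #GP(0) */
    }

    int main(void)
    {
        uint64_t GCPhys;
        assert( demoVmcbAddrOk(DEMO_ADDRMODE_64BIT, UINT64_C(0x100003000), &GCPhys));
        assert(!demoVmcbAddrOk(DEMO_ADDRMODE_64BIT, UINT64_C(0x100003010), &GCPhys)); /* misaligned */
        assert( demoVmcbAddrOk(DEMO_ADDRMODE_32BIT, UINT64_C(0x100003000), &GCPhys)
               && GCPhys == 0x3000);  /* upper half of RAX ignored with 32-bit addressing */
        return 0;
    }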
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h
Diff r65904 -> r65933 (3 diffs)

With VBOX_WITH_NESTED_HWVIRT defined, the AMD group-7 decoders now defer to the new C
implementations instead of being #UD stubs:

    /** Opcode 0x0f 0x01 0xda. */
    FNIEMOP_DEF(iemOp_Grp7_Amd_vmload)
    {
        IEMOP_MNEMONIC(vmload, "vmload");
        return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmload);
    }

    /** Opcode 0x0f 0x01 0xdb. */
    FNIEMOP_DEF(iemOp_Grp7_Amd_vmsave)
    {
        IEMOP_MNEMONIC(vmsave, "vmsave");
        return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmsave);
    }

    /** Opcode 0x0f 0x01 0xdc. */
    FNIEMOP_DEF(iemOp_Grp7_Amd_stgi)
    {
        IEMOP_MNEMONIC(stgi, "stgi");
        return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stgi);
    }

    /** Opcode 0x0f 0x01 0xdd. */
    FNIEMOP_DEF(iemOp_Grp7_Amd_clgi)
    {
        IEMOP_MNEMONIC(clgi, "clgi");
        return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clgi);
    }

    /** Opcode 0x0f 0x01 0xdf. */
    FNIEMOP_DEF(iemOp_Grp7_Amd_invlpga)
    {
        IEMOP_MNEMONIC(invlpga, "invlpga");
        return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_invlpga);
    }

Without VBOX_WITH_NESTED_HWVIRT the vmload, vmsave, stgi, clgi and invlpga entries remain
FNIEMOP_UD_STUBs (the invlpga stub moves inside this #else branch), and skinit
(0x0f 0x01 0xde) stays a FNIEMOP_UD_STUB in both configurations.
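All of these are fixed three-byte encodings (0F 01 followed by a register-form ModR/M
byte), which is also why the ring-0 exit handlers in HMSVMR0.cpp below pass an
instruction length of 3 to IEM. A small lookup of the ModR/M bytes decoded above; the
table and program are illustrative only, with the byte values taken from the opcode
comments in this file:

    #include <stdio.h>

    /* ModR/M byte -> mnemonic for the AMD group-7 encodings (0F 01 DAh..DFh). */
    static const struct { unsigned char bModRm; const char *pszMnemonic; } g_aAmdGrp7[] =
    {
        { 0xda, "vmload"  },
        { 0xdb, "vmsave"  },
        { 0xdc, "stgi"    },
        { 0xdd, "clgi"    },
        { 0xde, "skinit"  },   /* still a #UD stub in this changeset */
        { 0xdf, "invlpga" },
    };

    int main(void)
    {
        for (unsigned i = 0; i < sizeof(g_aAmdGrp7) / sizeof(g_aAmdGrp7[0]); i++)
            printf("0F 01 %02X  %s\n", g_aAmdGrp7[i].bModRm, g_aAmdGrp7[i].pszMnemonic);
        return 0;
    }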
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
Diff r65912 -> r65933 (10 diffs)

The segment-attribute conversion macros (HMSVM_CPU_2_VMCB_SEG_ATTR,
HMSVM_VMCB_2_CPU_SEG_ATTR) and the HMSVM_LOAD_SEG_REG / HMSVM_SAVE_SEG_REG macros are
removed from this file; they now live in hm_svm.h as HMSVM_SEG_REG_COPY_TO_VMCB /
HMSVM_SEG_REG_COPY_FROM_VMCB so that IEM can share them. All users in the guest
segment-register load and save code are converted accordingly, for example:

    -    HMSVM_LOAD_SEG_REG(CS, cs);
    +    HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, CS, cs);

    -    HMSVM_SAVE_SEG_REG(TR, tr);
    +    HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, pVmcb, TR, tr);

(the same replacement applies to CS, SS, DS, ES, FS, GS, TR and LDTR on both the load and
the save paths).

Two new exit handlers are declared (hmR0SvmExitVmload, hmR0SvmExitVmsave) and wired into
the #VMEXIT dispatcher: with VBOX_WITH_NESTED_HWVIRT, SVM_EXIT_VMLOAD and SVM_EXIT_VMSAVE
are now routed to these handlers instead of falling through to
hmR0SvmExitSetPendingXcptUD together with INVLPGA, RSM, VMRUN and SKINIT.

The CLGI and STGI handlers get corrected doc comments (they previously claimed to handle
RDPMC) and no longer check pVM->cpum.ro.GuestFeatures.fSvm before forwarding to IEM; they
now call IEMExecDecodedClgi / IEMExecDecodedStgi unconditionally:

    /**
     * \#VMEXIT handler for CLGI (SVM_EXIT_CLGI). Conditional \#VMEXIT.
     */
    HMSVM_EXIT_DECL hmR0SvmExitClgi(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
    {
        HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
        /** @todo Stat. */
        /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClgi); */
        VBOXSTRICTRC rcStrict = IEMExecDecodedClgi(pVCpu, 3);
        return VBOXSTRICTRC_VAL(rcStrict);
    }

The STGI handler is identical apart from the names, and the two new handlers follow the
same pattern:

    /**
     * \#VMEXIT handler for VMLOAD (SVM_EXIT_VMLOAD). Conditional \#VMEXIT.
     */
    HMSVM_EXIT_DECL hmR0SvmExitVmload(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
    {
        HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
        /** @todo Stat. */
        /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmload); */
        VBOXSTRICTRC rcStrict = IEMExecDecodedVmload(pVCpu, 3);
        return VBOXSTRICTRC_VAL(rcStrict);
    }

    /**
     * \#VMEXIT handler for VMSAVE (SVM_EXIT_VMSAVE). Conditional \#VMEXIT.
     */
    HMSVM_EXIT_DECL hmR0SvmExitVmsave(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
    {
        HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
        /** @todo Stat. */
        /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmsave); */
        VBOXSTRICTRC rcStrict = IEMExecDecodedVmsave(pVCpu, 3);
        return VBOXSTRICTRC_VAL(rcStrict);
    }