Changeset 98980 in vbox
- Timestamp:
- Mar 15, 2023 11:46:48 AM (19 months ago)
- Location:
- trunk
- Files:
- 1 added
- 8 edited
- 1 copied
- include/VBox/vmm/gim.h (modified) (1 diff)
- include/VBox/vmm/iem-armv8.h (added)
- include/VBox/vmm/iem-x86-amd64.h (copied from trunk/include/VBox/vmm/iem.h) (5 diffs)
- include/VBox/vmm/iem.h (modified) (4 diffs)
- src/VBox/VMM/Makefile.kmk (modified) (1 diff)
- src/VBox/VMM/VMMR3/EM.cpp (modified) (26 diffs)
- src/VBox/VMM/VMMR3/EMR3Nem.cpp (modified) (7 diffs)
- src/VBox/VMM/VMMR3/IEMR3.cpp (modified) (7 diffs)
- src/VBox/VMM/include/GIMHvInternal.h (modified) (1 diff)
- src/VBox/VMM/include/GIMKvmInternal.h (modified) (1 diff)
Legend:
- Unmodified (context line, no prefix)
- Added (+)
- Removed (-)
- Indentation-only change (±)
- Hunk markers @@ -<old line> +<new line> @@ give the starting line in the old and new revision.
trunk/include/VBox/vmm/gim.h
(r98103 → r98980)

 @@ -207 +207 @@
 VMM_INT_DECL(VBOXSTRICTRC)  GIMXcptUD(PVMCPUCC pVCpu, PCPUMCTX pCtx, PDISCPUSTATE pDis, uint8_t *pcbInstr);
 VMM_INT_DECL(bool)          GIMShouldTrapXcptUD(PVMCPUCC pVCpu);
+#if !defined(VBOX_VMM_TARGET_ARMV8)
 VMM_INT_DECL(VBOXSTRICTRC)  GIMReadMsr(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue);
 VMM_INT_DECL(VBOXSTRICTRC)  GIMWriteMsr(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue);
+#endif
 VMM_INT_DECL(int)           GIMQueryHypercallOpcodeBytes(PVM pVM, void *pvBuf, size_t cbBuf,
                                                          size_t *pcbWritten, uint16_t *puDisOpcode);
trunk/include/VBox/vmm/iem-x86-amd64.h
(r98959 → r98980)

 @@ -34 +34 @@
  */

-#ifndef VBOX_INCLUDED_vmm_iem_h
-#define VBOX_INCLUDED_vmm_iem_h
+#ifndef VBOX_INCLUDED_vmm_iem_x86_amd64_h
+#define VBOX_INCLUDED_vmm_iem_x86_amd64_h
 #ifndef RT_WITHOUT_PRAGMA_ONCE
 # pragma once
 #endif

-#include <VBox/types.h>
-#include <VBox/vmm/trpm.h>
 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
 # include <VBox/vmm/hm_vmx.h>
 #endif
-#include <iprt/assert.h>


 RT_C_DECLS_BEGIN

-/** @defgroup grp_iem       The Interpreted Execution Manager API.
- * @ingroup grp_vmm
- * @{
- */
-
-/** @name IEMXCPTRAISEINFO_XXX - Extra info. on a recursive exception situation.
- *
- * This is primarily used by HM for working around a PGM limitation (see
- * @bugref{6607}) and special NMI/IRET handling.  In the future, this may be
- * used for diagnostics.
- *
- * @{
- */
-typedef uint32_t IEMXCPTRAISEINFO;
-/** Pointer to a IEMXCPTINFO type. */
-typedef IEMXCPTRAISEINFO *PIEMXCPTRAISEINFO;
-/** No addition info. available. */
-#define IEMXCPTRAISEINFO_NONE                 RT_BIT_32(0)
-/** Delivery of a \#AC caused another \#AC. */
-#define IEMXCPTRAISEINFO_AC_AC                RT_BIT_32(1)
-/** Delivery of a \#PF caused another \#PF. */
-#define IEMXCPTRAISEINFO_PF_PF                RT_BIT_32(2)
-/** Delivery of a \#PF caused some contributory exception. */
-#define IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT RT_BIT_32(3)
-/** Delivery of an external interrupt caused an exception. */
-#define IEMXCPTRAISEINFO_EXT_INT_XCPT         RT_BIT_32(4)
-/** Delivery of an external interrupt caused an \#PF. */
-#define IEMXCPTRAISEINFO_EXT_INT_PF           RT_BIT_32(5)
-/** Delivery of a software interrupt caused an exception. */
-#define IEMXCPTRAISEINFO_SOFT_INT_XCPT        RT_BIT_32(6)
-/** Delivery of an NMI caused an exception. */
-#define IEMXCPTRAISEINFO_NMI_XCPT             RT_BIT_32(7)
-/** Delivery of an NMI caused a \#PF. */
-#define IEMXCPTRAISEINFO_NMI_PF               RT_BIT_32(8)
-/** Can re-execute the instruction at CS:RIP. */
-#define IEMXCPTRAISEINFO_CAN_REEXEC_INSTR     RT_BIT_32(9)
-/** @} */
-
-
-/** @name IEMXCPTRAISE_XXX - Ways to handle a recursive exception condition.
+/** @addtogroup grp_iem
  * @{ */
-typedef enum IEMXCPTRAISE
-{
-    /** Raise the current (second) exception. */
-    IEMXCPTRAISE_CURRENT_XCPT = 0,
-    /** Re-raise the previous (first) event (for HM, unused by IEM). */
-    IEMXCPTRAISE_PREV_EVENT,
-    /** Re-execute instruction at CS:RIP (for HM, unused by IEM). */
-    IEMXCPTRAISE_REEXEC_INSTR,
-    /** Raise a \#DF exception. */
-    IEMXCPTRAISE_DOUBLE_FAULT,
-    /** Raise a triple fault. */
-    IEMXCPTRAISE_TRIPLE_FAULT,
-    /** Cause a CPU hang. */
-    IEMXCPTRAISE_CPU_HANG,
-    /** Invalid sequence of events. */
-    IEMXCPTRAISE_INVALID = 0x7fffffff
-} IEMXCPTRAISE;
-/** Pointer to a IEMXCPTRAISE type. */
-typedef IEMXCPTRAISE *PIEMXCPTRAISE;
-/** @} */
-
-
-/** @name Operand or addressing mode.
- * @{ */
-typedef uint8_t IEMMODE;
-#define IEMMODE_16BIT 0
-#define IEMMODE_32BIT 1
-#define IEMMODE_64BIT 2
-/** @} */
-
-
-/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
- * @{ */
-/** CPU exception. */
-#define IEM_XCPT_FLAGS_T_CPU_XCPT   RT_BIT_32(0)
-/** External interrupt (from PIC, APIC, whatever). */
-#define IEM_XCPT_FLAGS_T_EXT_INT    RT_BIT_32(1)
-/** Software interrupt (int or into, not bound).
- * Returns to the following instruction */
-#define IEM_XCPT_FLAGS_T_SOFT_INT   RT_BIT_32(2)
-/** Takes an error code. */
-#define IEM_XCPT_FLAGS_ERR          RT_BIT_32(3)
-/** Takes a CR2. */
-#define IEM_XCPT_FLAGS_CR2          RT_BIT_32(4)
-/** Generated by the breakpoint instruction. */
-#define IEM_XCPT_FLAGS_BP_INSTR     RT_BIT_32(5)
-/** Generated by a DRx instruction breakpoint and RF should be cleared. */
-#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
-/** Generated by the icebp instruction. */
-#define IEM_XCPT_FLAGS_ICEBP_INSTR  RT_BIT_32(7)
-/** Generated by the overflow instruction. */
-#define IEM_XCPT_FLAGS_OF_INSTR     RT_BIT_32(8)
-/** @} */


 @@ -180 +85 @@
  * dicates the behaviour here. */
 #define IEMTARGETCPU_CURRENT    UINT32_C(9)
-/** @} */
-
-
-/** @name IEM status codes.
- *
- * Not quite sure how this will play out in the end, just aliasing safe status
- * codes for now.
- *
- * @{ */
-#define VINF_IEM_RAISED_XCPT    VINF_EM_RESCHEDULE
 /** @} */


 @@ -288 +183 @@
 #endif

-VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu);
-VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten);
-VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
-                                                   const void *pvOpcodeBytes, size_t cbOpcodeBytes);
-VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten);
-VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
-                                                         const void *pvOpcodeBytes, size_t cbOpcodeBytes);
-VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu);
-VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions);
-/** Statistics returned by IEMExecForExits. */
-typedef struct IEMEXECFOREXITSTATS
-{
-    uint32_t cInstructions;
-    uint32_t cExits;
-    uint32_t cMaxExitDistance;
-    uint32_t cReserved;
-} IEMEXECFOREXITSTATS;
-/** Pointer to statistics returned by IEMExecForExits. */
-typedef IEMEXECFOREXITSTATS *PIEMEXECFOREXITSTATS;
-VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
-                                      uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats);
-VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu);
-VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
-                                         uint8_t cbInstr);
-
-VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp);
-VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp);
-
-VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu);
-VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr);
-VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu);
-VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller);
-VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr,
-                                     uint64_t *puCr2);
-VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
-                                                    uint8_t uCurVector, PIEMXCPTRAISEINFO pXcptRaiseInfo);
-
 /** @name Given Instruction Interpreters
  * @{ */


 @@ -399 +257 @@
 /** @} */

-/** @defgroup grp_iem_r3    The IEM Host Context Ring-3 API.
+/** @defgroup grp_iem_r0    The IEM Host Context Ring-0 API.
  * @{
  */


 @@ -405 +263 @@
 /** @} */

-
-/** @defgroup grp_iem_r3    The IEM Host Context Ring-3 API.
- * @{
- */
-VMMR3DECL(int)      IEMR3Init(PVM pVM);
-VMMR3DECL(int)      IEMR3Term(PVM pVM);
-VMMR3DECL(void)     IEMR3Relocate(PVM pVM);
-VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict);
 /** @} */

-/** @} */
-
 RT_C_DECLS_END

-#endif /* !VBOX_INCLUDED_vmm_iem_h */
+#endif /* !VBOX_INCLUDED_vmm_iem_x86_amd64_h */
trunk/include/VBox/vmm/iem.h
(r98103 → r98980)

 @@ -42 +42 @@
 #include <VBox/types.h>
 #include <VBox/vmm/trpm.h>
-#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
-# include <VBox/vmm/hm_vmx.h>
+#include <iprt/assert.h>
+
+#ifdef VBOX_VMM_TARGET_ARMV8
+# include <VBox/vmm/iem-armv8.h>
+#else
+# include <VBox/vmm/iem-x86-amd64.h>
 #endif
-#include <iprt/assert.h>


 @@ -146 +149 @@


-/** @name IEMTARGETCPU_XXX - IEM target CPU specification.
- *
- * This is a gross simpliciation of CPUMMICROARCH for dealing with really old
- * CPUs which didn't have much in the way of hinting at supported instructions
- * and features.  This slowly changes with the introduction of CPUID with the
- * Intel Pentium.
- *
- * @{
- */
-/** The dynamic target CPU mode is for getting thru the BIOS and then use
- *  the debugger or modifying instruction behaviour (e.g. HLT) to switch to a
- *  different target CPU. */
-#define IEMTARGETCPU_DYNAMIC    UINT32_C(0)
-/** Intel 8086/8088.  */
-#define IEMTARGETCPU_8086       UINT32_C(1)
-/** NEC V20/V30.
- *  @remarks must be between 8086 and 80186. */
-#define IEMTARGETCPU_V20        UINT32_C(2)
-/** Intel 80186/80188.  */
-#define IEMTARGETCPU_186        UINT32_C(3)
-/** Intel 80286.  */
-#define IEMTARGETCPU_286        UINT32_C(4)
-/** Intel 80386.  */
-#define IEMTARGETCPU_386        UINT32_C(5)
-/** Intel 80486.  */
-#define IEMTARGETCPU_486        UINT32_C(6)
-/** Intel Pentium .  */
-#define IEMTARGETCPU_PENTIUM    UINT32_C(7)
-/** Intel PentiumPro.  */
-#define IEMTARGETCPU_PPRO       UINT32_C(8)
-/** A reasonably current CPU, probably newer than the pentium pro when it comes
- *  to the feature set and behaviour.  Generally the CPUID info and CPU vendor
- *  dicates the behaviour here. */
-#define IEMTARGETCPU_CURRENT    UINT32_C(9)
-/** @} */
-
-
 /** @name IEM status codes.
  *


 @@ -192 +158 @@
 /** @} */

-
-/** The CPUMCTX_EXTRN_XXX mask required to be cleared when interpreting anything.
- * IEM will ASSUME the caller of IEM APIs has ensured these are already present. */
-#define IEM_CPUMCTX_EXTRN_MUST_MASK                 (  CPUMCTX_EXTRN_GPRS_MASK \
-                                                     | CPUMCTX_EXTRN_RIP \
-                                                     | CPUMCTX_EXTRN_RFLAGS \
-                                                     | CPUMCTX_EXTRN_SS \
-                                                     | CPUMCTX_EXTRN_CS \
-                                                     | CPUMCTX_EXTRN_CR0 \
-                                                     | CPUMCTX_EXTRN_CR3 \
-                                                     | CPUMCTX_EXTRN_CR4 \
-                                                     | CPUMCTX_EXTRN_APIC_TPR \
-                                                     | CPUMCTX_EXTRN_EFER \
-                                                     | CPUMCTX_EXTRN_DR7 )
-/** The CPUMCTX_EXTRN_XXX mask needed when injecting an exception/interrupt.
- * IEM will import missing bits, callers are encouraged to make these registers
- * available prior to injection calls if fetching state anyway. */
-#define IEM_CPUMCTX_EXTRN_XCPT_MASK                 (  IEM_CPUMCTX_EXTRN_MUST_MASK \
-                                                     | CPUMCTX_EXTRN_CR2 \
-                                                     | CPUMCTX_EXTRN_SREG_MASK \
-                                                     | CPUMCTX_EXTRN_TABLE_MASK )
-/** The CPUMCTX_EXTRN_XXX mask required to be cleared when calling any
- * IEMExecDecoded API not using memory.  IEM will ASSUME the caller of IEM
- * APIs has ensured these are already present.
- * @note ASSUMES execution engine has checked for instruction breakpoints
- *       during decoding. */
-#define IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK  (  CPUMCTX_EXTRN_RIP \
-                                                     | CPUMCTX_EXTRN_RFLAGS \
-                                                     | CPUMCTX_EXTRN_SS  /* for CPL */ \
-                                                     | CPUMCTX_EXTRN_CS  /* for mode */ \
-                                                     | CPUMCTX_EXTRN_CR0 /* for mode */ \
-                                                     | CPUMCTX_EXTRN_EFER /* for mode */ )
-/** The CPUMCTX_EXTRN_XXX mask required to be cleared when calling any
- * IEMExecDecoded API using memory.  IEM will ASSUME the caller of IEM
- * APIs has ensured these are already present.
- * @note ASSUMES execution engine has checked for instruction breakpoints
- *       during decoding. */
-#define IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK     (  IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK \
-                                                     | CPUMCTX_EXTRN_CR3 /* for page tables */ \
-                                                     | CPUMCTX_EXTRN_CR4 /* for mode paging mode */ \
-                                                     | CPUMCTX_EXTRN_DR7 /* for memory breakpoints */ )
-
-#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
-/** The CPUMCTX_EXTRN_XXX mask needed when calling IEMExecDecodedVmlaunchVmresume().
- * IEM will ASSUME the caller has ensured these are already present. */
-# define IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK         (  IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK \
-                                                     | CPUMCTX_EXTRN_CR2 \
-                                                     | CPUMCTX_EXTRN_HWVIRT )
-
-/** The CPUMCTX_EXTRN_XXX mask that the IEM VM-exit code will import on-demand when
- * needed, primarily because there are several IEM VM-exit interface functions and
- * some of which may not cause a VM-exit at all.
- *
- * This is currently unused, but keeping it here in case we can get away a bit more
- * fine-grained state handling.
- *
- * @note Update HM_CHANGED_VMX_VMEXIT_MASK if something here changes. */
-# define IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK          (  CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 \
-                                                     | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6 \
-                                                     | CPUMCTX_EXTRN_EFER \
-                                                     | CPUMCTX_EXTRN_SYSENTER_MSRS \
-                                                     | CPUMCTX_EXTRN_OTHER_MSRS /* for PAT MSR */ \
-                                                     | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS \
-                                                     | CPUMCTX_EXTRN_SREG_MASK \
-                                                     | CPUMCTX_EXTRN_TR \
-                                                     | CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_IDTR \
-                                                     | CPUMCTX_EXTRN_HWVIRT )
-#endif
-
-#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-/** The CPUMCTX_EXTRN_XXX mask needed when calling IEMExecSvmVmexit().
- * IEM will ASSUME the caller has ensured these are already present. */
-# define IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK          (  CPUMCTX_EXTRN_RSP \
-                                                     | CPUMCTX_EXTRN_RAX \
-                                                     | CPUMCTX_EXTRN_RIP \
-                                                     | CPUMCTX_EXTRN_RFLAGS \
-                                                     | CPUMCTX_EXTRN_CS \
-                                                     | CPUMCTX_EXTRN_SS \
-                                                     | CPUMCTX_EXTRN_DS \
-                                                     | CPUMCTX_EXTRN_ES \
-                                                     | CPUMCTX_EXTRN_GDTR \
-                                                     | CPUMCTX_EXTRN_IDTR \
-                                                     | CPUMCTX_EXTRN_CR_MASK \
-                                                     | CPUMCTX_EXTRN_EFER \
-                                                     | CPUMCTX_EXTRN_DR6 \
-                                                     | CPUMCTX_EXTRN_DR7 \
-                                                     | CPUMCTX_EXTRN_OTHER_MSRS \
-                                                     | CPUMCTX_EXTRN_HWVIRT \
-                                                     | CPUMCTX_EXTRN_APIC_TPR \
-                                                     | CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ)
-
-/** The CPUMCTX_EXTRN_XXX mask needed when calling IEMExecDecodedVmrun().
- * IEM will ASSUME the caller has ensured these are already present. */
-# define IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK           IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK
-#endif

 VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu);


 @@ -325 +196 @@
                                                     uint8_t uCurVector, PIEMXCPTRAISEINFO pXcptRaiseInfo);

-/** @name Given Instruction Interpreters
- * @{ */
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
-                                                bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
-                                               bool fRepPrefix, uint8_t cbInstr, bool fIoChecked);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
-                                                 uint64_t uType);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr);
-
-#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPUCC pVCpu, uint8_t cbInstr);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPUCC pVCpu, uint8_t cbInstr);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPUCC pVCpu, uint8_t cbInstr);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPUCC pVCpu, uint8_t cbInstr);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPUCC pVCpu, uint8_t cbInstr);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPUCC pVCpu, uint8_t cbInstr);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
-#endif
-
-#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
-VMM_INT_DECL(void)         IEMReadVmxVmcsField(PCVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t *pu64Dst);
-VMM_INT_DECL(void)         IEMWriteVmxVmcsField(PVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t u64Val);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Val, bool fWrite);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicWrite(PVMCPUCC pVCpu);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPUCC pVCpu);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPUCC pVCpu, uint8_t uVector, bool fIntPending);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcpt(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcptNmi(PVMCPUCC pVCpu);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTripleFault(PVMCPUCC pVCpu);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPUCC pVCpu, uint8_t uVector);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstrWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTrapLike(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTaskSwitch(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicAccess(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t uExitQual);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmlaunchVmresume(PVMCPUCC pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPUCC pVCpu, uint8_t cbInstr);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvvpid(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
-# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvept(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitEptViolation(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo);
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitEptMisconfig(PVMCPUCC pVCpu, RTGCPHYS GCPhysAddr, PCVMXVEXITEVENTINFO pExitEventInfo);
-# endif
-#endif
-/** @} */
-
-/** @defgroup grp_iem_r3    The IEM Host Context Ring-3 API.
- * @{
- */
-VMMR0_INT_DECL(int) IEMR0InitVM(PGVM pGVM);
-/** @} */
-
-
 /** @defgroup grp_iem_r3    The IEM Host Context Ring-3 API.
  * @{
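The net effect of the iem.h change is that the umbrella header now selects a target-specific header at compile time: the x86-only pieces (the IEMTARGETCPU_XXX values, the IEM_CPUMCTX_EXTRN_XXX masks and the decoded-instruction/nested-hwvirt interpreters) move to iem-x86-amd64.h, while target-independent declarations such as IEMExecOne() and the IEMXCPTRAISE/IEMMODE types stay in iem.h. A minimal consumer-side sketch of that arrangement follows; the file and function names in it are illustrative only and not part of this changeset:

    /* Hypothetical consumer that only needs the target-independent IEM API.
     * It keeps including the umbrella header and never names a per-target header;
     * iem.h pulls in iem-armv8.h or iem-x86-amd64.h based on VBOX_VMM_TARGET_ARMV8. */
    #include <VBox/vmm/iem.h>

    static VBOXSTRICTRC demoStepOneGuestInstruction(PVMCPUCC pVCpu)
    {
        /* IEMExecOne() remains declared in the common iem.h for both targets. */
        return IEMExecOne(pVCpu);
    }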
trunk/src/VBox/VMM/Makefile.kmk
(r98970 → r98980)

 @@ -370 +370 @@
  	VMMR3/EM.cpp \
  	VMMR3/EMR3Dbg.cpp \
- 	VMMR3/EMHM.cpp \
  	VMMR3/EMR3Nem.cpp \
  	VMMR3/GCM.cpp \
  	VMMR3/GIM.cpp \
- 	VMMR3/GIMHv.cpp \
- 	VMMR3/GIMKvm.cpp \
- 	VMMR3/GIMMinimal.cpp \
  	VMMR3/IEMR3.cpp \
  	VMMR3/IOM.cpp \
trunk/src/VBox/VMM/VMMR3/EM.cpp
(r98103 → r98980)

 @@ -695 +695 @@
 
 
+#if !defined(VBOX_VMM_TARGET_ARMV8)
 /**
  * Handle pending ring-3 I/O port write.

 @@ -838 +839 @@
     return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, emR3ExecuteSplitLockInstructionRendezvous, pVCpu);
 }
+#endif /* VBOX_VMM_TARGET_ARMV8 */
 
 

 @@ -1040 +1042 @@
 static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
 {
+#if defined(VBOX_VMM_TARGET_ARMV8)
+    Log3(("emR3RemStep: pc=%08x\n", CPUMGetGuestFlatPC(pVCpu)));
+#else
     Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
+#endif
 
     int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
 
+#if defined(VBOX_VMM_TARGET_ARMV8)
+    Log3(("emR3RemStep: pc=%08x\n", CPUMGetGuestFlatPC(pVCpu)));
+#else
     Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
+#endif
     return rc;
 }

 @@ -1068 +1078 @@
 {
 #ifdef LOG_ENABLED
+# if defined(VBOX_VMM_TARGET_ARMV8)
+    Log3(("EM: pc=%08x\n", CPUMGetGuestFlatPC(pVCpu)));
+# else
     uint32_t cpl = CPUMGetGuestCPL(pVCpu);
 

 @@ -1074 +1087 @@
     else
         Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
+# endif
 #endif
     STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);

 @@ -1209 +1223 @@
     }
     Log(("Single step END:\n"));
+#if defined(VBOX_VMM_TARGET_ARMV8)
+    AssertReleaseFailed();
+#else
     CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
+#endif
     pVCpu->em.s.enmState = enmOldState;
     return VINF_EM_RESCHEDULE;

 @@ -1230 +1248 @@
 static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
 {
+#if defined(VBOX_VMM_TARGET_ARMV8)
+    LogFlow(("emR3ExecuteIemThenRem: %RGv\n", CPUMGetGuestFlatPC(pVCpu)));
+#else
     LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
+#endif
     *pfFFDone = false;
 

 @@ -1335 +1357 @@
         PDMCritSectBothFF(pVM, pVCpu);
 
+#if !defined(VBOX_VMM_TARGET_ARMV8)
         /* Update CR3 (Nested Paging case for HM). */
         if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))

 @@ -1344 +1367 @@
             Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
         }
+#endif
 
         /* IEM has pending work (typically memory write after INS instruction). */

 @@ -1372 +1396 @@
 
 
+#if !defined(VBOX_VMM_TARGET_ARMV8)
 /**
  * Helper for emR3ForcedActions() for VMX external interrupt VM-exit.

 @@ -1457 +1482 @@
     return VINF_NO_CHANGE;
 }
+#endif
 
 

 @@ -1712 +1738 @@
         TMR3TimerQueuesDo(pVM);
 
+#if !defined(VBOX_VMM_TARGET_ARMV8)
     /*
      * Pick up asynchronously posted interrupts into the APIC.

 @@ -1749 +1776 @@
      * delivered. */
 
±# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER))
     {

 @@ -1788 +1815 @@
         Assert(!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER));
     }
±# endif
 
     /*

 @@ -1821 +1848 @@
             if (0)
             { }
±# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
             /*
              * VMX NMI-window VM-exit.

 @@ -1842 +1869 @@
                 UPDATE_RC();
             }
±# endif
             /*
              * NMIs (take priority over external interrupts).

 @@ -1849 +1876 @@
                 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
             {
±# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
                 if (   fInVmxNonRootMode
                     && CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_NMI_EXIT))

 @@ -1858 +1885 @@
                 }
                 else
±# endif
±# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
                 if (   fInSvmHwvirtMode
                     && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_NMI))

 @@ -1869 +1896 @@
                 }
                 else
±# endif
                 {
                     rc2 = TRPMAssertTrap(pVCpu, X86_XCPT_NMI, TRPM_TRAP);

 @@ -1888 +1915 @@
             }
         }
±# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
         /*
          * VMX Interrupt-window VM-exits.

 @@ -1904 +1931 @@
             UPDATE_RC();
         }
±# endif
±# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
         /** @todo NSTSVM: Handle this for SVM here too later not when an interrupt is
          *        actually pending like we currently do. */
±# endif
         /*
          * External interrupts.

 @@ -1953 +1980 @@
                     rc2 = VINF_EM_RESCHEDULE;
                 }
±# ifdef VBOX_STRICT
                 if (fInjected)
                     rcIrq = rc2;
±# endif
             }
             UPDATE_RC();

 @@ -1973 +2000 @@
                 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
                 rc2 = VINF_EM_RESCHEDULE;
±# ifdef VBOX_STRICT
                 rcIrq = rc2;
±# endif
             }
             UPDATE_RC();

 @@ -1982 +2009 @@
         } /* CPUMGetGuestGif */
     }
+#else
+    bool fWakeupPending = false;
+    AssertReleaseFailed();
+    /** @todo */
+#endif
 
     /*

 @@ -2210 +2242 @@
     fFFDone = false;
 
-#ifdef VBOX_STRICT
+#if defined(VBOX_STRICT) && !defined(VBOX_VMM_TARGET_ARMV8)
     CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
 #endif

 @@ -2643 +2675 @@
                 else
                 {
-                    rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
+#if defined(VBOX_VMM_TARGET_ARMV8)
+                    bool fIgnoreInterrupts = false;
+                    AssertReleaseFailed();
+#else
+                    bool fIgnoreInterrupts = !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF);
+#endif
+                    rc = VMR3WaitHalted(pVM, pVCpu, fIgnoreInterrupts);
                     /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
                        check VMCPU_FF_UPDATE_APIC here. */
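Most of the EM.cpp hunks apply the same pattern: x86-specific guest state access (CS:RIP, RFLAGS.IF, CR3/APIC/HM force flags) is either compiled out for the ARMv8 target or given a placeholder branch that asserts until an ARM path exists. A hedged, self-contained sketch of that pattern; the helper function below is illustrative and not part of EM.cpp, only CPUMGetGuestEFlags(), X86_EFL_IF and AssertReleaseFailed() are taken from the hunks above:

    /* Illustrative helper showing the per-target guard style used throughout this changeset. */
    static bool demoShouldIgnoreGuestInterrupts(PVMCPU pVCpu)
    {
    #if defined(VBOX_VMM_TARGET_ARMV8)
        /* ARMv8 path not implemented yet in this changeset; fail loudly, as EM.cpp does. */
        AssertReleaseFailed();
        return false;
    #else
        /* x86/AMD64: external interrupts are ignored while RFLAGS.IF is clear. */
        return !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF);
    #endif
    }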
trunk/src/VBox/VMM/VMMR3/EMR3Nem.cpp
(r98103 → r98980)

 @@ -96 +96 @@
         return VINF_EM_RESCHEDULE;
 
+#if defined(VBOX_VMM_TARGET_ARMV8)
+    uint64_t const uOldPc = pVCpu->cpum.GstCtx.Pc.u64;
+#else
     uint64_t const uOldRip = pVCpu->cpum.GstCtx.rip;
+#endif
     for (;;)
     {

 @@ -142 +146 @@
          * Done?
          */
+#if defined(VBOX_VMM_TARGET_ARMV8)
+        CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_PC);
+        if (   (rcStrict != VINF_SUCCESS && rcStrict != VINF_EM_DBG_STEPPED)
+            || !(fFlags & EM_ONE_INS_FLAGS_RIP_CHANGE)
+            || pVCpu->cpum.GstCtx.Pc.u64 != uOldPc)
+        {
+            if (rcStrict == VINF_SUCCESS && pVCpu->cpum.GstCtx.Pc.u64 != uOldPc)
+                rcStrict = VINF_EM_DBG_STEPPED;
+            Log(("emR3NemSingleInstruction: returns %Rrc (pc %llx -> %llx)\n",
+                 VBOXSTRICTRC_VAL(rcStrict), uOldPc, pVCpu->cpum.GstCtx.Pc.u64));
+            CPUM_IMPORT_EXTRN_RET(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK);
+            return rcStrict;
+        }
+#else
         CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
         if (   (rcStrict != VINF_SUCCESS && rcStrict != VINF_EM_DBG_STEPPED)

 @@ -154 +172 @@
             return rcStrict;
         }
+#endif
     }
 }

 @@ -181 +200 @@
      * Log it.
      */
+#ifdef VBOX_VMM_TARGET_ARMV8
+    Log(("EMINS: %RGv SP_EL0=%RGv SP_EL1=%RGv\n", (RTGCPTR)pVCpu->cpum.GstCtx.Pc.u64,
+         (RTGCPTR)pVCpu->cpum.GstCtx.aSpReg[0].u64,
+         (RTGCPTR)pVCpu->cpum.GstCtx.aSpReg[1].u64));
+    if (pszPrefix)
+    {
+        DBGFR3_INFO_LOG(pVM, pVCpu, "cpumguest", pszPrefix);
+        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, pszPrefix);
+    }
+# else
     Log(("EMINS: %04x:%RGv RSP=%RGv\n", pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip, (RTGCPTR)pVCpu->cpum.GstCtx.rsp));
     if (pszPrefix)

 @@ -187 +216 @@
         DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, pszPrefix);
     }
+# endif
 #endif
 

 @@ -340 +370 @@
     VBOXSTRICTRC rcStrict = VERR_IPE_UNINITIALIZED_STATUS;
 
+#ifdef VBOX_VMM_TARGET_ARMV8
+    LogFlow(("emR3NemExecute%d: (pc=%RGv)\n", pVCpu->idCpu, (RTGCPTR)pVCpu->cpum.GstCtx.Pc.u64));
+#else
     LogFlow(("emR3NemExecute%d: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip));
+#endif
     *pfFFDone = false;
 

 @@ -374 +408 @@
     }
 
-#ifdef LOG_ENABLED
+#if defined(LOG_ENABLED) && !defined(VBOX_VMM_TARGET_ARMV8)
     /*
      * Log important stuff before entering GC.
trunk/src/VBox/VMM/VMMR3/IEMR3.cpp
(r98103 → r98980)

 @@ -58 +58 @@
 
 
+#if !defined(VBOX_VMM_TARGET_ARMV8)
 static const char *iemGetTargetCpuName(uint32_t enmTargetCpu)
 {

 @@ -76 +77 @@
     }
 }
+#endif
 
 

 @@ -89 +91 @@
 VMMR3DECL(int) IEMR3Init(PVM pVM)
 {
-    int rc;
-
+#if !defined(VBOX_VMM_TARGET_ARMV8) && !defined(VBOX_WITHOUT_CPUID_HOST_CALL)
     /*
      * Read configuration.

 @@ -96 +97 @@
     PCFGMNODE pIem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "IEM");
 
-#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
     /** @cfgm{/IEM/CpuIdHostCall, boolean, false}
      * Controls whether the custom VBox specific CPUID host call interface is
      * enabled or not. */
 # ifdef DEBUG_bird
-    rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, true);
+    int rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, true);
 # else
-    rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, false);
+    int rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, false);
 # endif
     AssertLogRelRCReturn(rc, rc);

 @@ -170 +170 @@
                        "", "/IEM/CPU%u/Interrupts/%02x", idCpu, i);
 
-#if defined(VBOX_WITH_STATISTICS) && !defined(DOXYGEN_RUNNING)
+#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_STATISTICS) && !defined(DOXYGEN_RUNNING)
         /* Instruction statistics: */
 # define IEM_DO_INSTR_STAT(a_Name, a_szDesc) \

 @@ -188 +188 @@
         pVCpu->iem.s.enmCpuVendor     = CPUMGetGuestCpuVendor(pVM);
         pVCpu->iem.s.enmHostCpuVendor = CPUMGetHostCpuVendor(pVM);
+#if !defined(VBOX_VMM_TARGET_ARMV8)
         pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL
                                                || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_VIA /*??*/
                                                 ? IEMTARGETCPU_EFL_BEHAVIOR_INTEL : IEMTARGETCPU_EFL_BEHAVIOR_AMD;
±# if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
         if (pVCpu->iem.s.enmCpuVendor == pVCpu->iem.s.enmHostCpuVendor)
             pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
         else
±# endif
             pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
-
-#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
+#else
+        pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
+        pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
+#endif
+
+#if !defined(VBOX_VMM_TARGET_ARMV8) && (IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC)
         switch (pVM->cpum.ro.GuestFeatures.enmMicroarch)
         {

 @@ -240 +245 @@
     }
 
-#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX)
     /*
      * Register the per-VM VMX APIC-access page handler type.
trunk/src/VBox/VMM/include/GIMHvInternal.h
(r98103 → r98980)

 @@ -1370 +1370 @@
 VMM_INT_DECL(VBOXSTRICTRC)  gimHvHypercall(PVMCPUCC pVCpu, PCPUMCTX pCtx);
 VMM_INT_DECL(VBOXSTRICTRC)  gimHvHypercallEx(PVMCPUCC pVCpu, PCPUMCTX pCtx, unsigned uDisOpcode, uint8_t cbInstr);
+#if !defined(VBOX_VMM_TARGET_ARMV8)
 VMM_INT_DECL(VBOXSTRICTRC)  gimHvReadMsr(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue);
 VMM_INT_DECL(VBOXSTRICTRC)  gimHvWriteMsr(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uRawValue);
+#endif
 
 VMM_INT_DECL(void)          gimHvStartStimer(PVMCPUCC pVCpu, PCGIMHVSTIMER pHvStimer);
trunk/src/VBox/VMM/include/GIMKvmInternal.h
(r98103 → r98980)

 @@ -271 +271 @@
 VMM_INT_DECL(bool)          gimKvmAreHypercallsEnabled(PVMCPU pVCpu);
 VMM_INT_DECL(VBOXSTRICTRC)  gimKvmHypercall(PVMCPUCC pVCpu, PCPUMCTX pCtx);
+#if !defined(VBOX_VMM_TARGET_ARMV8)
 VMM_INT_DECL(VBOXSTRICTRC)  gimKvmReadMsr(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue);
 VMM_INT_DECL(VBOXSTRICTRC)  gimKvmWriteMsr(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uRawValue);
+#endif
 VMM_INT_DECL(bool)          gimKvmShouldTrapXcptUD(PVM pVM);
 VMM_INT_DECL(VBOXSTRICTRC)  gimKvmXcptUD(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTX pCtx, PDISCPUSTATE pDis, uint8_t *pcbInstr);