VirtualBox

Changeset 98980 in vbox


Timestamp: Mar 15, 2023 11:46:48 AM
Author: vboxsync
Message: VMM: More ARMv8 x86/amd64 separation work, get past IEM, bugref:10385
Location: trunk
Files: 1 added, 8 edited, 1 copied

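The pattern is the same throughout the changeset: x86/amd64-only declarations are fenced off with VBOX_VMM_TARGET_ARMV8, and the public IEM header is split into per-target headers behind a common umbrella. A minimal sketch of that umbrella dispatch, condensed from the iem.h hunk below (the contents of the newly added iem-armv8.h are not part of this changeset, so its API surface is an assumption here):

    /* Condensed sketch of the new <VBox/vmm/iem.h> layout, not the verbatim header. */
    #include <VBox/types.h>
    #include <VBox/vmm/trpm.h>
    #include <iprt/assert.h>

    #ifdef VBOX_VMM_TARGET_ARMV8
    # include <VBox/vmm/iem-armv8.h>      /* added in this changeset */
    #else
    # include <VBox/vmm/iem-x86-amd64.h>  /* copied from the old iem.h */
    #endif

    /* Target-independent entry points stay in iem.h itself, for example: */
    VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu);
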
  • trunk/include/VBox/vmm/gim.h

    r98103 r98980  
    207207VMM_INT_DECL(VBOXSTRICTRC)  GIMXcptUD(PVMCPUCC pVCpu, PCPUMCTX pCtx, PDISCPUSTATE pDis, uint8_t *pcbInstr);
    208208VMM_INT_DECL(bool)          GIMShouldTrapXcptUD(PVMCPUCC pVCpu);
     209#if !defined(VBOX_VMM_TARGET_ARMV8)
    209210VMM_INT_DECL(VBOXSTRICTRC)  GIMReadMsr(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue);
    210211VMM_INT_DECL(VBOXSTRICTRC)  GIMWriteMsr(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue);
     212#endif
    211213VMM_INT_DECL(int)           GIMQueryHypercallOpcodeBytes(PVM pVM, void *pvBuf, size_t cbBuf,
    212214                                                         size_t *pcbWritten, uint16_t *puDisOpcode);
  • trunk/include/VBox/vmm/iem-x86-amd64.h

    r98959 r98980  
    3434 */
    3535
    36 #ifndef VBOX_INCLUDED_vmm_iem_h
    37 #define VBOX_INCLUDED_vmm_iem_h
     36#ifndef VBOX_INCLUDED_vmm_iem_x86_amd64_h
     37#define VBOX_INCLUDED_vmm_iem_x86_amd64_h
    3838#ifndef RT_WITHOUT_PRAGMA_ONCE
    3939# pragma once
    4040#endif
    4141
    42 #include <VBox/types.h>
    43 #include <VBox/vmm/trpm.h>
    4442#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    4543# include <VBox/vmm/hm_vmx.h>
    4644#endif
    47 #include <iprt/assert.h>
    4845
    4946
    5047RT_C_DECLS_BEGIN
    5148
    52 /** @defgroup grp_iem       The Interpreted Execution Manager API.
    53  * @ingroup grp_vmm
    54  * @{
    55  */
    56 
    57 /** @name IEMXCPTRAISEINFO_XXX - Extra info. on a recursive exception situation.
    58  *
    59  * This is primarily used by HM for working around a PGM limitation (see
    60  * @bugref{6607}) and special NMI/IRET handling. In the future, this may be
    61  * used for diagnostics.
    62  *
    63  * @{
    64  */
    65 typedef uint32_t IEMXCPTRAISEINFO;
    66 /** Pointer to a IEMXCPTINFO type. */
    67 typedef IEMXCPTRAISEINFO *PIEMXCPTRAISEINFO;
    68 /** No addition info. available. */
    69 #define IEMXCPTRAISEINFO_NONE                    RT_BIT_32(0)
    70 /** Delivery of a \#AC caused another \#AC. */
    71 #define IEMXCPTRAISEINFO_AC_AC                   RT_BIT_32(1)
    72 /** Delivery of a \#PF caused another \#PF. */
    73 #define IEMXCPTRAISEINFO_PF_PF                   RT_BIT_32(2)
    74 /** Delivery of a \#PF caused some contributory exception. */
    75 #define IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT    RT_BIT_32(3)
    76 /** Delivery of an external interrupt caused an exception. */
    77 #define IEMXCPTRAISEINFO_EXT_INT_XCPT            RT_BIT_32(4)
    78 /** Delivery of an external interrupt caused an \#PF. */
    79 #define IEMXCPTRAISEINFO_EXT_INT_PF              RT_BIT_32(5)
    80 /** Delivery of a software interrupt caused an exception. */
    81 #define IEMXCPTRAISEINFO_SOFT_INT_XCPT           RT_BIT_32(6)
    82 /** Delivery of an NMI caused an exception. */
    83 #define IEMXCPTRAISEINFO_NMI_XCPT                RT_BIT_32(7)
    84 /** Delivery of an NMI caused a \#PF. */
    85 #define IEMXCPTRAISEINFO_NMI_PF                  RT_BIT_32(8)
    86 /** Can re-execute the instruction at CS:RIP. */
    87 #define IEMXCPTRAISEINFO_CAN_REEXEC_INSTR        RT_BIT_32(9)
    88 /** @} */
    89 
    90 
    91 /** @name IEMXCPTRAISE_XXX - Ways to handle a recursive exception condition.
     49/** @addtogroup grp_iem
    9250 * @{ */
    93 typedef enum IEMXCPTRAISE
    94 {
    95     /** Raise the current (second) exception. */
    96     IEMXCPTRAISE_CURRENT_XCPT = 0,
    97     /** Re-raise the previous (first) event (for HM, unused by IEM). */
    98     IEMXCPTRAISE_PREV_EVENT,
    99     /** Re-execute instruction at CS:RIP (for HM, unused by IEM). */
    100     IEMXCPTRAISE_REEXEC_INSTR,
    101     /** Raise a \#DF exception. */
    102     IEMXCPTRAISE_DOUBLE_FAULT,
    103     /** Raise a triple fault. */
    104     IEMXCPTRAISE_TRIPLE_FAULT,
    105     /** Cause a CPU hang. */
    106     IEMXCPTRAISE_CPU_HANG,
    107     /** Invalid sequence of events. */
    108     IEMXCPTRAISE_INVALID = 0x7fffffff
    109 } IEMXCPTRAISE;
    110 /** Pointer to a IEMXCPTRAISE type. */
    111 typedef IEMXCPTRAISE *PIEMXCPTRAISE;
    112 /** @} */
    113 
    114 
    115 /** @name Operand or addressing mode.
    116  * @{ */
    117 typedef uint8_t IEMMODE;
    118 #define IEMMODE_16BIT 0
    119 #define IEMMODE_32BIT 1
    120 #define IEMMODE_64BIT 2
    121 /** @} */
    122 
    123 
    124 /** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
    125  * @{ */
    126 /** CPU exception. */
    127 #define IEM_XCPT_FLAGS_T_CPU_XCPT       RT_BIT_32(0)
    128 /** External interrupt (from PIC, APIC, whatever). */
    129 #define IEM_XCPT_FLAGS_T_EXT_INT        RT_BIT_32(1)
    130 /** Software interrupt (int or into, not bound).
    131  * Returns to the following instruction */
    132 #define IEM_XCPT_FLAGS_T_SOFT_INT       RT_BIT_32(2)
    133 /** Takes an error code. */
    134 #define IEM_XCPT_FLAGS_ERR              RT_BIT_32(3)
    135 /** Takes a CR2. */
    136 #define IEM_XCPT_FLAGS_CR2              RT_BIT_32(4)
    137 /** Generated by the breakpoint instruction. */
    138 #define IEM_XCPT_FLAGS_BP_INSTR         RT_BIT_32(5)
    139 /** Generated by a DRx instruction breakpoint and RF should be cleared. */
    140 #define IEM_XCPT_FLAGS_DRx_INSTR_BP     RT_BIT_32(6)
    141 /** Generated by the icebp instruction. */
    142 #define IEM_XCPT_FLAGS_ICEBP_INSTR      RT_BIT_32(7)
    143 /** Generated by the overflow instruction. */
    144 #define IEM_XCPT_FLAGS_OF_INSTR         RT_BIT_32(8)
    145 /** @}  */
    14651
    14752
     
    18085 * dicates the behaviour here. */
    18186#define IEMTARGETCPU_CURRENT    UINT32_C(9)
    182 /** @} */
    183 
    184 
    185 /** @name IEM status codes.
    186  *
    187  * Not quite sure how this will play out in the end, just aliasing safe status
    188  * codes for now.
    189  *
    190  * @{ */
    191 #define VINF_IEM_RAISED_XCPT    VINF_EM_RESCHEDULE
    19287/** @} */
    19388
     
    288183#endif
    289184
    290 VMMDECL(VBOXSTRICTRC)       IEMExecOne(PVMCPUCC pVCpu);
    291 VMMDECL(VBOXSTRICTRC)       IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten);
    292 VMMDECL(VBOXSTRICTRC)       IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
    293                                                          const void *pvOpcodeBytes, size_t cbOpcodeBytes);
    294 VMMDECL(VBOXSTRICTRC)       IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten);
    295 VMMDECL(VBOXSTRICTRC)       IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
    296                                                                const void *pvOpcodeBytes, size_t cbOpcodeBytes);
    297 VMMDECL(VBOXSTRICTRC)       IEMExecOneIgnoreLock(PVMCPUCC pVCpu);
    298 VMMDECL(VBOXSTRICTRC)       IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions);
    299 /** Statistics returned by IEMExecForExits. */
    300 typedef struct IEMEXECFOREXITSTATS
    301 {
    302     uint32_t cInstructions;
    303     uint32_t cExits;
    304     uint32_t cMaxExitDistance;
    305     uint32_t cReserved;
    306 } IEMEXECFOREXITSTATS;
    307 /** Pointer to statistics returned by IEMExecForExits. */
    308 typedef IEMEXECFOREXITSTATS *PIEMEXECFOREXITSTATS;
    309 VMMDECL(VBOXSTRICTRC)       IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
    310                                             uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats);
    311 VMMDECL(VBOXSTRICTRC)       IEMInjectTrpmEvent(PVMCPUCC pVCpu);
    312 VMM_INT_DECL(VBOXSTRICTRC)  IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
    313                                           uint8_t cbInstr);
    314 
    315 VMM_INT_DECL(int)           IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp);
    316 VMM_INT_DECL(int)           IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp);
    317 
    318 VMM_INT_DECL(void)          IEMTlbInvalidateAll(PVMCPUCC pVCpu);
    319 VMM_INT_DECL(void)          IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr);
    320 VMM_INT_DECL(void)          IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu);
    321 VMM_INT_DECL(void)          IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller);
    322 VMM_INT_DECL(bool)          IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr,
    323                                               uint64_t *puCr2);
    324 VMM_INT_DECL(IEMXCPTRAISE)  IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
    325                                                      uint8_t uCurVector, PIEMXCPTRAISEINFO pXcptRaiseInfo);
    326 
    327185/** @name Given Instruction Interpreters
    328186 * @{ */
     
    399257/** @}  */
    400258
    401 /** @defgroup grp_iem_r3     The IEM Host Context Ring-3 API.
     259/** @defgroup grp_iem_r0     The IEM Host Context Ring-0 API.
    402260 * @{
    403261 */
     
    405263/** @} */
    406264
    407 
    408 /** @defgroup grp_iem_r3     The IEM Host Context Ring-3 API.
    409  * @{
    410  */
    411 VMMR3DECL(int)      IEMR3Init(PVM pVM);
    412 VMMR3DECL(int)      IEMR3Term(PVM pVM);
    413 VMMR3DECL(void)     IEMR3Relocate(PVM pVM);
    414 VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict);
    415265/** @} */
    416266
    417 /** @} */
    418 
    419267RT_C_DECLS_END
    420268
    421 #endif /* !VBOX_INCLUDED_vmm_iem_h */
    422 
     269#endif /* !VBOX_INCLUDED_vmm_iem_x86_amd64_h */
     270
  • trunk/include/VBox/vmm/iem.h

    r98103 r98980  
    4242#include <VBox/types.h>
    4343#include <VBox/vmm/trpm.h>
    44 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    45 # include <VBox/vmm/hm_vmx.h>
     44#include <iprt/assert.h>
     45
     46#ifdef VBOX_VMM_TARGET_ARMV8
     47# include <VBox/vmm/iem-armv8.h>
     48#else
     49# include <VBox/vmm/iem-x86-amd64.h>
    4650#endif
    47 #include <iprt/assert.h>
    4851
    4952
     
    146149
    147150
    148 /** @name IEMTARGETCPU_XXX - IEM target CPU specification.
    149  *
    150  * This is a gross simpliciation of CPUMMICROARCH for dealing with really old
    151  * CPUs which didn't have much in the way of hinting at supported instructions
    152  * and features.  This slowly changes with the introduction of CPUID with the
    153  * Intel Pentium.
    154  *
    155  * @{
    156  */
    157 /** The dynamic target CPU mode is for getting thru the BIOS and then use
    158  * the debugger or modifying instruction behaviour (e.g. HLT) to switch to a
    159  * different target CPU. */
    160 #define IEMTARGETCPU_DYNAMIC    UINT32_C(0)
    161 /** Intel 8086/8088.  */
    162 #define IEMTARGETCPU_8086       UINT32_C(1)
    163 /** NEC V20/V30.
    164  * @remarks must be between 8086 and 80186. */
    165 #define IEMTARGETCPU_V20        UINT32_C(2)
    166 /** Intel 80186/80188.  */
    167 #define IEMTARGETCPU_186        UINT32_C(3)
    168 /** Intel 80286.  */
    169 #define IEMTARGETCPU_286        UINT32_C(4)
    170 /** Intel 80386.  */
    171 #define IEMTARGETCPU_386        UINT32_C(5)
    172 /** Intel 80486.  */
    173 #define IEMTARGETCPU_486        UINT32_C(6)
    174 /** Intel Pentium .  */
    175 #define IEMTARGETCPU_PENTIUM    UINT32_C(7)
    176 /** Intel PentiumPro.  */
    177 #define IEMTARGETCPU_PPRO       UINT32_C(8)
    178 /** A reasonably current CPU, probably newer than the pentium pro when it comes
    179  * to the feature set and behaviour.  Generally the CPUID info and CPU vendor
    180  * dicates the behaviour here. */
    181 #define IEMTARGETCPU_CURRENT    UINT32_C(9)
    182 /** @} */
    183 
    184 
    185151/** @name IEM status codes.
    186152 *
     
    192158/** @} */
    193159
    194 
    195 /** The CPUMCTX_EXTRN_XXX mask required to be cleared when interpreting anything.
    196  * IEM will ASSUME the caller of IEM APIs has ensured these are already present. */
    197 #define IEM_CPUMCTX_EXTRN_MUST_MASK                (  CPUMCTX_EXTRN_GPRS_MASK \
    198                                                     | CPUMCTX_EXTRN_RIP \
    199                                                     | CPUMCTX_EXTRN_RFLAGS \
    200                                                     | CPUMCTX_EXTRN_SS \
    201                                                     | CPUMCTX_EXTRN_CS \
    202                                                     | CPUMCTX_EXTRN_CR0 \
    203                                                     | CPUMCTX_EXTRN_CR3 \
    204                                                     | CPUMCTX_EXTRN_CR4 \
    205                                                     | CPUMCTX_EXTRN_APIC_TPR \
    206                                                     | CPUMCTX_EXTRN_EFER \
    207                                                     | CPUMCTX_EXTRN_DR7 )
    208 /** The CPUMCTX_EXTRN_XXX mask needed when injecting an exception/interrupt.
    209  * IEM will import missing bits, callers are encouraged to make these registers
    210  * available prior to injection calls if fetching state anyway.  */
    211 #define IEM_CPUMCTX_EXTRN_XCPT_MASK                (  IEM_CPUMCTX_EXTRN_MUST_MASK \
    212                                                     | CPUMCTX_EXTRN_CR2 \
    213                                                     | CPUMCTX_EXTRN_SREG_MASK \
    214                                                     | CPUMCTX_EXTRN_TABLE_MASK )
    215 /** The CPUMCTX_EXTRN_XXX mask required to be cleared when calling any
    216  * IEMExecDecoded API not using memory.  IEM will ASSUME the caller of IEM
    217  * APIs has ensured these are already present.
    218  * @note ASSUMES execution engine has checked for instruction breakpoints
    219  *       during decoding. */
    220 #define IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK (  CPUMCTX_EXTRN_RIP \
    221                                                     | CPUMCTX_EXTRN_RFLAGS \
    222                                                     | CPUMCTX_EXTRN_SS   /* for CPL */ \
    223                                                     | CPUMCTX_EXTRN_CS   /* for mode */ \
    224                                                     | CPUMCTX_EXTRN_CR0  /* for mode */ \
    225                                                     | CPUMCTX_EXTRN_EFER /* for mode */ )
    226 /** The CPUMCTX_EXTRN_XXX mask required to be cleared when calling any
    227  * IEMExecDecoded API using memory.  IEM will ASSUME the caller of IEM
    228  * APIs has ensured these are already present.
    229  * @note ASSUMES execution engine has checked for instruction breakpoints
    230  *       during decoding. */
    231 #define IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK    (  IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK \
    232                                                     | CPUMCTX_EXTRN_CR3 /* for page tables */ \
    233                                                     | CPUMCTX_EXTRN_CR4 /* for mode paging mode */ \
    234                                                     | CPUMCTX_EXTRN_DR7 /* for memory breakpoints */ )
    235 
    236 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    237 /** The CPUMCTX_EXTRN_XXX mask needed when calling IEMExecDecodedVmlaunchVmresume().
    238  * IEM will ASSUME the caller has ensured these are already present. */
    239 # define IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK        (  IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK \
    240                                                     | CPUMCTX_EXTRN_CR2 \
    241                                                     | CPUMCTX_EXTRN_HWVIRT )
    242 
    243 /** The CPUMCTX_EXTRN_XXX mask that the IEM VM-exit code will import on-demand when
    244  *  needed, primarily because there are several IEM VM-exit interface functions and
    245  *  some of which may not cause a VM-exit at all.
    246  *
    247  *  This is currently unused, but keeping it here in case we can get away a bit more
    248  *  fine-grained state handling.
    249  *
    250  *  @note Update HM_CHANGED_VMX_VMEXIT_MASK if something here changes. */
    251 # define IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK         (  CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 \
    252                                                     | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6 \
    253                                                     | CPUMCTX_EXTRN_EFER \
    254                                                     | CPUMCTX_EXTRN_SYSENTER_MSRS \
    255                                                     | CPUMCTX_EXTRN_OTHER_MSRS    /* for PAT MSR */ \
    256                                                     | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS \
    257                                                     | CPUMCTX_EXTRN_SREG_MASK \
    258                                                     | CPUMCTX_EXTRN_TR \
    259                                                     | CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_IDTR \
    260                                                     | CPUMCTX_EXTRN_HWVIRT )
    261 #endif
    262 
    263 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    264 /** The CPUMCTX_EXTRN_XXX mask needed when calling IEMExecSvmVmexit().
    265  * IEM will ASSUME the caller has ensured these are already present. */
    266 # define IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK         (  CPUMCTX_EXTRN_RSP \
    267                                                     | CPUMCTX_EXTRN_RAX \
    268                                                     | CPUMCTX_EXTRN_RIP \
    269                                                     | CPUMCTX_EXTRN_RFLAGS \
    270                                                     | CPUMCTX_EXTRN_CS \
    271                                                     | CPUMCTX_EXTRN_SS \
    272                                                     | CPUMCTX_EXTRN_DS \
    273                                                     | CPUMCTX_EXTRN_ES \
    274                                                     | CPUMCTX_EXTRN_GDTR \
    275                                                     | CPUMCTX_EXTRN_IDTR \
    276                                                     | CPUMCTX_EXTRN_CR_MASK \
    277                                                     | CPUMCTX_EXTRN_EFER \
    278                                                     | CPUMCTX_EXTRN_DR6 \
    279                                                     | CPUMCTX_EXTRN_DR7 \
    280                                                     | CPUMCTX_EXTRN_OTHER_MSRS \
    281                                                     | CPUMCTX_EXTRN_HWVIRT \
    282                                                     | CPUMCTX_EXTRN_APIC_TPR \
    283                                                     | CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ)
    284 
    285 /** The CPUMCTX_EXTRN_XXX mask needed when calling IEMExecDecodedVmrun().
    286  *  IEM will ASSUME the caller has ensured these are already present. */
    287 # define IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK          IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK
    288 #endif
    289160
    290161VMMDECL(VBOXSTRICTRC)       IEMExecOne(PVMCPUCC pVCpu);
     
    325196                                                     uint8_t uCurVector, PIEMXCPTRAISEINFO pXcptRaiseInfo);
    326197
    327 /** @name Given Instruction Interpreters
    328  * @{ */
    329 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
    330                                                  bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked);
    331 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
    332                                                 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked);
    333 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg);
    334 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg);
    335 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg);
    336 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg);
    337 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg);
    338 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg);
    339 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr);
    340 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst);
    341 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr);
    342 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr);
    343 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr);
    344 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedInvlpg(PVMCPUCC pVCpu,  uint8_t cbInstr, RTGCPTR GCPtrPage);
    345 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
    346                                                   uint64_t uType);
    347 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr);
    348 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr);
    349 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr);
    350 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr);
    351 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr);
    352 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr);
    353 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr);
    354 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr);
    355 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr);
    356 
    357 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    358 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedClgi(PVMCPUCC pVCpu, uint8_t cbInstr);
    359 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedStgi(PVMCPUCC pVCpu, uint8_t cbInstr);
    360 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedVmload(PVMCPUCC pVCpu, uint8_t cbInstr);
    361 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedVmsave(PVMCPUCC pVCpu, uint8_t cbInstr);
    362 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedInvlpga(PVMCPUCC pVCpu, uint8_t cbInstr);
    363 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedVmrun(PVMCPUCC pVCpu, uint8_t cbInstr);
    364 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
    365 #endif
    366 
    367 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    368 VMM_INT_DECL(void)          IEMReadVmxVmcsField(PCVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t *pu64Dst);
    369 VMM_INT_DECL(void)          IEMWriteVmxVmcsField(PVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t u64Val);
    370 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecVmxVirtApicAccessMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Val, bool fWrite);
    371 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecVmxVmexitApicWrite(PVMCPUCC pVCpu);
    372 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecVmxVmexitPreemptTimer(PVMCPUCC pVCpu);
    373 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecVmxVmexitExtInt(PVMCPUCC pVCpu, uint8_t uVector, bool fIntPending);
    374 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecVmxVmexitXcpt(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo);
    375 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecVmxVmexitXcptNmi(PVMCPUCC pVCpu);
    376 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecVmxVmexitTripleFault(PVMCPUCC pVCpu);
    377 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecVmxVmexitStartupIpi(PVMCPUCC pVCpu, uint8_t uVector);
    378 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecVmxVmexitInstrWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
    379 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr);
    380 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecVmxVmexitTrapLike(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
    381 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecVmxVmexitTaskSwitch(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo);
    382 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecVmxVmexitApicAccess(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo);
    383 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t uExitQual);
    384 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedVmread(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
    385 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedVmwrite(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
    386 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedVmptrld(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
    387 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedVmptrst(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
    388 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedVmclear(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
    389 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedVmlaunchVmresume(PVMCPUCC pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId);
    390 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedVmxon(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
    391 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedVmxoff(PVMCPUCC pVCpu, uint8_t cbInstr);
    392 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedInvvpid(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
    393 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    394 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedInvept(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
    395 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecVmxVmexitEptViolation(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo);
    396 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecVmxVmexitEptMisconfig(PVMCPUCC pVCpu, RTGCPHYS GCPhysAddr, PCVMXVEXITEVENTINFO pExitEventInfo);
    397 # endif
    398 #endif
    399 /** @}  */
    400 
    401 /** @defgroup grp_iem_r3     The IEM Host Context Ring-3 API.
    402  * @{
    403  */
    404 VMMR0_INT_DECL(int) IEMR0InitVM(PGVM pGVM);
    405 /** @} */
    406 
    407 
    408198/** @defgroup grp_iem_r3     The IEM Host Context Ring-3 API.
    409199 * @{
  • trunk/src/VBox/VMM/Makefile.kmk

    r98970 r98980  
    370370        VMMR3/EM.cpp \
    371371        VMMR3/EMR3Dbg.cpp \
    372         VMMR3/EMHM.cpp \
    373372        VMMR3/EMR3Nem.cpp \
    374373        VMMR3/GCM.cpp \
    375374        VMMR3/GIM.cpp \
    376         VMMR3/GIMHv.cpp \
    377         VMMR3/GIMKvm.cpp \
    378         VMMR3/GIMMinimal.cpp \
    379375        VMMR3/IEMR3.cpp \
    380376        VMMR3/IOM.cpp \
  • trunk/src/VBox/VMM/VMMR3/EM.cpp

    r98103 r98980  
    695695
    696696
     697#if !defined(VBOX_VMM_TARGET_ARMV8)
    697698/**
    698699 * Handle pending ring-3 I/O port write.
     
    838839    return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, emR3ExecuteSplitLockInstructionRendezvous, pVCpu);
    839840}
     841#endif /* VBOX_VMM_TARGET_ARMV8 */
    840842
    841843
     
    10401042static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
    10411043{
     1044#if defined(VBOX_VMM_TARGET_ARMV8)
     1045    Log3(("emR3RemStep: pc=%08x\n", CPUMGetGuestFlatPC(pVCpu)));
     1046#else
    10421047    Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu),  CPUMGetGuestEIP(pVCpu)));
     1048#endif
    10431049
    10441050    int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
    10451051
     1052#if defined(VBOX_VMM_TARGET_ARMV8)
     1053    Log3(("emR3RemStep: pc=%08x\n", CPUMGetGuestFlatPC(pVCpu)));
     1054#else
    10461055    Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu),  CPUMGetGuestEIP(pVCpu)));
     1056#endif
    10471057    return rc;
    10481058}
     
    10681078{
    10691079#ifdef LOG_ENABLED
     1080# if defined(VBOX_VMM_TARGET_ARMV8)
     1081    Log3(("EM: pc=%08x\n", CPUMGetGuestFlatPC(pVCpu)));
     1082# else
    10701083    uint32_t cpl = CPUMGetGuestCPL(pVCpu);
    10711084
     
    10741087    else
    10751088        Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
     1089# endif
    10761090#endif
    10771091    STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
     
    12091223    }
    12101224    Log(("Single step END:\n"));
     1225#if defined(VBOX_VMM_TARGET_ARMV8)
     1226    AssertReleaseFailed();
     1227#else
    12111228    CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
     1229#endif
    12121230    pVCpu->em.s.enmState = enmOldState;
    12131231    return VINF_EM_RESCHEDULE;
     
    12301248static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
    12311249{
     1250#if defined(VBOX_VMM_TARGET_ARMV8)
     1251    LogFlow(("emR3ExecuteIemThenRem: %RGv\n", CPUMGetGuestFlatPC(pVCpu)));
     1252#else
    12321253    LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
     1254#endif
    12331255    *pfFFDone = false;
    12341256
     
    13351357        PDMCritSectBothFF(pVM, pVCpu);
    13361358
     1359#if !defined(VBOX_VMM_TARGET_ARMV8)
    13371360    /* Update CR3 (Nested Paging case for HM). */
    13381361    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
     
    13441367        Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
    13451368    }
     1369#endif
    13461370
    13471371    /* IEM has pending work (typically memory write after INS instruction). */
     
    13721396
    13731397
     1398#if !defined(VBOX_VMM_TARGET_ARMV8)
    13741399/**
    13751400 * Helper for emR3ForcedActions() for VMX external interrupt VM-exit.
     
    14571482    return VINF_NO_CHANGE;
    14581483}
     1484#endif
    14591485
    14601486
     
    17121738            TMR3TimerQueuesDo(pVM);
    17131739
     1740#if !defined(VBOX_VMM_TARGET_ARMV8)
    17141741        /*
    17151742         * Pick up asynchronously posted interrupts into the APIC.
     
    17491776         *        delivered. */
    17501777
    1751 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     1778# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    17521779        if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER))
    17531780        {
     
    17881815            Assert(!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER));
    17891816        }
    1790 #endif
     1817# endif
    17911818
    17921819        /*
     
    18211848                if (0)
    18221849                { }
    1823 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     1850# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    18241851                /*
    18251852                 * VMX NMI-window VM-exit.
     
    18421869                    UPDATE_RC();
    18431870                }
    1844 #endif
     1871# endif
    18451872                /*
    18461873                 * NMIs (take priority over external interrupts).
     
    18491876                         && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
    18501877                {
    1851 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     1878# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    18521879                    if (   fInVmxNonRootMode
    18531880                        && CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_NMI_EXIT))
     
    18581885                    }
    18591886                    else
    1860 #endif
    1861 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
     1887# endif
     1888# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    18621889                    if (   fInSvmHwvirtMode
    18631890                        && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_NMI))
     
    18691896                    }
    18701897                    else
    1871 #endif
     1898# endif
    18721899                    {
    18731900                        rc2 = TRPMAssertTrap(pVCpu, X86_XCPT_NMI, TRPM_TRAP);
     
    18881915                    }
    18891916                }
    1890 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     1917# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    18911918                /*
    18921919                 * VMX Interrupt-window VM-exits.
     
    19041931                    UPDATE_RC();
    19051932                }
    1906 #endif
    1907 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
     1933# endif
     1934# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    19081935                /** @todo NSTSVM: Handle this for SVM here too later not when an interrupt is
    19091936                 *        actually pending like we currently do. */
    1910 #endif
     1937# endif
    19111938                /*
    19121939                 * External interrupts.
     
    19531980                                rc2 = VINF_EM_RESCHEDULE;
    19541981                            }
    1955 #ifdef VBOX_STRICT
     1982# ifdef VBOX_STRICT
    19561983                            if (fInjected)
    19571984                                rcIrq = rc2;
    1958 #endif
     1985# endif
    19591986                        }
    19601987                        UPDATE_RC();
     
    19732000                            Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
    19742001                            rc2 = VINF_EM_RESCHEDULE;
    1975 #ifdef VBOX_STRICT
     2002# ifdef VBOX_STRICT
    19762003                            rcIrq = rc2;
    1977 #endif
     2004# endif
    19782005                        }
    19792006                        UPDATE_RC();
     
    19822009            } /* CPUMGetGuestGif */
    19832010        }
     2011#else
     2012        bool fWakeupPending = false;
     2013        AssertReleaseFailed();
     2014        /** @todo */
     2015#endif
    19842016
    19852017        /*
     
    22102242                fFFDone = false;
    22112243
    2212 #ifdef VBOX_STRICT
     2244#if defined(VBOX_STRICT) && !defined(VBOX_VMM_TARGET_ARMV8)
    22132245            CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
    22142246#endif
     
    26432675                    else
    26442676                    {
    2645                         rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
     2677#if defined(VBOX_VMM_TARGET_ARMV8)
     2678                        bool fIgnoreInterrupts = false;
     2679                        AssertReleaseFailed();
     2680#else
     2681                        bool fIgnoreInterrupts = !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF);
     2682#endif
     2683                        rc = VMR3WaitHalted(pVM, pVCpu, fIgnoreInterrupts);
    26462684                        /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
    26472685                           check VMCPU_FF_UPDATE_APIC here. */
  • trunk/src/VBox/VMM/VMMR3/EMR3Nem.cpp

    r98103 r98980  
    9696        return VINF_EM_RESCHEDULE;
    9797
     98#if defined(VBOX_VMM_TARGET_ARMV8)
     99    uint64_t const uOldPc = pVCpu->cpum.GstCtx.Pc.u64;
     100#else
    98101    uint64_t const uOldRip = pVCpu->cpum.GstCtx.rip;
     102#endif
    99103    for (;;)
    100104    {
     
    142146         * Done?
    143147         */
     148#if defined(VBOX_VMM_TARGET_ARMV8)
     149        CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_PC);
     150        if (   (rcStrict != VINF_SUCCESS && rcStrict != VINF_EM_DBG_STEPPED)
     151            || !(fFlags & EM_ONE_INS_FLAGS_RIP_CHANGE)
     152            || pVCpu->cpum.GstCtx.Pc.u64 != uOldPc)
     153        {
     154            if (rcStrict == VINF_SUCCESS && pVCpu->cpum.GstCtx.Pc.u64 != uOldPc)
     155                rcStrict = VINF_EM_DBG_STEPPED;
     156            Log(("emR3NemSingleInstruction: returns %Rrc (pc %llx -> %llx)\n",
     157                 VBOXSTRICTRC_VAL(rcStrict), uOldPc, pVCpu->cpum.GstCtx.Pc.u64));
     158            CPUM_IMPORT_EXTRN_RET(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK);
     159            return rcStrict;
     160        }
     161#else
    144162        CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
    145163        if (   (rcStrict != VINF_SUCCESS && rcStrict != VINF_EM_DBG_STEPPED)
     
    154172            return rcStrict;
    155173        }
     174#endif
    156175    }
    157176}
     
    181200     * Log it.
    182201     */
     202#ifdef VBOX_VMM_TARGET_ARMV8
     203    Log(("EMINS: %RGv SP_EL0=%RGv SP_EL1=%RGv\n", (RTGCPTR)pVCpu->cpum.GstCtx.Pc.u64,
     204                                                  (RTGCPTR)pVCpu->cpum.GstCtx.aSpReg[0].u64,
     205                                                  (RTGCPTR)pVCpu->cpum.GstCtx.aSpReg[1].u64));
     206    if (pszPrefix)
     207    {
     208        DBGFR3_INFO_LOG(pVM, pVCpu, "cpumguest", pszPrefix);
     209        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, pszPrefix);
     210    }
     211# else
    183212    Log(("EMINS: %04x:%RGv RSP=%RGv\n", pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip, (RTGCPTR)pVCpu->cpum.GstCtx.rsp));
    184213    if (pszPrefix)
     
    187216        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, pszPrefix);
    188217    }
     218# endif
    189219#endif
    190220
     
    340370    VBOXSTRICTRC rcStrict = VERR_IPE_UNINITIALIZED_STATUS;
    341371
     372#ifdef VBOX_VMM_TARGET_ARMV8
     373    LogFlow(("emR3NemExecute%d: (pc=%RGv)\n", pVCpu->idCpu, (RTGCPTR)pVCpu->cpum.GstCtx.Pc.u64));
     374#else
    342375    LogFlow(("emR3NemExecute%d: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip));
     376#endif
    343377    *pfFFDone = false;
    344378
     
    374408        }
    375409
    376 #ifdef LOG_ENABLED
     410#if defined(LOG_ENABLED) && !defined(VBOX_VMM_TARGET_ARMV8)
    377411        /*
    378412         * Log important stuff before entering GC.
  • trunk/src/VBox/VMM/VMMR3/IEMR3.cpp

    r98103 r98980  
    5858
    5959
     60#if !defined(VBOX_VMM_TARGET_ARMV8)
    6061static const char *iemGetTargetCpuName(uint32_t enmTargetCpu)
    6162{
     
    7677    }
    7778}
     79#endif
    7880
    7981
     
    8991VMMR3DECL(int)      IEMR3Init(PVM pVM)
    9092{
    91     int rc;
    92 
     93#if !defined(VBOX_VMM_TARGET_ARMV8) && !defined(VBOX_WITHOUT_CPUID_HOST_CALL)
    9394    /*
    9495     * Read configuration.
     
    9697    PCFGMNODE pIem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "IEM");
    9798
    98 #ifndef VBOX_WITHOUT_CPUID_HOST_CALL
    9999    /** @cfgm{/IEM/CpuIdHostCall, boolean, false}
    100100     * Controls whether the custom VBox specific CPUID host call interface is
    101101     * enabled or not. */
    102102# ifdef DEBUG_bird
    103     rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, true);
     103    int rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, true);
    104104# else
    105     rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, false);
     105    int rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, false);
    106106# endif
    107107    AssertLogRelRCReturn(rc, rc);
     
    170170                            "", "/IEM/CPU%u/Interrupts/%02x", idCpu, i);
    171171
    172 #if defined(VBOX_WITH_STATISTICS) && !defined(DOXYGEN_RUNNING)
     172#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_STATISTICS) && !defined(DOXYGEN_RUNNING)
    173173        /* Instruction statistics: */
    174174# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) \
     
    188188            pVCpu->iem.s.enmCpuVendor                     = CPUMGetGuestCpuVendor(pVM);
    189189            pVCpu->iem.s.enmHostCpuVendor                 = CPUMGetHostCpuVendor(pVM);
     190#if !defined(VBOX_VMM_TARGET_ARMV8)
    190191            pVCpu->iem.s.aidxTargetCpuEflFlavour[0]       =    pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL
    191192                                                            || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_VIA /*??*/
    192193                                                          ? IEMTARGETCPU_EFL_BEHAVIOR_INTEL : IEMTARGETCPU_EFL_BEHAVIOR_AMD;
    193 #if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
     194# if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
    194195            if (pVCpu->iem.s.enmCpuVendor == pVCpu->iem.s.enmHostCpuVendor)
    195196                pVCpu->iem.s.aidxTargetCpuEflFlavour[1]   = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
    196197            else
    197 #endif
     198# endif
    198199                pVCpu->iem.s.aidxTargetCpuEflFlavour[1]   = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
    199 
    200 #if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
     200#else
     201            pVCpu->iem.s.aidxTargetCpuEflFlavour[0]   = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
     202            pVCpu->iem.s.aidxTargetCpuEflFlavour[1]   = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
     203#endif
     204
     205#if !defined(VBOX_VMM_TARGET_ARMV8) && (IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC)
    201206            switch (pVM->cpum.ro.GuestFeatures.enmMicroarch)
    202207            {
     
    240245    }
    241246
    242 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     247#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    243248    /*
    244249     * Register the per-VM VMX APIC-access page handler type.
  • trunk/src/VBox/VMM/include/GIMHvInternal.h

    r98103 r98980  
    13701370VMM_INT_DECL(VBOXSTRICTRC)      gimHvHypercall(PVMCPUCC pVCpu, PCPUMCTX pCtx);
    13711371VMM_INT_DECL(VBOXSTRICTRC)      gimHvHypercallEx(PVMCPUCC pVCpu, PCPUMCTX pCtx, unsigned uDisOpcode, uint8_t cbInstr);
     1372#if !defined(VBOX_VMM_TARGET_ARMV8)
    13721373VMM_INT_DECL(VBOXSTRICTRC)      gimHvReadMsr(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue);
    13731374VMM_INT_DECL(VBOXSTRICTRC)      gimHvWriteMsr(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uRawValue);
     1375#endif
    13741376
    13751377VMM_INT_DECL(void)              gimHvStartStimer(PVMCPUCC pVCpu, PCGIMHVSTIMER pHvStimer);
  • trunk/src/VBox/VMM/include/GIMKvmInternal.h

    r98103 r98980  
    271271VMM_INT_DECL(bool)              gimKvmAreHypercallsEnabled(PVMCPU pVCpu);
    272272VMM_INT_DECL(VBOXSTRICTRC)      gimKvmHypercall(PVMCPUCC pVCpu, PCPUMCTX pCtx);
     273#if !defined(VBOX_VMM_TARGET_ARMV8)
    273274VMM_INT_DECL(VBOXSTRICTRC)      gimKvmReadMsr(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue);
    274275VMM_INT_DECL(VBOXSTRICTRC)      gimKvmWriteMsr(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uRawValue);
     276#endif
    275277VMM_INT_DECL(bool)              gimKvmShouldTrapXcptUD(PVM pVM);
    276278VMM_INT_DECL(VBOXSTRICTRC)      gimKvmXcptUD(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTX pCtx, PDISCPUSTATE pDis, uint8_t *pcbInstr);
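Consumers follow the complementary half of the pattern: code that touches the x86/amd64-only MSR interfaces (GIMReadMsr/GIMWriteMsr, gimHvReadMsr, gimKvmReadMsr and friends) must be fenced off the same way, because the declarations simply do not exist in an ARMv8 build. A hedged sketch of such a caller, under that assumption (gimExampleReadMsr and its use are hypothetical, not part of the changeset):

    #include <VBox/vmm/gim.h>

    #if !defined(VBOX_VMM_TARGET_ARMV8)
    /* Hypothetical helper: forwards an MSR read to GIM on x86/amd64 targets only;
       GIMReadMsr() is declared only for those targets (see the gim.h hunk above). */
    static VBOXSTRICTRC gimExampleReadMsr(PVMCPUCC pVCpu, uint32_t idMsr,
                                          PCCPUMMSRRANGE pRange, uint64_t *puValue)
    {
        return GIMReadMsr(pVCpu, idMsr, pRange, puValue);
    }
    #endif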