VirtualBox

source: vbox/trunk/include/VBox/vmm/hm.h

Last change on this file was 100140, checked in by vboxsync, 12 months ago

VMM/EM: Do not do scheduling based on whether HM has been used and is 'active', because that's not a reliable property (especially after restoring saved state) and it's not correct to go to the recompiler all the time after HM was unable to execute a piece of code. This is probably a problem resurfacing after kicking out the IEM_THEN_REM state from EM and resurrecting the REM state. bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 14.3 KB
RevLine 
[35361]1/** @file
[44373]2 * HM - Intel/AMD VM Hardware Assisted Virtualization Manager (VMM)
[35361]3 */
4
5/*
[98103]6 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
[35361]7 *
[96407]8 * This file is part of VirtualBox base platform packages, as
9 * available from https://www.virtualbox.org.
[35361]10 *
[96407]11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation, in version 3 of the
14 * License.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, see <https://www.gnu.org/licenses>.
23 *
[35361]24 * The contents of this file may alternatively be used under the terms
25 * of the Common Development and Distribution License Version 1.0
[96407]26 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
27 * in the VirtualBox distribution, in which case the provisions of the
[35361]28 * CDDL are applicable instead of those of the GPL.
29 *
30 * You may elect to license modified versions of this file under the
31 * terms and conditions of either the GPL or the CDDL or both.
[96407]32 *
33 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
[35361]34 */
35
[76558]36#ifndef VBOX_INCLUDED_vmm_hm_h
37#define VBOX_INCLUDED_vmm_hm_h
[76507]38#ifndef RT_WITHOUT_PRAGMA_ONCE
39# pragma once
40#endif
[35361]41
42#include <VBox/vmm/pgm.h>
43#include <VBox/vmm/cpum.h>
[43387]44#include <VBox/vmm/vmm.h>
[66022]45#include <VBox/vmm/hm_svm.h>
[73389]46#include <VBox/vmm/hm_vmx.h>
[67529]47#include <VBox/vmm/trpm.h>
[35361]48#include <iprt/mp.h>
49
50
[58110]51/** @defgroup grp_hm The Hardware Assisted Virtualization Manager API
52 * @ingroup grp_vmm
[35361]53 * @{
54 */
55
56RT_C_DECLS_BEGIN
57
/**
 * Checks whether HM (VT-x/AMD-V) is being used by this VM.
 *
 * @retval  true if used.
 * @retval  false if software virtualization (raw-mode) or NEM is used.
 *
 * @param   a_pVM   The cross context VM structure.
 * @deprecated Please use VM_IS_RAW_MODE_ENABLED, VM_IS_HM_OR_NEM_ENABLED, or
 *             VM_IS_HM_ENABLED instead.
 * @internal
 */
#if !defined(VBOX_STRICT) || !defined(IN_RING3)
/* Normal builds read the flag straight out of the VM structure. */
# define HMIsEnabled(a_pVM)     ((a_pVM)->fHMEnabled)
#else
/* Strict ring-3 builds go through a real function (extra validation). */
# define HMIsEnabled(a_pVM)     HMIsEnabledNotMacro(a_pVM)
#endif
[35361]74
/**
 * Checks whether raw-mode context is required for HM purposes
 *
 * @retval  true if required by HM for doing switching the cpu to 64-bit mode.
 * @retval  false if not required by HM.
 *
 * @param   a_pVM   The cross context VM structure.
 * @internal
 */
#if HC_ARCH_BITS != 64
/* Only 32-bit hosts may need the raw-mode context for the 64-bit switcher. */
# define HMIsRawModeCtxNeeded(a_pVM)    ((a_pVM)->fHMNeedRawModeCtx)
#else
/* 64-bit hosts never need it. */
# define HMIsRawModeCtxNeeded(a_pVM)    (false)
#endif
89
/**
 * Checks whether we're in the special hardware virtualization context.
 * @returns true / false.
 * @param   a_pVCpu The caller's cross context virtual CPU structure.
 * @thread  EMT
 */
#ifndef IN_RING0
/* Only ring-0 code can be inside the hardware virtualization context. */
# define HMIsInHwVirtCtx(a_pVCpu)       (false)
#else
# define HMIsInHwVirtCtx(a_pVCpu)       (VMCPU_GET_STATE(a_pVCpu) == VMCPUSTATE_STARTED_HM)
#endif
101
/**
 * Checks whether we're in the special hardware virtualization context and we
 * cannot perform long jump without guru meditating and possibly messing up the
 * host and/or guest state.
 *
 * This is after we've turned interrupts off and such.
 *
 * @returns true / false.
 * @param   a_pVCpu The caller's cross context virtual CPU structure.
 * @thread  EMT
 */
#ifndef IN_RING0
/* Never applicable outside ring-0. */
# define HMIsInHwVirtNoLongJmpCtx(a_pVCpu)  (false)
#else
# define HMIsInHwVirtNoLongJmpCtx(a_pVCpu)  (VMCPU_GET_STATE(a_pVCpu) == VMCPUSTATE_STARTED_EXEC)
#endif
[45749]118
[67529]119/** @name All-context HM API.
120 * @{ */
[45618]121VMMDECL(bool) HMIsEnabledNotMacro(PVM pVM);
[80281]122VMMDECL(bool) HMCanExecuteGuest(PVMCC pVM, PVMCPUCC pVCpu, PCCPUMCTX pCtx);
123VMM_INT_DECL(int) HMInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCVirt);
124VMM_INT_DECL(bool) HMHasPendingIrq(PVMCC pVM);
[80268]125VMM_INT_DECL(bool) HMSetSingleInstruction(PVMCC pVM, PVMCPUCC pVCpu, bool fEnable);
[73246]126VMM_INT_DECL(bool) HMIsSvmActive(PVM pVM);
127VMM_INT_DECL(bool) HMIsVmxActive(PVM pVM);
[76993]128VMM_INT_DECL(const char *) HMGetVmxDiagDesc(VMXVDIAG enmDiag);
129VMM_INT_DECL(const char *) HMGetVmxExitName(uint32_t uExit);
130VMM_INT_DECL(const char *) HMGetSvmExitName(uint32_t uExit);
131VMM_INT_DECL(void) HMDumpHwvirtVmxState(PVMCPU pVCpu);
[87488]132VMM_INT_DECL(void) HMHCChangedPagingMode(PVM pVM, PVMCPUCC pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode);
[76993]133VMM_INT_DECL(void) HMGetVmxMsrsFromHwvirtMsrs(PCSUPHWVIRTMSRS pMsrs, PVMXMSRS pVmxMsrs);
134VMM_INT_DECL(void) HMGetSvmMsrsFromHwvirtMsrs(PCSUPHWVIRTMSRS pMsrs, PSVMMSRS pSvmMsrs);
[73606]135/** @} */
136
137/** @name All-context VMX helpers.
[75440]138 *
139 * These are hardware-assisted VMX functions (used by IEM/REM/CPUM and HM). Helpers
140 * based purely on the Intel VT-x specification (used by IEM/REM and HM) can be
141 * found in CPUM.
[73606]142 * @{ */
[93725]143#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
[81733]144VMM_INT_DECL(bool) HMIsSubjectToVmxPreemptTimerErratum(void);
[93725]145#endif
[80281]146VMM_INT_DECL(bool) HMCanExecuteVmxGuest(PVMCC pVM, PVMCPUCC pVCpu, PCCPUMCTX pCtx);
[79637]147VMM_INT_DECL(TRPMEVENT) HMVmxEventTypeToTrpmEventType(uint32_t uIntInfo);
[81002]148VMM_INT_DECL(uint32_t) HMTrpmEventTypeToVmxEventType(uint8_t uVector, TRPMEVENT enmTrpmEvent, bool fIcebp);
[67529]149/** @} */
[44373]150
[67529]151/** @name All-context SVM helpers.
[70462]152 *
[75440]153 * These are hardware-assisted SVM functions (used by IEM/REM/CPUM and HM). Helpers
154 * based purely on the AMD SVM specification (used by IEM/REM and HM) can be found
155 * in CPUM.
[67529]156 * @{ */
[77902]157VMM_INT_DECL(TRPMEVENT) HMSvmEventToTrpmEventType(PCSVMEVENT pSvmEvent, uint8_t uVector);
[67529]158/** @} */
159
[35361]160#ifndef IN_RC
[74287]161
162/** @name R0, R3 HM (VMX/SVM agnostic) handlers.
163 * @{ */
[76993]164VMM_INT_DECL(int) HMFlushTlb(PVMCPU pVCpu);
[80268]165VMM_INT_DECL(int) HMFlushTlbOnAllVCpus(PVMCC pVM);
[80281]166VMM_INT_DECL(int) HMInvalidatePageOnAllVCpus(PVMCC pVM, RTGCPTR GCVirt);
[80268]167VMM_INT_DECL(int) HMInvalidatePhysPage(PVMCC pVM, RTGCPHYS GCPhys);
[87515]168VMM_INT_DECL(bool) HMAreNestedPagingAndFullGuestExecEnabled(PVMCC pVM);
[87518]169VMM_INT_DECL(bool) HMIsLongModeAllowed(PVMCC pVM);
[87515]170VMM_INT_DECL(bool) HMIsNestedPagingActive(PVMCC pVM);
[73389]171VMM_INT_DECL(bool) HMIsMsrBitmapActive(PVM pVM);
[79345]172# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
173VMM_INT_DECL(void) HMNotifyVmxNstGstVmexit(PVMCPU pVCpu);
174VMM_INT_DECL(void) HMNotifyVmxNstGstCurrentVmcsChanged(PVMCPU pVCpu);
[78220]175# endif
[74287]176/** @} */
177
178/** @name R0, R3 SVM handlers.
179 * @{ */
[87511]180VMM_INT_DECL(bool) HMIsSvmVGifActive(PCVMCC pVM);
[72746]181# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
[80281]182VMM_INT_DECL(void) HMNotifySvmNstGstVmexit(PVMCPUCC pVCpu, PCPUMCTX pCtx);
[72746]183# endif
[93725]184# if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
[76993]185VMM_INT_DECL(int) HMIsSubjectToSvmErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping);
[93725]186# endif
[80268]187VMM_INT_DECL(int) HMHCMaybeMovTprSvmHypercall(PVMCC pVM, PVMCPUCC pVCpu);
[74287]188/** @} */
189
[44373]190#else /* Nops in RC: */
[74287]191
/** @name RC HM (VMX/SVM agnostic) handlers.
 * @{ */
# define HMFlushTlb(pVCpu)                                  do { } while (0)
# define HMFlushTlbOnAllVCpus(pVM)                          do { } while (0)
# define HMInvalidatePageOnAllVCpus(pVM, GCVirt)            do { } while (0)
# define HMInvalidatePhysPage(pVM, GCPhys)                  do { } while (0)
# define HMAreNestedPagingAndFullGuestExecEnabled(pVM)      false
# define HMIsLongModeAllowed(pVM)                           false
# define HMIsNestedPagingActive(pVM)                        false
/* Fix: the RC stub was misnamed 'HMIsMsrBitmapsActive' and did not match the
   HMIsMsrBitmapActive() API declared for the other contexts, so RC callers of
   the declared name would fail to build. */
# define HMIsMsrBitmapActive(pVM)                           false
/* Old misspelled name kept for backward compatibility; do not use. */
# define HMIsMsrBitmapsActive(pVM)                          false
/** @} */
203
/** @name RC SVM handlers.
 * @{ */
# define HMIsSvmVGifActive(pVM)                     false
# define HMNotifySvmNstGstVmexit(pVCpu, pCtx)       do { } while (0)
# if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
#  define HMIsSubjectToSvmErratum170(a_puFamily, a_puModel, a_puStepping) false
# endif
# define HMHCMaybeMovTprSvmHypercall(pVM, pVCpu)    do { } while (0)
/** @} */
213
[35361]214#endif
215
/** @name HMVMX_READ_XXX - Flags for reading auxiliary VM-exit VMCS fields.
 *
 * These flags allow reading VMCS fields that are not necessarily part of the
 * guest-CPU state but are needed while handling VM-exits.
 *
 * @note If you add any fields here, make sure to update VMXR0GetExitAuxInfo.
 *
 * @{
 */
#define HMVMX_READ_IDT_VECTORING_INFO               RT_BIT_32(0)
#define HMVMX_READ_IDT_VECTORING_ERROR_CODE         RT_BIT_32(1)
#define HMVMX_READ_EXIT_QUALIFICATION               RT_BIT_32(2)
#define HMVMX_READ_EXIT_INSTR_LEN                   RT_BIT_32(3)
#define HMVMX_READ_EXIT_INTERRUPTION_INFO           RT_BIT_32(4)
#define HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE     RT_BIT_32(5)
#define HMVMX_READ_EXIT_INSTR_INFO                  RT_BIT_32(6)
#define HMVMX_READ_GUEST_LINEAR_ADDR                RT_BIT_32(7)
#define HMVMX_READ_GUEST_PHYSICAL_ADDR              RT_BIT_32(8)
#define HMVMX_READ_GUEST_PENDING_DBG_XCPTS          RT_BIT_32(9)

/** All the VMCS fields required for processing of exception/NMI VM-exits. */
#define HMVMX_READ_XCPT_INFO            (  HMVMX_READ_EXIT_INTERRUPTION_INFO \
                                         | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE \
                                         | HMVMX_READ_EXIT_INSTR_LEN \
                                         | HMVMX_READ_IDT_VECTORING_INFO \
                                         | HMVMX_READ_IDT_VECTORING_ERROR_CODE)

/** Mask of all valid HMVMX_READ_XXX flags. */
#define HMVMX_READ_VALID_MASK           (  HMVMX_READ_IDT_VECTORING_INFO \
                                         | HMVMX_READ_IDT_VECTORING_ERROR_CODE \
                                         | HMVMX_READ_EXIT_QUALIFICATION \
                                         | HMVMX_READ_EXIT_INSTR_LEN \
                                         | HMVMX_READ_EXIT_INTERRUPTION_INFO \
                                         | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE \
                                         | HMVMX_READ_EXIT_INSTR_INFO \
                                         | HMVMX_READ_GUEST_LINEAR_ADDR \
                                         | HMVMX_READ_GUEST_PHYSICAL_ADDR \
                                         | HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
/** @} */
255
[93966]256#ifdef IN_RING0
257/** @defgroup grp_hm_r0 The HM ring-0 Context API
258 * @{
259 */
[93963]260/**
261 * HM VM-exit auxiliary info.
262 */
263typedef union
264{
265 /** VMX VM-exit auxiliary info. */
266 VMXEXITAUX Vmx;
267 /** SVM \#VMEXIT auxiliary info. */
268 SVMEXITAUX Svm;
269} HMEXITAUX;
270/** Pointer to HM-exit auxiliary info union. */
271typedef HMEXITAUX *PHMEXITAUX;
272/** Pointer to a const HM-exit auxiliary info union. */
273typedef const HMEXITAUX *PCHMEXITAUX;
274
[44373]275VMMR0_INT_DECL(int) HMR0Init(void);
276VMMR0_INT_DECL(int) HMR0Term(void);
[80281]277VMMR0_INT_DECL(int) HMR0InitVM(PVMCC pVM);
278VMMR0_INT_DECL(int) HMR0TermVM(PVMCC pVM);
279VMMR0_INT_DECL(int) HMR0EnableAllCpus(PVMCC pVM);
[56381]280# ifdef VBOX_WITH_RAW_MODE
[80281]281VMMR0_INT_DECL(int) HMR0EnterSwitcher(PVMCC pVM, VMMSWITCHER enmSwitcher, bool *pfVTxDisabled);
282VMMR0_INT_DECL(void) HMR0LeaveSwitcher(PVMCC pVM, bool fVTxDisabled);
[56381]283# endif
[35361]284
[80281]285VMMR0_INT_DECL(int) HMR0SetupVM(PVMCC pVM);
286VMMR0_INT_DECL(int) HMR0RunGuestCode(PVMCC pVM, PVMCPUCC pVCpu);
287VMMR0_INT_DECL(int) HMR0Enter(PVMCPUCC pVCpu);
288VMMR0_INT_DECL(int) HMR0LeaveCpu(PVMCPUCC pVCpu);
[53615]289VMMR0_INT_DECL(void) HMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser);
[80281]290VMMR0_INT_DECL(void) HMR0NotifyCpumUnloadedGuestFpuState(PVMCPUCC VCpu);
291VMMR0_INT_DECL(void) HMR0NotifyCpumModifiedHostCr0(PVMCPUCC VCpu);
[53615]292VMMR0_INT_DECL(bool) HMR0SuspendPending(void);
[80281]293VMMR0_INT_DECL(int) HMR0InvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCVirt);
294VMMR0_INT_DECL(int) HMR0ImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat);
[93963]295VMMR0_INT_DECL(int) HMR0GetExitAuxInfo(PVMCPUCC pVCpu, PHMEXITAUX pHmExitAux, uint32_t fWhat);
[35361]296/** @} */
297#endif /* IN_RING0 */
298
299
300#ifdef IN_RING3
[58110]301/** @defgroup grp_hm_r3 The HM ring-3 Context API
[35361]302 * @{
303 */
[44373]304VMMR3DECL(bool) HMR3IsEnabled(PUVM pUVM);
305VMMR3DECL(bool) HMR3IsNestedPagingActive(PUVM pUVM);
[87519]306VMMR3DECL(bool) HMR3AreVirtApicRegsEnabled(PUVM pUVM);
[60307]307VMMR3DECL(bool) HMR3IsPostedIntrsEnabled(PUVM pUVM);
[52419]308VMMR3DECL(bool) HMR3IsVpidActive(PUVM pUVM);
309VMMR3DECL(bool) HMR3IsUXActive(PUVM pUVM);
[47737]310VMMR3DECL(bool) HMR3IsSvmEnabled(PUVM pUVM);
311VMMR3DECL(bool) HMR3IsVmxEnabled(PUVM pUVM);
[35361]312
[44373]313VMMR3_INT_DECL(int) HMR3Init(PVM pVM);
314VMMR3_INT_DECL(int) HMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
315VMMR3_INT_DECL(void) HMR3Relocate(PVM pVM);
316VMMR3_INT_DECL(int) HMR3Term(PVM pVM);
317VMMR3_INT_DECL(void) HMR3Reset(PVM pVM);
318VMMR3_INT_DECL(void) HMR3ResetCpu(PVMCPU pVCpu);
319VMMR3_INT_DECL(void) HMR3CheckError(PVM pVM, int iStatusCode);
[58938]320VMMR3_INT_DECL(void) HMR3NotifyDebugEventChanged(PVM pVM);
321VMMR3_INT_DECL(void) HMR3NotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu);
[100140]322# if 0 /* evil */
[78254]323VMMR3_INT_DECL(bool) HMR3IsActive(PCVMCPU pVCpu);
[100140]324# endif
[44373]325VMMR3_INT_DECL(int) HMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
326VMMR3_INT_DECL(int) HMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
[72983]327VMMR3_INT_DECL(int) HMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu);
[78254]328VMMR3_INT_DECL(bool) HMR3IsRescheduleRequired(PVM pVM, PCCPUMCTX pCtx);
[44373]329VMMR3_INT_DECL(bool) HMR3IsVmxPreemptionTimerUsed(PVM pVM);
[35361]330/** @} */
331#endif /* IN_RING3 */
332
333/** @} */
334RT_C_DECLS_END
335
336
[76585]337#endif /* !VBOX_INCLUDED_vmm_hm_h */
[35361]338
Note: See TracBrowser for help on using the repository browser.

© 2023 Oracle
Contact · Privacy policy · Terms of Use