VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@96860

Last change on this file since 96860 was 96747, checked in by vboxsync, 20 months ago

VMM/Nested VMX: bugref:10092 Conditional compile fixes for VBOX_WITH_NESTED_HWVIRT_VMX_EPT. Comments, naming and conservative state saving for nested EPT violation/misconfig for now.

1/* $Id: VMXAllTemplate.cpp.h 96747 2022-09-15 16:58:38Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
33# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW macros is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
38# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW macros is missing"
39#endif
40
41
42/** Use the function table. */
43#define HMVMX_USE_FUNCTION_TABLE
44
45/** Determine which tagged-TLB flush handler to use. */
46#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
47#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
48#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
49#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
50
51/** Assert that all the given fields have been read from the VMCS. */
52#ifdef VBOX_STRICT
53# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
54 do { \
55 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
56 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
57 } while (0)
58#else
59# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
60#endif
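/*
 * Illustrative usage sketch (not part of the original file): a VM-exit handler
 * that consumes the exit qualification would typically pair the read-on-demand
 * helper with this assertion, along the lines of:
 *
 *     vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
 *     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
 *     uint64_t const uExitQual = pVmxTransient->uExitQual;
 */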
61
62/**
63 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
64 * guest using hardware-assisted VMX.
65 *
66 * This excludes state like GPRs (other than RSP) which are always swapped
67 * and restored across the world-switch, and also registers like the EFER
68 * MSR which cannot be modified by the guest without causing a VM-exit.
69 */
70#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
71 | CPUMCTX_EXTRN_RFLAGS \
72 | CPUMCTX_EXTRN_RSP \
73 | CPUMCTX_EXTRN_SREG_MASK \
74 | CPUMCTX_EXTRN_TABLE_MASK \
75 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
76 | CPUMCTX_EXTRN_SYSCALL_MSRS \
77 | CPUMCTX_EXTRN_SYSENTER_MSRS \
78 | CPUMCTX_EXTRN_TSC_AUX \
79 | CPUMCTX_EXTRN_OTHER_MSRS \
80 | CPUMCTX_EXTRN_CR0 \
81 | CPUMCTX_EXTRN_CR3 \
82 | CPUMCTX_EXTRN_CR4 \
83 | CPUMCTX_EXTRN_DR7 \
84 | CPUMCTX_EXTRN_HWVIRT \
85 | CPUMCTX_EXTRN_INHIBIT_INT \
86 | CPUMCTX_EXTRN_INHIBIT_NMI)
87
88/**
89 * Exception bitmap mask for real-mode guests (real-on-v86).
90 *
91 * We need to intercept all exceptions manually, except:
92 * - \#AC and \#DB, which are always intercepted anyway to prevent the CPU from
93 * deadlocking due to bugs in Intel CPUs, and so are excluded from this mask.
94 * - \#PF, which need not be intercepted even in real-mode if we have nested
95 * paging support.
96 */
97#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
98 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
99 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
100 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
101 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
102 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
103 | RT_BIT(X86_XCPT_XF))
104
105/** Maximum VM-instruction error number. */
106#define HMVMX_INSTR_ERROR_MAX 28
107
108/** Profiling macro. */
109#ifdef HM_PROFILE_EXIT_DISPATCH
110# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
111# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
112#else
113# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
114# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
115#endif
116
117#ifndef IN_NEM_DARWIN
118/** Assert that preemption is disabled or covered by thread-context hooks. */
119# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
120 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
121
122/** Assert that we haven't migrated CPUs when thread-context hooks are not
123 * used. */
124# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
125 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
126 ("Illegal migration! Entered on CPU %u Current %u\n", \
127 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
128#else
129# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
130# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
131#endif
132
133/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
134 * context. */
135#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
136 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
137 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
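/*
 * Illustrative usage sketch (not part of the original file): code that is about
 * to touch CR0 and RFLAGS in the guest context could assert that those bits have
 * already been imported (i.e. are not marked external in fExtrn):
 *
 *     HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS);
 *
 * Passing HMVMX_CPUMCTX_EXTRN_ALL asserts that the entire subset kept by this
 * code has been imported.
 */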
138
139/** Log the VM-exit reason with an easily visible marker to identify it in a
140 * potential sea of logging data. */
141#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
142 do { \
143 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
144 HMGetVmxExitName(a_uExitReason))); \
145 } while (0) \
146
147
148/*********************************************************************************************************************************
149* Structures and Typedefs *
150*********************************************************************************************************************************/
151/**
152 * Memory operand read or write access.
153 */
154typedef enum VMXMEMACCESS
155{
156 VMXMEMACCESS_READ = 0,
157 VMXMEMACCESS_WRITE = 1
158} VMXMEMACCESS;
159
160
161/**
162 * VMX VM-exit handler.
163 *
164 * @returns Strict VBox status code (i.e. informational status codes too).
165 * @param pVCpu The cross context virtual CPU structure.
166 * @param pVmxTransient The VMX-transient structure.
167 */
168#ifndef HMVMX_USE_FUNCTION_TABLE
169typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
170#else
171typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
172/** Pointer to VM-exit handler. */
173typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
174#endif
175
176/**
177 * VMX VM-exit handler, non-strict status code.
178 *
179 * This is generally the same as FNVMXEXITHANDLER; the NSRC (non-strict return code) part is just FYI.
180 *
181 * @returns VBox status code, no informational status code returned.
182 * @param pVCpu The cross context virtual CPU structure.
183 * @param pVmxTransient The VMX-transient structure.
184 *
185 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
186 * use of that status code will be replaced with VINF_EM_SOMETHING
187 * later when switching over to IEM.
188 */
189#ifndef HMVMX_USE_FUNCTION_TABLE
190typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
191#else
192typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
193#endif
194
195
196/*********************************************************************************************************************************
197* Internal Functions *
198*********************************************************************************************************************************/
199#ifndef HMVMX_USE_FUNCTION_TABLE
200DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
201# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
202# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
203#else
204# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
205# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
206#endif
207#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
208DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
209#endif
210
211static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
212
213/** @name VM-exit handler prototypes.
214 * @{
215 */
216static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
217static FNVMXEXITHANDLER vmxHCExitExtInt;
218static FNVMXEXITHANDLER vmxHCExitTripleFault;
219static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
220static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
221static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
222static FNVMXEXITHANDLER vmxHCExitCpuid;
223static FNVMXEXITHANDLER vmxHCExitGetsec;
224static FNVMXEXITHANDLER vmxHCExitHlt;
225static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
226static FNVMXEXITHANDLER vmxHCExitInvlpg;
227static FNVMXEXITHANDLER vmxHCExitRdpmc;
228static FNVMXEXITHANDLER vmxHCExitVmcall;
229#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
230static FNVMXEXITHANDLER vmxHCExitVmclear;
231static FNVMXEXITHANDLER vmxHCExitVmlaunch;
232static FNVMXEXITHANDLER vmxHCExitVmptrld;
233static FNVMXEXITHANDLER vmxHCExitVmptrst;
234static FNVMXEXITHANDLER vmxHCExitVmread;
235static FNVMXEXITHANDLER vmxHCExitVmresume;
236static FNVMXEXITHANDLER vmxHCExitVmwrite;
237static FNVMXEXITHANDLER vmxHCExitVmxoff;
238static FNVMXEXITHANDLER vmxHCExitVmxon;
239static FNVMXEXITHANDLER vmxHCExitInvvpid;
240# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
241static FNVMXEXITHANDLER vmxHCExitInvept;
242# endif
243#endif
244static FNVMXEXITHANDLER vmxHCExitRdtsc;
245static FNVMXEXITHANDLER vmxHCExitMovCRx;
246static FNVMXEXITHANDLER vmxHCExitMovDRx;
247static FNVMXEXITHANDLER vmxHCExitIoInstr;
248static FNVMXEXITHANDLER vmxHCExitRdmsr;
249static FNVMXEXITHANDLER vmxHCExitWrmsr;
250static FNVMXEXITHANDLER vmxHCExitMwait;
251static FNVMXEXITHANDLER vmxHCExitMtf;
252static FNVMXEXITHANDLER vmxHCExitMonitor;
253static FNVMXEXITHANDLER vmxHCExitPause;
254static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
255static FNVMXEXITHANDLER vmxHCExitApicAccess;
256static FNVMXEXITHANDLER vmxHCExitEptViolation;
257static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
258static FNVMXEXITHANDLER vmxHCExitRdtscp;
259static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
260static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
261static FNVMXEXITHANDLER vmxHCExitXsetbv;
262static FNVMXEXITHANDLER vmxHCExitInvpcid;
263#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
264static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
265#endif
266static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
267static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
268/** @} */
269
270#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
271/** @name Nested-guest VM-exit handler prototypes.
272 * @{
273 */
274static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
275static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
276static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
277static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
278static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
279static FNVMXEXITHANDLER vmxHCExitHltNested;
280static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
281static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
282static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
283static FNVMXEXITHANDLER vmxHCExitRdtscNested;
284static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
285static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
286static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
287static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
288static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
289static FNVMXEXITHANDLER vmxHCExitMwaitNested;
290static FNVMXEXITHANDLER vmxHCExitMtfNested;
291static FNVMXEXITHANDLER vmxHCExitMonitorNested;
292static FNVMXEXITHANDLER vmxHCExitPauseNested;
293static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
294static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
295static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
296static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
297static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
298static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
299static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
300static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
301static FNVMXEXITHANDLER vmxHCExitInstrNested;
302static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
303# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
304static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
305static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
306# endif
307/** @} */
308#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
309
310
311/*********************************************************************************************************************************
312* Global Variables *
313*********************************************************************************************************************************/
314#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
315/**
316 * Array of all VMCS fields.
317 * Any fields added to the VT-x spec. should be added here.
318 *
319 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
320 * of nested-guests.
321 */
322static const uint32_t g_aVmcsFields[] =
323{
324 /* 16-bit control fields. */
325 VMX_VMCS16_VPID,
326 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
327 VMX_VMCS16_EPTP_INDEX,
328
329 /* 16-bit guest-state fields. */
330 VMX_VMCS16_GUEST_ES_SEL,
331 VMX_VMCS16_GUEST_CS_SEL,
332 VMX_VMCS16_GUEST_SS_SEL,
333 VMX_VMCS16_GUEST_DS_SEL,
334 VMX_VMCS16_GUEST_FS_SEL,
335 VMX_VMCS16_GUEST_GS_SEL,
336 VMX_VMCS16_GUEST_LDTR_SEL,
337 VMX_VMCS16_GUEST_TR_SEL,
338 VMX_VMCS16_GUEST_INTR_STATUS,
339 VMX_VMCS16_GUEST_PML_INDEX,
340
341 /* 16-bit host-state fields. */
342 VMX_VMCS16_HOST_ES_SEL,
343 VMX_VMCS16_HOST_CS_SEL,
344 VMX_VMCS16_HOST_SS_SEL,
345 VMX_VMCS16_HOST_DS_SEL,
346 VMX_VMCS16_HOST_FS_SEL,
347 VMX_VMCS16_HOST_GS_SEL,
348 VMX_VMCS16_HOST_TR_SEL,
349
350 /* 64-bit control fields. */
351 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
352 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
353 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
354 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
355 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
356 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
357 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
358 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
359 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
360 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
361 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
362 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
363 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
364 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
365 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
366 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
367 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
368 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
369 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
370 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
371 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
372 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
373 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
374 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
375 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
376 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
377 VMX_VMCS64_CTRL_EPTP_FULL,
378 VMX_VMCS64_CTRL_EPTP_HIGH,
379 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
380 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
381 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
382 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
383 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
384 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
385 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
386 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
387 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
388 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
389 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
390 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
391 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
392 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
393 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
394 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
395 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
396 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
397 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
398 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
399 VMX_VMCS64_CTRL_SPPTP_FULL,
400 VMX_VMCS64_CTRL_SPPTP_HIGH,
401 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
402 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
403 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
404 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
405 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
406 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
407
408 /* 64-bit read-only data fields. */
409 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
410 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
411
412 /* 64-bit guest-state fields. */
413 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
414 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
415 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
416 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
417 VMX_VMCS64_GUEST_PAT_FULL,
418 VMX_VMCS64_GUEST_PAT_HIGH,
419 VMX_VMCS64_GUEST_EFER_FULL,
420 VMX_VMCS64_GUEST_EFER_HIGH,
421 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
422 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
423 VMX_VMCS64_GUEST_PDPTE0_FULL,
424 VMX_VMCS64_GUEST_PDPTE0_HIGH,
425 VMX_VMCS64_GUEST_PDPTE1_FULL,
426 VMX_VMCS64_GUEST_PDPTE1_HIGH,
427 VMX_VMCS64_GUEST_PDPTE2_FULL,
428 VMX_VMCS64_GUEST_PDPTE2_HIGH,
429 VMX_VMCS64_GUEST_PDPTE3_FULL,
430 VMX_VMCS64_GUEST_PDPTE3_HIGH,
431 VMX_VMCS64_GUEST_BNDCFGS_FULL,
432 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
433 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
434 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
435 VMX_VMCS64_GUEST_PKRS_FULL,
436 VMX_VMCS64_GUEST_PKRS_HIGH,
437
438 /* 64-bit host-state fields. */
439 VMX_VMCS64_HOST_PAT_FULL,
440 VMX_VMCS64_HOST_PAT_HIGH,
441 VMX_VMCS64_HOST_EFER_FULL,
442 VMX_VMCS64_HOST_EFER_HIGH,
443 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
444 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
445 VMX_VMCS64_HOST_PKRS_FULL,
446 VMX_VMCS64_HOST_PKRS_HIGH,
447
448 /* 32-bit control fields. */
449 VMX_VMCS32_CTRL_PIN_EXEC,
450 VMX_VMCS32_CTRL_PROC_EXEC,
451 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
452 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
453 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
454 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
455 VMX_VMCS32_CTRL_EXIT,
456 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
457 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
458 VMX_VMCS32_CTRL_ENTRY,
459 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
460 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
461 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
462 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
463 VMX_VMCS32_CTRL_TPR_THRESHOLD,
464 VMX_VMCS32_CTRL_PROC_EXEC2,
465 VMX_VMCS32_CTRL_PLE_GAP,
466 VMX_VMCS32_CTRL_PLE_WINDOW,
467
468 /* 32-bit read-only data fields. */
469 VMX_VMCS32_RO_VM_INSTR_ERROR,
470 VMX_VMCS32_RO_EXIT_REASON,
471 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
472 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
473 VMX_VMCS32_RO_IDT_VECTORING_INFO,
474 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
475 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
476 VMX_VMCS32_RO_EXIT_INSTR_INFO,
477
478 /* 32-bit guest-state fields. */
479 VMX_VMCS32_GUEST_ES_LIMIT,
480 VMX_VMCS32_GUEST_CS_LIMIT,
481 VMX_VMCS32_GUEST_SS_LIMIT,
482 VMX_VMCS32_GUEST_DS_LIMIT,
483 VMX_VMCS32_GUEST_FS_LIMIT,
484 VMX_VMCS32_GUEST_GS_LIMIT,
485 VMX_VMCS32_GUEST_LDTR_LIMIT,
486 VMX_VMCS32_GUEST_TR_LIMIT,
487 VMX_VMCS32_GUEST_GDTR_LIMIT,
488 VMX_VMCS32_GUEST_IDTR_LIMIT,
489 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
490 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
491 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
492 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
493 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
494 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
495 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
496 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
497 VMX_VMCS32_GUEST_INT_STATE,
498 VMX_VMCS32_GUEST_ACTIVITY_STATE,
499 VMX_VMCS32_GUEST_SMBASE,
500 VMX_VMCS32_GUEST_SYSENTER_CS,
501 VMX_VMCS32_PREEMPT_TIMER_VALUE,
502
503 /* 32-bit host-state fields. */
504 VMX_VMCS32_HOST_SYSENTER_CS,
505
506 /* Natural-width control fields. */
507 VMX_VMCS_CTRL_CR0_MASK,
508 VMX_VMCS_CTRL_CR4_MASK,
509 VMX_VMCS_CTRL_CR0_READ_SHADOW,
510 VMX_VMCS_CTRL_CR4_READ_SHADOW,
511 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
512 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
513 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
514 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
515
516 /* Natural-width read-only data fields. */
517 VMX_VMCS_RO_EXIT_QUALIFICATION,
518 VMX_VMCS_RO_IO_RCX,
519 VMX_VMCS_RO_IO_RSI,
520 VMX_VMCS_RO_IO_RDI,
521 VMX_VMCS_RO_IO_RIP,
522 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
523
524 /* Natural-width guest-state fields. */
525 VMX_VMCS_GUEST_CR0,
526 VMX_VMCS_GUEST_CR3,
527 VMX_VMCS_GUEST_CR4,
528 VMX_VMCS_GUEST_ES_BASE,
529 VMX_VMCS_GUEST_CS_BASE,
530 VMX_VMCS_GUEST_SS_BASE,
531 VMX_VMCS_GUEST_DS_BASE,
532 VMX_VMCS_GUEST_FS_BASE,
533 VMX_VMCS_GUEST_GS_BASE,
534 VMX_VMCS_GUEST_LDTR_BASE,
535 VMX_VMCS_GUEST_TR_BASE,
536 VMX_VMCS_GUEST_GDTR_BASE,
537 VMX_VMCS_GUEST_IDTR_BASE,
538 VMX_VMCS_GUEST_DR7,
539 VMX_VMCS_GUEST_RSP,
540 VMX_VMCS_GUEST_RIP,
541 VMX_VMCS_GUEST_RFLAGS,
542 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
543 VMX_VMCS_GUEST_SYSENTER_ESP,
544 VMX_VMCS_GUEST_SYSENTER_EIP,
545 VMX_VMCS_GUEST_S_CET,
546 VMX_VMCS_GUEST_SSP,
547 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
548
549 /* Natural-width host-state fields. */
550 VMX_VMCS_HOST_CR0,
551 VMX_VMCS_HOST_CR3,
552 VMX_VMCS_HOST_CR4,
553 VMX_VMCS_HOST_FS_BASE,
554 VMX_VMCS_HOST_GS_BASE,
555 VMX_VMCS_HOST_TR_BASE,
556 VMX_VMCS_HOST_GDTR_BASE,
557 VMX_VMCS_HOST_IDTR_BASE,
558 VMX_VMCS_HOST_SYSENTER_ESP,
559 VMX_VMCS_HOST_SYSENTER_EIP,
560 VMX_VMCS_HOST_RSP,
561 VMX_VMCS_HOST_RIP,
562 VMX_VMCS_HOST_S_CET,
563 VMX_VMCS_HOST_SSP,
564 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
565};
566#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
567
568#ifdef VBOX_STRICT
569static const uint32_t g_aVmcsSegBase[] =
570{
571 VMX_VMCS_GUEST_ES_BASE,
572 VMX_VMCS_GUEST_CS_BASE,
573 VMX_VMCS_GUEST_SS_BASE,
574 VMX_VMCS_GUEST_DS_BASE,
575 VMX_VMCS_GUEST_FS_BASE,
576 VMX_VMCS_GUEST_GS_BASE
577};
578static const uint32_t g_aVmcsSegSel[] =
579{
580 VMX_VMCS16_GUEST_ES_SEL,
581 VMX_VMCS16_GUEST_CS_SEL,
582 VMX_VMCS16_GUEST_SS_SEL,
583 VMX_VMCS16_GUEST_DS_SEL,
584 VMX_VMCS16_GUEST_FS_SEL,
585 VMX_VMCS16_GUEST_GS_SEL
586};
587static const uint32_t g_aVmcsSegLimit[] =
588{
589 VMX_VMCS32_GUEST_ES_LIMIT,
590 VMX_VMCS32_GUEST_CS_LIMIT,
591 VMX_VMCS32_GUEST_SS_LIMIT,
592 VMX_VMCS32_GUEST_DS_LIMIT,
593 VMX_VMCS32_GUEST_FS_LIMIT,
594 VMX_VMCS32_GUEST_GS_LIMIT
595};
596static const uint32_t g_aVmcsSegAttr[] =
597{
598 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
599 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
600 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
601 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
602 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
603 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS
604};
605AssertCompile(RT_ELEMENTS(g_aVmcsSegSel) == X86_SREG_COUNT);
606AssertCompile(RT_ELEMENTS(g_aVmcsSegLimit) == X86_SREG_COUNT);
607AssertCompile(RT_ELEMENTS(g_aVmcsSegBase) == X86_SREG_COUNT);
608AssertCompile(RT_ELEMENTS(g_aVmcsSegAttr) == X86_SREG_COUNT);
609#endif /* VBOX_STRICT */
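/*
 * Illustrative usage sketch (not part of the original file): the four arrays are
 * indexed by X86_SREG_XXX (as the AssertCompile checks above suggest), so the
 * VMCS field IDs for, say, CS can be looked up as:
 *
 *     uint32_t const idxSel   = g_aVmcsSegSel[X86_SREG_CS];
 *     uint32_t const idxBase  = g_aVmcsSegBase[X86_SREG_CS];
 *     uint32_t const idxLimit = g_aVmcsSegLimit[X86_SREG_CS];
 *     uint32_t const idxAttr  = g_aVmcsSegAttr[X86_SREG_CS];
 */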
610
611#ifdef HMVMX_USE_FUNCTION_TABLE
612/**
613 * VMX_EXIT dispatch table.
614 */
615static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
616{
617 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
618 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
619 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
620 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
621 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
622 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
623 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
624 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
625 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
626 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
627 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
628 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
629 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
630 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
631 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
632 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
633 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
634 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
635 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
636#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
637 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
638 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
639 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
640 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
641 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
642 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
643 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
644 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
645 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
646#else
647 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
648 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
649 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
650 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
651 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
652 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
653 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
654 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
655 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
656#endif
657 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
658 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
659 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
660 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
661 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
662 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
663 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
664 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
665 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
666 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
667 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
668 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
669 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
670 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
671 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
672 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
673 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
674 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
675 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
676 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
677 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
678 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
679#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
680 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
681#else
682 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
683#endif
684 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
685 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
686#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
687 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
688#else
689 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
690#endif
691 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
692 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
693 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
694 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
695 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
696 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
697 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
698 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
699 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
700 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
701 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
702 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
703 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
704 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
705 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
706 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
707};
708#endif /* HMVMX_USE_FUNCTION_TABLE */
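/*
 * Illustrative dispatch sketch (not part of the original file): with the function
 * table enabled, a VM-exit is dispatched by indexing the table with the exit
 * reason, roughly along these lines (the exact bounds handling lives in
 * vmxHCHandleExit and may differ):
 *
 *     if (RT_LIKELY(uExitReason <= VMX_EXIT_MAX))
 *         return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
 *     return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
 */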
709
710#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
711static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
712{
713 /* 0 */ "(Not Used)",
714 /* 1 */ "VMCALL executed in VMX root operation.",
715 /* 2 */ "VMCLEAR with invalid physical address.",
716 /* 3 */ "VMCLEAR with VMXON pointer.",
717 /* 4 */ "VMLAUNCH with non-clear VMCS.",
718 /* 5 */ "VMRESUME with non-launched VMCS.",
719 /* 6 */ "VMRESUME after VMXOFF",
720 /* 7 */ "VM-entry with invalid control fields.",
721 /* 8 */ "VM-entry with invalid host state fields.",
722 /* 9 */ "VMPTRLD with invalid physical address.",
723 /* 10 */ "VMPTRLD with VMXON pointer.",
724 /* 11 */ "VMPTRLD with incorrect revision identifier.",
725 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
726 /* 13 */ "VMWRITE to read-only VMCS component.",
727 /* 14 */ "(Not Used)",
728 /* 15 */ "VMXON executed in VMX root operation.",
729 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
730 /* 17 */ "VM-entry with non-launched executing VMCS.",
731 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
732 /* 19 */ "VMCALL with non-clear VMCS.",
733 /* 20 */ "VMCALL with invalid VM-exit control fields.",
734 /* 21 */ "(Not Used)",
735 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
736 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
737 /* 24 */ "VMCALL with invalid SMM-monitor features.",
738 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
739 /* 26 */ "VM-entry with events blocked by MOV SS.",
740 /* 27 */ "(Not Used)",
741 /* 28 */ "Invalid operand to INVEPT/INVVPID."
742};
743#endif /* VBOX_STRICT && LOG_ENABLED */
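/*
 * Illustrative usage sketch (not part of the original file): after a failed
 * VMLAUNCH/VMRESUME, the VM-instruction error can be read and logged with a
 * bounds check against HMVMX_INSTR_ERROR_MAX, e.g.:
 *
 *     uint32_t uInstrError = 0;
 *     int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_VM_INSTR_ERROR, &uInstrError);
 *     AssertRC(rc);
 *     if (uInstrError <= HMVMX_INSTR_ERROR_MAX)
 *         Log4(("VM-instruction error %u: %s\n", uInstrError, g_apszVmxInstrErrors[uInstrError]));
 */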
744
745
746/**
747 * Gets the CR0 guest/host mask.
748 *
749 * These bits typically do not change through the lifetime of a VM. Any bit set in
750 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
751 * by the guest.
752 *
753 * @returns The CR0 guest/host mask.
754 * @param pVCpu The cross context virtual CPU structure.
755 */
756static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
757{
758 /*
759 * Modifications to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW) and
760 * to CR0 bits that we require for shadow paging (PG) by the guest must cause VM-exits.
761 *
762 * Furthermore, modifications to any bits that are reserved/unspecified currently
763 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
764 * when future CPUs specify and use currently reserved/unspecified bits.
765 */
766 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
767 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
768 * and @bugref{6944}. */
769 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
770 return ( X86_CR0_PE
771 | X86_CR0_NE
772 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
773 | X86_CR0_PG
774 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
775}
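/*
 * Illustrative usage sketch (not part of the original file, and assuming the
 * natural-width write macro VMX_VMCS_WRITE_NW is available in this template):
 * the mask returned here is what gets committed to the CR0 guest/host mask
 * field, e.g.:
 *
 *     uint64_t const fCr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
 *     int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, fCr0Mask);
 *     AssertRC(rc);
 */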
776
777
778/**
779 * Gets the CR4 guest/host mask.
780 *
781 * These bits typically do not change through the lifetime of a VM. Any bit set in
782 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
783 * by the guest.
784 *
785 * @returns The CR4 guest/host mask.
786 * @param pVCpu The cross context virtual CPU structure.
787 */
788static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
789{
790 /*
791 * We construct a mask of all CR4 bits that the guest can modify without causing
792 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
793 * a VM-exit when the guest attempts to modify them when executing using
794 * hardware-assisted VMX.
795 *
796 * When a feature is not exposed to the guest (and may be present on the host),
797 * we want to intercept guest modifications to the bit so we can emulate proper
798 * behavior (e.g., #GP).
799 *
800 * Furthermore, only modifications to those bits that don't require immediate
801 * emulation are allowed. For example, PCIDE is excluded because the behavior
802 * depends on CR3 which might not always be the guest value while executing
803 * using hardware-assisted VMX.
804 */
805 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
806 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
807#ifdef IN_NEM_DARWIN
808 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
809#endif
810 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
811
812 /*
813 * Paranoia.
814 * Ensure features exposed to the guest are present on the host.
815 */
816 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
817#ifdef IN_NEM_DARWIN
818 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
819#endif
820 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
821
822 uint64_t const fGstMask = X86_CR4_PVI
823 | X86_CR4_TSD
824 | X86_CR4_DE
825 | X86_CR4_MCE
826 | X86_CR4_PCE
827 | X86_CR4_OSXMMEEXCPT
828 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
829#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
830 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
831 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
832#endif
833 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
834 return ~fGstMask;
835}
836
837
838/**
839 * Adds one or more exceptions to the exception bitmap and commits it to the current
840 * VMCS.
841 *
842 * @param pVCpu The cross context virtual CPU structure.
843 * @param pVmxTransient The VMX-transient structure.
844 * @param uXcptMask The exception(s) to add.
845 */
846static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
847{
848 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
849 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
850 if ((uXcptBitmap & uXcptMask) != uXcptMask)
851 {
852 uXcptBitmap |= uXcptMask;
853 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
854 AssertRC(rc);
855 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
856 }
857}
858
859
860/**
861 * Adds an exception to the exception bitmap and commits it to the current VMCS.
862 *
863 * @param pVCpu The cross context virtual CPU structure.
864 * @param pVmxTransient The VMX-transient structure.
865 * @param uXcpt The exception to add.
866 */
867static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
868{
869 Assert(uXcpt <= X86_XCPT_LAST);
870 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
871}
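/*
 * Illustrative usage sketch (not part of the original file): to start trapping
 * guest \#GP faults, for instance, one would add the intercept like so:
 *
 *     vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 */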
872
873
874/**
875 * Removes one or more exceptions from the exception bitmap and commits it to the
876 * current VMCS.
877 *
878 * This takes care of not removing the exception intercept if a nested-guest
879 * requires the exception to be intercepted.
880 *
881 * @returns VBox status code.
882 * @param pVCpu The cross context virtual CPU structure.
883 * @param pVmxTransient The VMX-transient structure.
884 * @param uXcptMask The exception(s) to remove.
885 */
886static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
887{
888 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
889 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
890 if (u32XcptBitmap & uXcptMask)
891 {
892#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
893 if (!pVmxTransient->fIsNestedGuest)
894 { /* likely */ }
895 else
896 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
897#endif
898#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
899 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
900 | RT_BIT(X86_XCPT_DE)
901 | RT_BIT(X86_XCPT_NM)
902 | RT_BIT(X86_XCPT_TS)
903 | RT_BIT(X86_XCPT_UD)
904 | RT_BIT(X86_XCPT_NP)
905 | RT_BIT(X86_XCPT_SS)
906 | RT_BIT(X86_XCPT_GP)
907 | RT_BIT(X86_XCPT_PF)
908 | RT_BIT(X86_XCPT_MF));
909#elif defined(HMVMX_ALWAYS_TRAP_PF)
910 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
911#endif
912 if (uXcptMask)
913 {
914 /* Validate we are not removing any essential exception intercepts. */
915#ifndef IN_NEM_DARWIN
916 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
917#else
918 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
919#endif
920 NOREF(pVCpu);
921 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
922 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
923
924 /* Remove it from the exception bitmap. */
925 u32XcptBitmap &= ~uXcptMask;
926
927 /* Commit and update the cache if necessary. */
928 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
929 {
930 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
931 AssertRC(rc);
932 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
933 }
934 }
935 }
936 return VINF_SUCCESS;
937}
938
939
940/**
941 * Removes an exception from the exception bitmap and commits it to the current
942 * VMCS.
943 *
944 * @returns VBox status code.
945 * @param pVCpu The cross context virtual CPU structure.
946 * @param pVmxTransient The VMX-transient structure.
947 * @param uXcpt The exception to remove.
948 */
949static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
950{
951 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
952}
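/*
 * Illustrative usage sketch (not part of the original file): the counterpart of
 * the add helpers above; for example, dropping the \#MF intercept again:
 *
 *     int rc = vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_MF);
 *     AssertRC(rc);
 */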
953
954
955#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
956/**
957 * Loads the shadow VMCS specified by the VMCS info. object.
958 *
959 * @returns VBox status code.
960 * @param pVmcsInfo The VMCS info. object.
961 *
962 * @remarks Can be called with interrupts disabled.
963 */
964static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
965{
966 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
967 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
968
969 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
970 if (RT_SUCCESS(rc))
971 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
972 return rc;
973}
974
975
976/**
977 * Clears the shadow VMCS specified by the VMCS info. object.
978 *
979 * @returns VBox status code.
980 * @param pVmcsInfo The VMCS info. object.
981 *
982 * @remarks Can be called with interrupts disabled.
983 */
984static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
985{
986 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
987 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
988
989 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
990 if (RT_SUCCESS(rc))
991 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
992 return rc;
993}
994
995
996/**
997 * Switches from and to the specified VMCSes.
998 *
999 * @returns VBox status code.
1000 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
1001 * @param pVmcsInfoTo The VMCS info. object we are switching to.
1002 *
1003 * @remarks Called with interrupts disabled.
1004 */
1005static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
1006{
1007 /*
1008 * Clear the VMCS we are switching out if it has not already been cleared.
1009 * This will sync any CPU internal data back to the VMCS.
1010 */
1011 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1012 {
1013 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
1014 if (RT_SUCCESS(rc))
1015 {
1016 /*
1017 * The shadow VMCS, if any, would not be active at this point since we
1018 * would have cleared it while importing the virtual hardware-virtualization
1019 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
1020 * clear the shadow VMCS here, just assert for safety.
1021 */
1022 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
1023 }
1024 else
1025 return rc;
1026 }
1027
1028 /*
1029 * Clear the VMCS we are switching to if it has not already been cleared.
1030 * This will initialize the VMCS launch state to "clear" required for loading it.
1031 *
1032 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1033 */
1034 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1035 {
1036 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1037 if (RT_SUCCESS(rc))
1038 { /* likely */ }
1039 else
1040 return rc;
1041 }
1042
1043 /*
1044 * Finally, load the VMCS we are switching to.
1045 */
1046 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1047}
1048
1049
1050/**
1051 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1052 * caller.
1053 *
1054 * @returns VBox status code.
1055 * @param pVCpu The cross context virtual CPU structure.
1056 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1057 * true) or guest VMCS (pass false).
1058 */
1059static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1060{
1061 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1062 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1063
1064 PVMXVMCSINFO pVmcsInfoFrom;
1065 PVMXVMCSINFO pVmcsInfoTo;
1066 if (fSwitchToNstGstVmcs)
1067 {
1068 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1069 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1070 }
1071 else
1072 {
1073 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1074 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1075 }
1076
1077 /*
1078 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1079 * preemption hook code path acquires the current VMCS.
1080 */
1081 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1082
1083 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1084 if (RT_SUCCESS(rc))
1085 {
1086 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1087 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1088
1089 /*
1090 * If we are switching to a VMCS that was executed on a different host CPU or was
1091 * never executed before, flag that we need to export the host state before executing
1092 * guest/nested-guest code using hardware-assisted VMX.
1093 *
1094 * This could probably be done in a preemptible context since the preemption hook
1095 * will flag the necessary change in host context. However, since preemption is
1096 * already disabled and to avoid making assumptions about host specific code in
1097 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1098 * disabled.
1099 */
1100 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1101 { /* likely */ }
1102 else
1103 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1104
1105 ASMSetFlags(fEFlags);
1106
1107 /*
1108 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1109 * flag that we need to update the host MSR values there. Even if we decide in the
1110 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1111 * if its content differs, we would have to update the host MSRs anyway.
1112 */
1113 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1114 }
1115 else
1116 ASMSetFlags(fEFlags);
1117 return rc;
1118}
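/*
 * Illustrative usage sketch (not part of the original file): switching to the
 * nested-guest VMCS before running the nested-guest, and back afterwards:
 *
 *     int rc = vmxHCSwitchToGstOrNstGstVmcs(pVCpu, true  /+ fSwitchToNstGstVmcs +/);
 *     ...
 *     rc     = vmxHCSwitchToGstOrNstGstVmcs(pVCpu, false /+ fSwitchToNstGstVmcs +/);
 *
 * (The /+ +/ markers stand in for nested comments here.)
 */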
1119#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1120
1121
1122#ifdef VBOX_STRICT
1123/**
1124 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1125 * transient structure.
1126 *
1127 * @param pVCpu The cross context virtual CPU structure.
1128 * @param pVmxTransient The VMX-transient structure.
1129 */
1130DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1131{
1132 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1133 AssertRC(rc);
1134}
1135
1136
1137/**
1138 * Reads the VM-entry exception error code field from the VMCS into
1139 * the VMX transient structure.
1140 *
1141 * @param pVCpu The cross context virtual CPU structure.
1142 * @param pVmxTransient The VMX-transient structure.
1143 */
1144DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1145{
1146 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1147 AssertRC(rc);
1148}
1149
1150
1151/**
1152 * Reads the VM-entry instruction length field from the VMCS into
1153 * the VMX transient structure.
1154 *
1155 * @param pVCpu The cross context virtual CPU structure.
1156 * @param pVmxTransient The VMX-transient structure.
1157 */
1158DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1159{
1160 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1161 AssertRC(rc);
1162}
1163#endif /* VBOX_STRICT */
1164
1165
1166/**
1167 * Reads the VM-exit interruption-information field from the VMCS into the VMX
1168 * transient structure.
1169 *
1170 * @param pVCpu The cross context virtual CPU structure.
1171 * @param pVmxTransient The VMX-transient structure.
1172 */
1173DECLINLINE(void) vmxHCReadExitIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1174{
1175 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1176 {
1177 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1178 AssertRC(rc);
1179 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_INFO;
1180 }
1181}
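/*
 * Illustrative usage sketch (not part of the original file, assuming the
 * VMX_EXIT_INT_INFO_TYPE accessor from the VMX headers): the read-on-demand
 * helpers above and below are idempotent, so an exit handler simply calls the
 * ones it needs and then consumes the cached transient fields, e.g.:
 *
 *     vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
 *     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_INTERRUPTION_INFO);
 *     uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
 */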
1182
1183
1184/**
1185 * Reads the VM-exit interruption error code from the VMCS into the VMX
1186 * transient structure.
1187 *
1188 * @param pVCpu The cross context virtual CPU structure.
1189 * @param pVmxTransient The VMX-transient structure.
1190 */
1191DECLINLINE(void) vmxHCReadExitIntErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1192{
1193 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1194 {
1195 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1196 AssertRC(rc);
1197 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE;
1198 }
1199}
1200
1201
1202/**
1203 * Reads the VM-exit instruction length field from the VMCS into the VMX
1204 * transient structure.
1205 *
1206 * @param pVCpu The cross context virtual CPU structure.
1207 * @param pVmxTransient The VMX-transient structure.
1208 */
1209DECLINLINE(void) vmxHCReadExitInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1210{
1211 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1212 {
1213 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1214 AssertRC(rc);
1215 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN;
1216 }
1217}
1218
1219
1220/**
1221 * Reads the VM-exit instruction-information field from the VMCS into
1222 * the VMX transient structure.
1223 *
1224 * @param pVCpu The cross context virtual CPU structure.
1225 * @param pVmxTransient The VMX-transient structure.
1226 */
1227DECLINLINE(void) vmxHCReadExitInstrInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1228{
1229 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1230 {
1231 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1232 AssertRC(rc);
1233 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_INFO;
1234 }
1235}
1236
1237
1238/**
1239 * Reads the Exit Qualification from the VMCS into the VMX transient structure.
1240 *
1241 * @param pVCpu The cross context virtual CPU structure.
1242 * @param pVmxTransient The VMX-transient structure.
1243 */
1244DECLINLINE(void) vmxHCReadExitQualVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1245{
1246 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1247 {
1248 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1249 AssertRC(rc);
1250 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
1251 }
1252}
1253
1254
1255/**
1256 * Reads the Guest-linear address from the VMCS into the VMX transient structure.
1257 *
1258 * @param pVCpu The cross context virtual CPU structure.
1259 * @param pVmxTransient The VMX-transient structure.
1260 */
1261DECLINLINE(void) vmxHCReadGuestLinearAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1262{
1263 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1264 {
1265 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1266 AssertRC(rc);
1267 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_LINEAR_ADDR;
1268 }
1269}
1270
1271
1272/**
1273 * Reads the Guest-physical address from the VMCS into the VMX transient structure.
1274 *
1275 * @param pVCpu The cross context virtual CPU structure.
1276 * @param pVmxTransient The VMX-transient structure.
1277 */
1278DECLINLINE(void) vmxHCReadGuestPhysicalAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1279{
1280 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1281 {
1282 int rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1283 AssertRC(rc);
1284 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PHYSICAL_ADDR;
1285 }
1286}
1287
1288#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1289/**
1290 * Reads the Guest pending-debug exceptions from the VMCS into the VMX transient
1291 * structure.
1292 *
1293 * @param pVCpu The cross context virtual CPU structure.
1294 * @param pVmxTransient The VMX-transient structure.
1295 */
1296DECLINLINE(void) vmxHCReadGuestPendingDbgXctps(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1297{
1298 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1299 {
1300 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1301 AssertRC(rc);
1302 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PENDING_DBG_XCPTS;
1303 }
1304}
1305#endif
1306
1307/**
1308 * Reads the IDT-vectoring information field from the VMCS into the VMX
1309 * transient structure.
1310 *
1311 * @param pVCpu The cross context virtual CPU structure.
1312 * @param pVmxTransient The VMX-transient structure.
1313 *
1314 * @remarks No-long-jump zone!!!
1315 */
1316DECLINLINE(void) vmxHCReadIdtVectoringInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1317{
1318 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1319 {
1320 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1321 AssertRC(rc);
1322 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_INFO;
1323 }
1324}
1325
1326
1327/**
1328 * Reads the IDT-vectoring error code from the VMCS into the VMX
1329 * transient structure.
1330 *
1331 * @param pVCpu The cross context virtual CPU structure.
1332 * @param pVmxTransient The VMX-transient structure.
1333 */
1334DECLINLINE(void) vmxHCReadIdtVectoringErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1335{
1336 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1337 {
1338 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1339 AssertRC(rc);
1340 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_ERROR_CODE;
1341 }
1342}
1343
1344#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1345/**
1346 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1347 *
1348 * @param pVCpu The cross context virtual CPU structure.
1349 * @param pVmxTransient The VMX-transient structure.
1350 */
1351static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1352{
1353 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1354 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1355 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1356 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1357 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1358 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1359 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1360 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1361 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1362 AssertRC(rc);
1363 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1364 | HMVMX_READ_EXIT_INSTR_LEN
1365 | HMVMX_READ_EXIT_INSTR_INFO
1366 | HMVMX_READ_IDT_VECTORING_INFO
1367 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1368 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1369 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1370 | HMVMX_READ_GUEST_LINEAR_ADDR
1371 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1372}
1373#endif
1374
1375/**
1376 * Verifies that our cached values of the VMCS fields are all consistent with
1377 * what's actually present in the VMCS.
1378 *
1379 * @returns VBox status code.
1380 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1381 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1382 * VMCS content. HMCPU error-field is
1383 * updated, see VMX_VCI_XXX.
1384 * @param pVCpu The cross context virtual CPU structure.
1385 * @param pVmcsInfo The VMCS info. object.
1386 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1387 */
1388static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1389{
1390 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1391
1392 uint32_t u32Val;
1393 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1394 AssertRC(rc);
1395 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1396 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1397 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1398 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1399
1400 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1401 AssertRC(rc);
1402 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1403 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1404 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1405 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1406
1407 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1408 AssertRC(rc);
1409 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1410 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1411 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1412 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1413
1414 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1415 AssertRC(rc);
1416 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1417 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1418 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1419 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1420
1421 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1422 {
1423 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1424 AssertRC(rc);
1425 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1426 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1427 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1428 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1429 }
1430
1431 uint64_t u64Val;
1432 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1433 {
1434 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1435 AssertRC(rc);
1436 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1437 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1438 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1439 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1440 }
1441
1442 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1443 AssertRC(rc);
1444 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1445 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1446 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1447 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1448
1449 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1450 AssertRC(rc);
1451 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1452 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1453 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1454 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1455
1456 NOREF(pcszVmcs);
1457 return VINF_SUCCESS;
1458}
1459
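    /*
     * A minimal usage sketch for the cache-consistency check above (illustrative only; the
     * exact call site and strict-build guard shown here are assumptions, not necessarily how
     * the rest of this file invokes it):
     *
     *     #ifdef VBOX_STRICT
     *         int const rcCheck = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
     *         AssertRC(rcCheck);
     *     #endif
     */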
1460
1461/**
1462 * Exports the VM-entry and VM-exit controls appropriate for the current guest state
1463 * into the VMCS.
1464 *
1465 * This is typically required when the guest changes paging mode.
1466 *
1467 * @returns VBox status code.
1468 * @param pVCpu The cross context virtual CPU structure.
1469 * @param pVmxTransient The VMX-transient structure.
1470 *
1471 * @remarks Requires EFER.
1472 * @remarks No-long-jump zone!!!
1473 */
1474static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1475{
1476 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1477 {
1478 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1479 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1480
1481 /*
1482 * VM-entry controls.
1483 */
1484 {
1485 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1486 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
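                /*
                 * How the fixed bits work (the numbers below are purely illustrative, not from any
                 * particular CPU): with allowed0 = 0x000011ff and allowed1 = 0x00ffffff, every bit
                 * set in allowed0 must be set in the VMCS, and any bit we add must also be set in
                 * allowed1. The "(fVal & fZap) == fVal" check further down catches attempts to set
                 * a bit the CPU does not allow.
                 */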
1487
1488 /*
1489 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1490 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1491 *
1492 * For nested-guests, this is a mandatory VM-entry control. It's also
1493 * required because we do not want to leak host bits to the nested-guest.
1494 */
1495 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1496
1497 /*
1498 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1499 *
1500             * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1501             * required to get the nested-guest working with hardware-assisted VMX execution.
1502 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1503 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1504 * here rather than while merging the guest VMCS controls.
1505 */
1506 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1507 {
1508 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1509 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1510 }
1511 else
1512 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1513
1514 /*
1515 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1516 *
1517 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1518 * regardless of whether the nested-guest VMCS specifies it because we are free to
1519 * load whatever MSRs we require and we do not need to modify the guest visible copy
1520 * of the VM-entry MSR load area.
1521 */
1522 if ( g_fHmVmxSupportsVmcsEfer
1523#ifndef IN_NEM_DARWIN
1524 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1525#endif
1526 )
1527 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1528 else
1529 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1530
1531 /*
1532 * The following should -not- be set (since we're not in SMM mode):
1533 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1534 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1535 */
1536
1537 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1538 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1539
1540 if ((fVal & fZap) == fVal)
1541 { /* likely */ }
1542 else
1543 {
1544 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1545 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1546 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1547 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1548 }
1549
1550 /* Commit it to the VMCS. */
1551 if (pVmcsInfo->u32EntryCtls != fVal)
1552 {
1553 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1554 AssertRC(rc);
1555 pVmcsInfo->u32EntryCtls = fVal;
1556 }
1557 }
1558
1559 /*
1560 * VM-exit controls.
1561 */
1562 {
1563 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1564 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1565
1566 /*
1567 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1568 * supported the 1-setting of this bit.
1569 *
1570             * For nested-guests, we set the "save debug controls" control since the converse
1571             * "load debug controls" control is mandatory for nested-guests anyway.
1572 */
1573 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1574
1575 /*
1576 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1577 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1578             * host EFER.LMA and EFER.LME bits to this value. See assertion in
1579 * vmxHCExportHostMsrs().
1580 *
1581 * For nested-guests, we always set this bit as we do not support 32-bit
1582 * hosts.
1583 */
1584 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1585
1586#ifndef IN_NEM_DARWIN
1587 /*
1588 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1589 *
1590 * For nested-guests, we should use the "save IA32_EFER" control if we also
1591 * used the "load IA32_EFER" control while exporting VM-entry controls.
1592 */
1593 if ( g_fHmVmxSupportsVmcsEfer
1594 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1595 {
1596 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1597 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1598 }
1599#endif
1600
1601 /*
1602 * Enable saving of the VMX-preemption timer value on VM-exit.
1603 * For nested-guests, currently not exposed/used.
1604 */
1605 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1606 * the timer value. */
1607 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1608 {
1609 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1610 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1611 }
1612
1613 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1614 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1615
1616 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1617 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1618 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1619
1620 if ((fVal & fZap) == fVal)
1621 { /* likely */ }
1622 else
1623 {
1624 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1625 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1626 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1627 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1628 }
1629
1630 /* Commit it to the VMCS. */
1631 if (pVmcsInfo->u32ExitCtls != fVal)
1632 {
1633 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1634 AssertRC(rc);
1635 pVmcsInfo->u32ExitCtls = fVal;
1636 }
1637 }
1638
1639 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1640 }
1641 return VINF_SUCCESS;
1642}
1643
1644
1645/**
1646 * Sets the TPR threshold in the VMCS.
1647 *
1648 * @param pVCpu The cross context virtual CPU structure.
1649 * @param pVmcsInfo The VMCS info. object.
1650 * @param u32TprThreshold The TPR threshold (task-priority class only).
1651 */
1652DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1653{
1654 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1655 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1656 RT_NOREF(pVmcsInfo);
1657 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1658 AssertRC(rc);
1659}
1660
1661
1662/**
1663 * Exports the guest APIC TPR state into the VMCS.
1664 *
1665 * @param pVCpu The cross context virtual CPU structure.
1666 * @param pVmxTransient The VMX-transient structure.
1667 *
1668 * @remarks No-long-jump zone!!!
1669 */
1670static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1671{
1672 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1673 {
1674 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1675
1676 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1677 if (!pVmxTransient->fIsNestedGuest)
1678 {
1679 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1680 && APICIsEnabled(pVCpu))
1681 {
1682 /*
1683 * Setup TPR shadowing.
1684 */
1685 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1686 {
1687 bool fPendingIntr = false;
1688 uint8_t u8Tpr = 0;
1689 uint8_t u8PendingIntr = 0;
1690 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1691 AssertRC(rc);
1692
1693 /*
1694 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1695 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1696 * priority of the pending interrupt so we can deliver the interrupt. If there
1697 * are no interrupts pending, set threshold to 0 to not cause any
1698 * TPR-below-threshold VM-exits.
1699 */
1700 uint32_t u32TprThreshold = 0;
1701 if (fPendingIntr)
1702 {
1703 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1704 (which is the Task-Priority Class). */
1705 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1706 const uint8_t u8TprPriority = u8Tpr >> 4;
1707 if (u8PendingPriority <= u8TprPriority)
1708 u32TprThreshold = u8PendingPriority;
1709 }
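                        /*
                         * Worked example (illustrative values): a pending interrupt vector of 0x51 is
                         * priority class 5 and a guest TPR of 0x80 is priority class 8. Since 5 <= 8 the
                         * threshold becomes 5, so the guest gets a TPR-below-threshold VM-exit once it
                         * lowers its TPR below 0x50, at which point we can deliver the interrupt.
                         */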
1710
1711 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1712 }
1713 }
1714 }
1715 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1716 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1717 }
1718}
1719
1720
1721/**
1722 * Gets the guest interruptibility-state and updates related force-flags.
1723 *
1724 * @returns Guest's interruptibility-state.
1725 * @param pVCpu The cross context virtual CPU structure.
1726 *
1727 * @remarks No-long-jump zone!!!
1728 */
1729static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1730{
1731 /*
1732 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1733 */
1734 uint32_t fIntrState = 0;
1735 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1736 {
1737 /* If inhibition is active, RIP and RFLAGS should've been imported from the VMCS already. */
1738 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
1739
1740 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
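            /*
             * If RIP still matches the instruction that set up the inhibition, report it to the CPU:
             * with EFLAGS.IF set this is reported as blocking-by-STI, otherwise as blocking-by-MOV SS.
             * (After an inhibiting STI, IF is necessarily set, so a clear IF implies MOV SS/POP SS;
             * see also the strict-build assertion further down.)
             */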
1741 if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
1742 {
1743 if (pCtx->eflags.Bits.u1IF)
1744 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1745 else
1746 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1747 }
1748 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1749 {
1750 /*
1751 * We can clear the inhibit force flag as even if we go back to the recompiler
1752 * without executing guest code in VT-x, the flag's condition to be cleared is
1753 * met and thus the cleared state is correct.
1754 */
1755 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1756 }
1757 }
1758
1759 /*
1760 * Check if we should inhibit NMI delivery.
1761 */
1762 if (CPUMIsGuestNmiBlocking(pVCpu))
1763 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1764
1765 /*
1766 * Validate.
1767 */
1768#ifdef VBOX_STRICT
1769    /* We don't support block-by-SMI yet. */
1770 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1771
1772 /* Block-by-STI must not be set when interrupts are disabled. */
1773 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
1774 {
1775 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1776 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
1777 }
1778#endif
1779
1780 return fIntrState;
1781}
1782
1783
1784/**
1785 * Exports the exception intercepts required for guest execution in the VMCS.
1786 *
1787 * @param pVCpu The cross context virtual CPU structure.
1788 * @param pVmxTransient The VMX-transient structure.
1789 *
1790 * @remarks No-long-jump zone!!!
1791 */
1792static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1793{
1794 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1795 {
1796 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1797 if ( !pVmxTransient->fIsNestedGuest
1798 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1799 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1800 else
1801 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1802
1803 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1804 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1805 }
1806}
1807
1808
1809/**
1810 * Exports the guest's RIP into the guest-state area in the VMCS.
1811 *
1812 * @param pVCpu The cross context virtual CPU structure.
1813 *
1814 * @remarks No-long-jump zone!!!
1815 */
1816static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1817{
1818 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1819 {
1820 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1821
1822 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1823 AssertRC(rc);
1824
1825 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1826 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1827 }
1828}
1829
1830
1831/**
1832 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1833 *
1834 * @param pVCpu The cross context virtual CPU structure.
1835 * @param pVmxTransient The VMX-transient structure.
1836 *
1837 * @remarks No-long-jump zone!!!
1838 */
1839static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1840{
1841 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1842 {
1843 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1844
1845 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
1846 Let us assert it as such and use 32-bit VMWRITE. */
1847 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64));
1848 X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags;
1849 Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
1850 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
1851
1852#ifndef IN_NEM_DARWIN
1853 /*
1854 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1855 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1856 * can run the real-mode guest code under Virtual 8086 mode.
1857 */
1858 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1859 if (pVmcsInfo->RealMode.fRealOnV86Active)
1860 {
1861 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1862 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1863 Assert(!pVmxTransient->fIsNestedGuest);
1864 pVmcsInfo->RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */
1865 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
1866 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1867 }
1868#else
1869 RT_NOREF(pVmxTransient);
1870#endif
1871
1872 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
1873 AssertRC(rc);
1874
1875 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1876 Log4Func(("eflags=%#RX32\n", fEFlags.u32));
1877 }
1878}
1879
1880
1881#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1882/**
1883 * Copies the nested-guest VMCS to the shadow VMCS.
1884 *
1885 * @returns VBox status code.
1886 * @param pVCpu The cross context virtual CPU structure.
1887 * @param pVmcsInfo The VMCS info. object.
1888 *
1889 * @remarks No-long-jump zone!!!
1890 */
1891static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1892{
1893 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1894 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1895
1896 /*
1897 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1898 * current VMCS, as we may try saving guest lazy MSRs.
1899 *
1900     * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1901     * calling the import-VMCS code, which currently performs the guest MSR reads
1902     * (on 64-bit hosts) and accesses the auto-load/store MSR area on 32-bit hosts,
1903     * nor the rest of the VMX leave-session machinery.
1904 */
1905 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1906
1907 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1908 if (RT_SUCCESS(rc))
1909 {
1910 /*
1911 * Copy all guest read/write VMCS fields.
1912 *
1913 * We don't check for VMWRITE failures here for performance reasons and
1914 * because they are not expected to fail, barring irrecoverable conditions
1915 * like hardware errors.
1916 */
1917 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1918 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1919 {
1920 uint64_t u64Val;
1921 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1922 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1923 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1924 }
1925
1926 /*
1927 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1928 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1929 */
1930 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1931 {
1932 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1933 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1934 {
1935 uint64_t u64Val;
1936 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1937 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1938 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1939 }
1940 }
1941
1942 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1943 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1944 }
1945
1946 ASMSetFlags(fEFlags);
1947 return rc;
1948}
1949
1950
1951/**
1952 * Copies the shadow VMCS to the nested-guest VMCS.
1953 *
1954 * @returns VBox status code.
1955 * @param pVCpu The cross context virtual CPU structure.
1956 * @param pVmcsInfo The VMCS info. object.
1957 *
1958 * @remarks Called with interrupts disabled.
1959 */
1960static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1961{
1962 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1963 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1964 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1965
1966 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1967 if (RT_SUCCESS(rc))
1968 {
1969 /*
1970 * Copy guest read/write fields from the shadow VMCS.
1971 * Guest read-only fields cannot be modified, so no need to copy them.
1972 *
1973 * We don't check for VMREAD failures here for performance reasons and
1974 * because they are not expected to fail, barring irrecoverable conditions
1975 * like hardware errors.
1976 */
1977 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1978 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1979 {
1980 uint64_t u64Val;
1981 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1982 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1983 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1984 }
1985
1986 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1987 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1988 }
1989 return rc;
1990}
1991
1992
1993/**
1994 * Enables VMCS shadowing for the given VMCS info. object.
1995 *
1996 * @param pVCpu The cross context virtual CPU structure.
1997 * @param pVmcsInfo The VMCS info. object.
1998 *
1999 * @remarks No-long-jump zone!!!
2000 */
2001static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2002{
2003 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2004 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
2005 {
2006 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
2007 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
2008 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2009 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
2010 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2011 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
2012 Log4Func(("Enabled\n"));
2013 }
2014}
2015
2016
2017/**
2018 * Disables VMCS shadowing for the given VMCS info. object.
2019 *
2020 * @param pVCpu The cross context virtual CPU structure.
2021 * @param pVmcsInfo The VMCS info. object.
2022 *
2023 * @remarks No-long-jump zone!!!
2024 */
2025static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2026{
2027 /*
2028 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
2029 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
2030 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
2031 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
2032 *
2033 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2034 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2035 */
2036 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2037 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2038 {
2039 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2040 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2041 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2042 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2043 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2044 Log4Func(("Disabled\n"));
2045 }
2046}
2047#endif
2048
2049
2050/**
2051 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2052 *
2053 * The guest FPU state is always pre-loaded hence we don't need to bother about
2054 * sharing FPU related CR0 bits between the guest and host.
2055 *
2056 * @returns VBox status code.
2057 * @param pVCpu The cross context virtual CPU structure.
2058 * @param pVmxTransient The VMX-transient structure.
2059 *
2060 * @remarks No-long-jump zone!!!
2061 */
2062static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2063{
2064 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2065 {
2066 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2067 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2068
2069 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2070 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2071 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2072 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2073 else
2074 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
2075
2076 if (!pVmxTransient->fIsNestedGuest)
2077 {
2078 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2079 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2080 uint64_t const u64ShadowCr0 = u64GuestCr0;
2081 Assert(!RT_HI_U32(u64GuestCr0));
2082
2083 /*
2084 * Setup VT-x's view of the guest CR0.
2085 */
2086 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2087 if (VM_IS_VMX_NESTED_PAGING(pVM))
2088 {
2089#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2090 if (CPUMIsGuestPagingEnabled(pVCpu))
2091 {
2092 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2093 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2094 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2095 }
2096 else
2097 {
2098 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2099 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2100 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2101 }
2102
2103 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2104 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2105 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2106#endif
2107 }
2108 else
2109 {
2110 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2111 u64GuestCr0 |= X86_CR0_WP;
2112 }
2113
2114 /*
2115 * Guest FPU bits.
2116 *
2117 * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state
2118 * using CR0.TS.
2119 *
2120             * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE must always be set
2121             * on the first VT-x capable CPUs; the VM-entry checks make no mention of it for unrestricted guests (UX).
2122 */
2123 u64GuestCr0 |= X86_CR0_NE;
2124
2125 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2126 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2127
2128 /*
2129 * Update exception intercepts.
2130 */
2131 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2132#ifndef IN_NEM_DARWIN
2133 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2134 {
2135 Assert(PDMVmmDevHeapIsEnabled(pVM));
2136 Assert(pVM->hm.s.vmx.pRealModeTSS);
2137 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2138 }
2139 else
2140#endif
2141 {
2142 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2143 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2144 if (fInterceptMF)
2145 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2146 }
2147
2148 /* Additional intercepts for debugging, define these yourself explicitly. */
2149#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2150 uXcptBitmap |= 0
2151 | RT_BIT(X86_XCPT_BP)
2152 | RT_BIT(X86_XCPT_DE)
2153 | RT_BIT(X86_XCPT_NM)
2154 | RT_BIT(X86_XCPT_TS)
2155 | RT_BIT(X86_XCPT_UD)
2156 | RT_BIT(X86_XCPT_NP)
2157 | RT_BIT(X86_XCPT_SS)
2158 | RT_BIT(X86_XCPT_GP)
2159 | RT_BIT(X86_XCPT_PF)
2160 | RT_BIT(X86_XCPT_MF)
2161 ;
2162#elif defined(HMVMX_ALWAYS_TRAP_PF)
2163 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2164#endif
2165 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2166 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2167 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2168 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2169 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2170
2171 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2172 u64GuestCr0 |= fSetCr0;
2173 u64GuestCr0 &= fZapCr0;
2174 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
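                /*
                 * Illustrative example (typical values, not guaranteed for any given CPU): with
                 * CR0 fixed0 = 0x80000021 (PG, NE, PE) and fixed1 = 0xffffffff, the OR above forces
                 * PG/NE/PE to 1 (unless unrestricted guest execution stripped PE/PG from fSetCr0),
                 * the AND leaves the remaining bits as the guest set them, and CD/NW are then
                 * cleared to keep caching enabled.
                 */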
2175
2176 /* Commit the CR0 and related fields to the guest VMCS. */
2177 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2178 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2179 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2180 {
2181 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2182 AssertRC(rc);
2183 }
2184 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2185 {
2186 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2187 AssertRC(rc);
2188 }
2189
2190 /* Update our caches. */
2191 pVmcsInfo->u32ProcCtls = uProcCtls;
2192 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2193
2194 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2195 }
2196 else
2197 {
2198 /*
2199 * With nested-guests, we may have extended the guest/host mask here since we
2200 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2201 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2202 * originally supplied. We must copy those bits from the nested-guest CR0 into
2203 * the nested-guest CR0 read-shadow.
2204 */
2205 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2206 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2207 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2208 Assert(!RT_HI_U32(u64GuestCr0));
2209 Assert(u64GuestCr0 & X86_CR0_NE);
2210
2211 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2212 u64GuestCr0 |= fSetCr0;
2213 u64GuestCr0 &= fZapCr0;
2214 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2215
2216 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2217 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2218 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2219
2220 Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2221 }
2222
2223 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2224 }
2225
2226 return VINF_SUCCESS;
2227}
2228
2229
2230/**
2231 * Exports the guest control registers (CR3, CR4) into the guest-state area
2232 * in the VMCS.
2233 *
2234 * @returns VBox strict status code.
2235 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2236 * without unrestricted guest access and the VMMDev is not presently
2237 * mapped (e.g. EFI32).
2238 *
2239 * @param pVCpu The cross context virtual CPU structure.
2240 * @param pVmxTransient The VMX-transient structure.
2241 *
2242 * @remarks No-long-jump zone!!!
2243 */
2244static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2245{
2246 int rc = VINF_SUCCESS;
2247 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2248
2249 /*
2250 * Guest CR2.
2251 * It's always loaded in the assembler code. Nothing to do here.
2252 */
2253
2254 /*
2255 * Guest CR3.
2256 */
2257 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2258 {
2259 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2260
2261 if (VM_IS_VMX_NESTED_PAGING(pVM))
2262 {
2263#ifndef IN_NEM_DARWIN
2264 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2265 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2266
2267 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2268 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2269 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2270 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2271
2272 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2273 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2274 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
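                /*
                 * Resulting EPTP layout (per the Intel SDM): bits 2:0 = memory type (6 = write-back),
                 * bits 5:3 = EPT page-walk length minus one (3 for a 4-level walk), bit 6 = enable
                 * accessed/dirty flags (not set here), bits 51:12 = physical address of the EPT PML4
                 * table obtained from PGM above.
                 */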
2275
2276 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2277            AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 5:3 (EPT page walk length - 1) must be 3. */
2278                       && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 11:7 MBZ. */
2279 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2280 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2281 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2282 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2283
2284 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2285 AssertRC(rc);
2286#endif
2287
2288 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2289 uint64_t u64GuestCr3 = pCtx->cr3;
2290 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2291 || CPUMIsGuestPagingEnabledEx(pCtx))
2292 {
2293 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2294 if (CPUMIsGuestInPAEModeEx(pCtx))
2295 {
2296 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2297 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2298 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2299 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2300 }
2301
2302 /*
2303 * The guest's view of its CR3 is unblemished with nested paging when the
2304 * guest is using paging or we have unrestricted guest execution to handle
2305 * the guest when it's not using paging.
2306 */
2307 }
2308#ifndef IN_NEM_DARWIN
2309 else
2310 {
2311 /*
2312 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2313 * thinks it accesses physical memory directly, we use our identity-mapped
2314 * page table to map guest-linear to guest-physical addresses. EPT takes care
2315 * of translating it to host-physical addresses.
2316 */
2317 RTGCPHYS GCPhys;
2318 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2319
2320 /* We obtain it here every time as the guest could have relocated this PCI region. */
2321 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2322 if (RT_SUCCESS(rc))
2323 { /* likely */ }
2324 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2325 {
2326 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2327 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2328 }
2329 else
2330 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2331
2332 u64GuestCr3 = GCPhys;
2333 }
2334#endif
2335
2336 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2337 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2338 AssertRC(rc);
2339 }
2340 else
2341 {
2342 Assert(!pVmxTransient->fIsNestedGuest);
2343 /* Non-nested paging case, just use the hypervisor's CR3. */
2344 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2345
2346 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2347 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2348 AssertRC(rc);
2349 }
2350
2351 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2352 }
2353
2354 /*
2355 * Guest CR4.
2356     * ASSUMES this is done every time we get in from ring-3! (XCR0)
2357 */
2358 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2359 {
2360 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2361 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2362
2363 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2364 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2365
2366 /*
2367 * With nested-guests, we may have extended the guest/host mask here (since we
2368 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
2369 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2370 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2371 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2372 */
2373 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2374 uint64_t u64GuestCr4 = pCtx->cr4;
2375 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2376 ? pCtx->cr4
2377 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2378 Assert(!RT_HI_U32(u64GuestCr4));
2379
2380#ifndef IN_NEM_DARWIN
2381 /*
2382 * Setup VT-x's view of the guest CR4.
2383 *
2384 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2385 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2386 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2387 *
2388 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2389 */
2390 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2391 {
2392 Assert(pVM->hm.s.vmx.pRealModeTSS);
2393 Assert(PDMVmmDevHeapIsEnabled(pVM));
2394 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2395 }
2396#endif
2397
2398 if (VM_IS_VMX_NESTED_PAGING(pVM))
2399 {
2400 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2401 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2402 {
2403 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2404 u64GuestCr4 |= X86_CR4_PSE;
2405 /* Our identity mapping is a 32-bit page directory. */
2406 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2407 }
2408            /* else: use the guest CR4 as-is. */
2409 }
2410 else
2411 {
2412 Assert(!pVmxTransient->fIsNestedGuest);
2413
2414 /*
2415 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
2416 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2417 */
2418 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2419 {
2420 case PGMMODE_REAL: /* Real-mode. */
2421 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2422 case PGMMODE_32_BIT: /* 32-bit paging. */
2423 {
2424 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2425 break;
2426 }
2427
2428 case PGMMODE_PAE: /* PAE paging. */
2429 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2430 {
2431 u64GuestCr4 |= X86_CR4_PAE;
2432 break;
2433 }
2434
2435 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2436 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2437 {
2438#ifdef VBOX_WITH_64_BITS_GUESTS
2439 /* For our assumption in vmxHCShouldSwapEferMsr. */
2440 Assert(u64GuestCr4 & X86_CR4_PAE);
2441 break;
2442#endif
2443 }
2444 default:
2445 AssertFailed();
2446 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2447 }
2448 }
2449
2450 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2451 u64GuestCr4 |= fSetCr4;
2452 u64GuestCr4 &= fZapCr4;
2453
2454 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2455 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2456 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2457
2458#ifndef IN_NEM_DARWIN
2459 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2460 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2461 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2462 {
2463 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2464 hmR0VmxUpdateStartVmFunction(pVCpu);
2465 }
2466#endif
2467
2468 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2469
2470 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2471 }
2472 return rc;
2473}
2474
2475
2476#ifdef VBOX_STRICT
2477/**
2478 * Strict function to validate segment registers.
2479 *
2480 * @param pVCpu The cross context virtual CPU structure.
2481 * @param pVmcsInfo The VMCS info. object.
2482 *
2483 * @remarks Will import guest CR0 on strict builds during validation of
2484 * segments.
2485 */
2486static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2487{
2488 /*
2489 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2490 *
2491     * The reason we check for an attribute value of 0 in this function, and not just the unusable
2492     * bit, is that vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2493     * unusable bit and doesn't change the guest-context value.
2494 */
2495 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2496 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2497 vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2498 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2499 && ( !CPUMIsGuestInRealModeEx(pCtx)
2500 && !CPUMIsGuestInV86ModeEx(pCtx)))
2501 {
2502 /* Protected mode checks */
2503 /* CS */
2504 Assert(pCtx->cs.Attr.n.u1Present);
2505 Assert(!(pCtx->cs.Attr.u & 0xf00));
2506 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2507 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2508 || !(pCtx->cs.Attr.n.u1Granularity));
2509 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2510 || (pCtx->cs.Attr.n.u1Granularity));
2511 /* CS cannot be loaded with NULL in protected mode. */
2512 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2513 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2514 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2515 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2516 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2517 else
2518            AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2519 /* SS */
2520 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2521 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2522 if ( !(pCtx->cr0 & X86_CR0_PE)
2523 || pCtx->cs.Attr.n.u4Type == 3)
2524 {
2525 Assert(!pCtx->ss.Attr.n.u2Dpl);
2526 }
2527 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2528 {
2529 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2530 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2531 Assert(pCtx->ss.Attr.n.u1Present);
2532 Assert(!(pCtx->ss.Attr.u & 0xf00));
2533 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2534 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2535 || !(pCtx->ss.Attr.n.u1Granularity));
2536 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2537 || (pCtx->ss.Attr.n.u1Granularity));
2538 }
2539 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2540 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2541 {
2542 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2543 Assert(pCtx->ds.Attr.n.u1Present);
2544 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2545 Assert(!(pCtx->ds.Attr.u & 0xf00));
2546 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2547 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2548 || !(pCtx->ds.Attr.n.u1Granularity));
2549 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2550 || (pCtx->ds.Attr.n.u1Granularity));
2551 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2552 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2553 }
2554 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2555 {
2556 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2557 Assert(pCtx->es.Attr.n.u1Present);
2558 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2559 Assert(!(pCtx->es.Attr.u & 0xf00));
2560 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2561 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2562 || !(pCtx->es.Attr.n.u1Granularity));
2563 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2564 || (pCtx->es.Attr.n.u1Granularity));
2565 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2566 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2567 }
2568 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2569 {
2570 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2571 Assert(pCtx->fs.Attr.n.u1Present);
2572 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2573 Assert(!(pCtx->fs.Attr.u & 0xf00));
2574 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2575 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2576 || !(pCtx->fs.Attr.n.u1Granularity));
2577 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2578 || (pCtx->fs.Attr.n.u1Granularity));
2579 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2580 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2581 }
2582 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2583 {
2584 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2585 Assert(pCtx->gs.Attr.n.u1Present);
2586 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2587 Assert(!(pCtx->gs.Attr.u & 0xf00));
2588 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2589 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2590 || !(pCtx->gs.Attr.n.u1Granularity));
2591 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2592 || (pCtx->gs.Attr.n.u1Granularity));
2593 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2594 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2595 }
2596 /* 64-bit capable CPUs. */
2597 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2598 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2599 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2600 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2601 }
2602 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2603 || ( CPUMIsGuestInRealModeEx(pCtx)
2604 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2605 {
2606 /* Real and v86 mode checks. */
2607        /* vmxHCExportGuestSegReg() writes the modified value into the VMCS. We want what we're feeding to VT-x. */
2608 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2609#ifndef IN_NEM_DARWIN
2610 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2611 {
2612 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2613 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2614 }
2615 else
2616#endif
2617 {
2618 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2619 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2620 }
2621
2622 /* CS */
2623        AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#RX64 %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2624 Assert(pCtx->cs.u32Limit == 0xffff);
2625 Assert(u32CSAttr == 0xf3);
2626 /* SS */
2627 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2628 Assert(pCtx->ss.u32Limit == 0xffff);
2629 Assert(u32SSAttr == 0xf3);
2630 /* DS */
2631 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2632 Assert(pCtx->ds.u32Limit == 0xffff);
2633 Assert(u32DSAttr == 0xf3);
2634 /* ES */
2635 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2636 Assert(pCtx->es.u32Limit == 0xffff);
2637 Assert(u32ESAttr == 0xf3);
2638 /* FS */
2639 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2640 Assert(pCtx->fs.u32Limit == 0xffff);
2641 Assert(u32FSAttr == 0xf3);
2642 /* GS */
2643 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2644 Assert(pCtx->gs.u32Limit == 0xffff);
2645 Assert(u32GSAttr == 0xf3);
2646 /* 64-bit capable CPUs. */
2647 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2648 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2649 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2650 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2651 }
2652}
2653#endif /* VBOX_STRICT */
2654
2655
2656/**
2657 * Exports a guest segment register into the guest-state area in the VMCS.
2658 *
2659 * @returns VBox status code.
2660 * @param pVCpu The cross context virtual CPU structure.
2661 * @param pVmcsInfo The VMCS info. object.
2662 * @param iSegReg The segment register number (X86_SREG_XXX).
2663 * @param pSelReg Pointer to the segment selector.
2664 *
2665 * @remarks No-long-jump zone!!!
2666 */
2667static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2668{
2669 Assert(iSegReg < X86_SREG_COUNT);
2670
2671 uint32_t u32Access = pSelReg->Attr.u;
2672#ifndef IN_NEM_DARWIN
2673 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2674#endif
2675 {
2676 /*
2677         * The way to tell whether this is really a null selector or just a selector loaded
2678         * with 0 in real-mode is by using the segment attributes. A selector loaded in
2679         * real-mode with the value 0 is valid and usable in protected-mode, so we should
2680         * -not- mark it as an unusable segment. Both the recompiler and VT-x ensure that
2681         * NULL selectors loaded in protected-mode have their attributes set to 0.
2682 */
2683 if (u32Access)
2684 { }
2685 else
2686 u32Access = X86DESCATTR_UNUSABLE;
2687 }
2688#ifndef IN_NEM_DARWIN
2689 else
2690 {
2691 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2692 u32Access = 0xf3;
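            /*
             * 0xf3 decodes to: type 3 (read/write, accessed data segment), S=1, DPL=3, present=1,
             * i.e. the access rights VT-x requires for all segment registers while the guest runs
             * with RFLAGS.VM=1 (virtual-8086 mode).
             */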
2693 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2694 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2695 RT_NOREF_PV(pVCpu);
2696 }
2697#else
2698 RT_NOREF(pVmcsInfo);
2699#endif
2700
2701 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2702 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2703              ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
2704
2705 /*
2706 * Commit it to the VMCS.
2707 */
2708 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
2709 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
2710 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
2711 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
2712 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2713 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2714 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2715 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2716 return VINF_SUCCESS;
2717}
2718
2719
2720/**
2721 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2722 * area in the VMCS.
2723 *
2724 * @returns VBox status code.
2725 * @param pVCpu The cross context virtual CPU structure.
2726 * @param pVmxTransient The VMX-transient structure.
2727 *
2728 * @remarks Will import guest CR0 on strict builds during validation of
2729 * segments.
2730 * @remarks No-long-jump zone!!!
2731 */
2732static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2733{
2734 int rc = VERR_INTERNAL_ERROR_5;
2735#ifndef IN_NEM_DARWIN
2736 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2737#endif
2738 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2739 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2740#ifndef IN_NEM_DARWIN
2741 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2742#endif
2743
2744 /*
2745 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2746 */
2747 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2748 {
2749 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2750 {
2751 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2752#ifndef IN_NEM_DARWIN
2753 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2754 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2755#endif
2756 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2757 AssertRC(rc);
2758 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2759 }
2760
2761 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2762 {
2763 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2764#ifndef IN_NEM_DARWIN
2765 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2766 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2767#endif
2768 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2769 AssertRC(rc);
2770 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2771 }
2772
2773 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2774 {
2775 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2776#ifndef IN_NEM_DARWIN
2777 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2778 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2779#endif
2780 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2781 AssertRC(rc);
2782 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2783 }
2784
2785 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2786 {
2787 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2788#ifndef IN_NEM_DARWIN
2789 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2790 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2791#endif
2792 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2793 AssertRC(rc);
2794 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2795 }
2796
2797 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2798 {
2799 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2800#ifndef IN_NEM_DARWIN
2801 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2802 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2803#endif
2804 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2805 AssertRC(rc);
2806 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2807 }
2808
2809 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2810 {
2811 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2812#ifndef IN_NEM_DARWIN
2813 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2814 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2815#endif
2816 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2817 AssertRC(rc);
2818 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2819 }
2820
2821#ifdef VBOX_STRICT
2822 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2823#endif
2824 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2825 pCtx->cs.Attr.u));
2826 }
2827
2828 /*
2829 * Guest TR.
2830 */
2831 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2832 {
2833 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2834
2835 /*
2836 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2837 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2838 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2839 */
2840 uint16_t u16Sel;
2841 uint32_t u32Limit;
2842 uint64_t u64Base;
2843 uint32_t u32AccessRights;
2844#ifndef IN_NEM_DARWIN
2845 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2846#endif
2847 {
2848 u16Sel = pCtx->tr.Sel;
2849 u32Limit = pCtx->tr.u32Limit;
2850 u64Base = pCtx->tr.u64Base;
2851 u32AccessRights = pCtx->tr.Attr.u;
2852 }
2853#ifndef IN_NEM_DARWIN
2854 else
2855 {
2856 Assert(!pVmxTransient->fIsNestedGuest);
2857 Assert(pVM->hm.s.vmx.pRealModeTSS);
2858 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2859
2860 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2861 RTGCPHYS GCPhys;
2862 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2863 AssertRCReturn(rc, rc);
2864
2865 X86DESCATTR DescAttr;
2866 DescAttr.u = 0;
2867 DescAttr.n.u1Present = 1;
2868 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2869
2870 u16Sel = 0;
2871 u32Limit = HM_VTX_TSS_SIZE;
2872 u64Base = GCPhys;
2873 u32AccessRights = DescAttr.u;
2874 }
2875#endif
2876
2877 /* Validate. */
2878 Assert(!(u16Sel & RT_BIT(2)));
2879 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2880 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2881 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2882 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2883 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2884 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2885 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2886 Assert( (u32Limit & 0xfff) == 0xfff
2887 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2888 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2889 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2890
2891 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2892 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2893 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2894 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2895
2896 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2897 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2898 }
2899
2900 /*
2901 * Guest GDTR.
2902 */
2903 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2904 {
2905 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2906
2907 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2908 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2909
2910 /* Validate. */
2911 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2912
2913 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2914 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2915 }
2916
2917 /*
2918 * Guest LDTR.
2919 */
2920 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2921 {
2922 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2923
2924 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
2925 uint32_t u32Access;
2926 if ( !pVmxTransient->fIsNestedGuest
2927 && !pCtx->ldtr.Attr.u)
2928 u32Access = X86DESCATTR_UNUSABLE;
2929 else
2930 u32Access = pCtx->ldtr.Attr.u;
2931
2932 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2933 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2934 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2935 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2936
2937 /* Validate. */
2938 if (!(u32Access & X86DESCATTR_UNUSABLE))
2939 {
2940 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2941 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2942 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2943 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2944 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2945 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2946 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2947 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2948 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2949 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2950 }
2951
2952 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2953 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2954 }
2955
2956 /*
2957 * Guest IDTR.
2958 */
2959 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2960 {
2961 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2962
2963 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2964 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2965
2966 /* Validate. */
2967 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2968
2969 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2970 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2971 }
2972
2973 return VINF_SUCCESS;
2974}
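
/*
 * The TR/LDTR validation asserts above encode the Intel-documented consistency rule
 * between a segment limit and its granularity bit: with G=1 the low 12 limit bits must
 * all be set, and any limit bit above bit 19 requires G=1. A minimal standalone sketch
 * of that rule follows, using plain integer types and a local function name instead of
 * the VMCS accessors; it is compiled out and only illustrates the check.
 */
#if 0 /* Standalone illustration with local names; compiled out. */
# include <stdbool.h>
# include <stdint.h>

/* Returns true if a segment limit and granularity flag are mutually consistent per the
   rule mirrored by the assertions above. */
static bool ExampleIsSegLimitConsistentWithG(uint32_t u32Limit, bool fGranularity)
{
    if (!fGranularity && (u32Limit & UINT32_C(0xfff00000)))
        return false;   /* A limit above 1MB-1 (byte units) needs page granularity. */
    if (fGranularity && (u32Limit & UINT32_C(0x00000fff)) != UINT32_C(0x00000fff))
        return false;   /* Page granularity implies the low 12 limit bits are all set. */
    return true;
}
#endif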
2975
2976
2977/**
2978 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2979 * VM-exit interruption info type.
2980 *
2981 * @returns The IEM exception flags.
2982 * @param uVector The event vector.
2983 * @param uVmxEventType The VMX event type.
2984 *
2985 * @remarks This function currently only constructs flags required for
2986 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2987 * and CR2 aspects of an exception are not included).
2988 */
2989static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2990{
2991 uint32_t fIemXcptFlags;
2992 switch (uVmxEventType)
2993 {
2994 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2995 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2996 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2997 break;
2998
2999 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
3000 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
3001 break;
3002
3003 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
3004 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
3005 break;
3006
3007 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
3008 {
3009 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
3010 if (uVector == X86_XCPT_BP)
3011 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
3012 else if (uVector == X86_XCPT_OF)
3013 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
3014 else
3015 {
3016 fIemXcptFlags = 0;
3017 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
3018 }
3019 break;
3020 }
3021
3022 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
3023 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
3024 break;
3025
3026 default:
3027 fIemXcptFlags = 0;
3028 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
3029 break;
3030 }
3031 return fIemXcptFlags;
3032}
3033
3034
3035/**
3036 * Sets an event as a pending event to be injected into the guest.
3037 *
3038 * @param pVCpu The cross context virtual CPU structure.
3039 * @param u32IntInfo The VM-entry interruption-information field.
3040 * @param cbInstr The VM-entry instruction length in bytes (for
3041 * software interrupts, exceptions and privileged
3042 * software exceptions).
3043 * @param u32ErrCode The VM-entry exception error code.
3044 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3045 * page-fault.
3046 */
3047DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3048 RTGCUINTPTR GCPtrFaultAddress)
3049{
3050 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3051 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3052 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3053 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3054 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3055 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3056}
3057
3058
3059/**
3060 * Sets an external interrupt as pending-for-injection into the VM.
3061 *
3062 * @param pVCpu The cross context virtual CPU structure.
3063 * @param u8Interrupt The external interrupt vector.
3064 */
3065DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3066{
3067 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3068 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3069 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3070 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3071 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3072}
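
/*
 * vmxHCSetPendingExtInt and the helpers that follow pack the VM-entry
 * interruption-information word with RT_BF_MAKE. A minimal sketch of the same packing
 * with plain shifts follows, using the Intel SDM layout: vector in bits 7:0, type in
 * bits 10:8, deliver-error-code in bit 11, valid in bit 31. The names below are local
 * to the example, not the IPRT/VMX ones, and the snippet is compiled out.
 */
#if 0 /* Standalone illustration with local names; compiled out. */
# include <stdint.h>

enum { EXAMPLE_INT_TYPE_EXT_INT = 0, EXAMPLE_INT_TYPE_NMI = 2, EXAMPLE_INT_TYPE_HW_XCPT = 3 };

/* Packs a VM-entry interruption-information word; bits 30:12 stay zero. */
static uint32_t ExampleMakeEntryIntInfo(uint8_t uVector, uint8_t uType, int fErrCodeValid)
{
    return (uint32_t)uVector                            /* Bits  7:0  - vector. */
         | ((uint32_t)(uType & 0x7) << 8)               /* Bits 10:8  - interruption type. */
         | ((uint32_t)(fErrCodeValid ? 1 : 0) << 11)    /* Bit  11    - deliver error code. */
         | UINT32_C(0x80000000);                        /* Bit  31    - valid. */
}

/* E.g. an external interrupt for vector 0x20 without an error code:
   uint32_t u32IntInfo = ExampleMakeEntryIntInfo(0x20, EXAMPLE_INT_TYPE_EXT_INT, 0); */
#endif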
3073
3074
3075/**
3076 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3077 *
3078 * @param pVCpu The cross context virtual CPU structure.
3079 */
3080DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3081{
3082 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3083 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3084 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3085 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3086 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3087}
3088
3089
3090/**
3091 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3092 *
3093 * @param pVCpu The cross context virtual CPU structure.
3094 */
3095DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3096{
3097 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3098 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3099 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3100 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3101 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3102}
3103
3104
3105/**
3106 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3107 *
3108 * @param pVCpu The cross context virtual CPU structure.
3109 */
3110DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3111{
3112 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3113 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3114 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3115 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3116 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3117}
3118
3119
3120/**
3121 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3122 *
3123 * @param pVCpu The cross context virtual CPU structure.
3124 */
3125DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3126{
3127 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3128 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3129 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3130 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3131 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3132}
3133
3134
3135#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3136/**
3137 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3138 *
3139 * @param pVCpu The cross context virtual CPU structure.
3140 * @param u32ErrCode The error code for the general-protection exception.
3141 */
3142DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3143{
3144 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3145 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3146 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3147 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3148 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3149}
3150
3151
3152/**
3153 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3154 *
3155 * @param pVCpu The cross context virtual CPU structure.
3156 * @param u32ErrCode The error code for the stack exception.
3157 */
3158DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3159{
3160 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3161 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3162 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3163 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3164 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3165}
3166#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3167
3168
3169/**
3170 * Fixes up attributes for the specified segment register.
3171 *
3172 * @param pVCpu The cross context virtual CPU structure.
3173 * @param pSelReg The segment register that needs fixing.
3174 * @param pszRegName The register name (for logging and assertions).
3175 */
3176static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3177{
3178 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3179
3180 /*
3181 * If VT-x marks the segment as unusable, most other bits remain undefined:
3182 * - For CS the L, D and G bits have meaning.
3183 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3184 * - For the remaining data segments no bits are defined.
3185 *
3186     * The present bit and the unusable bit have been observed to be set at the
3187 * same time (the selector was supposed to be invalid as we started executing
3188 * a V8086 interrupt in ring-0).
3189 *
3190     * What should be important for the rest of the VBox code is that the P bit is
3191     * cleared. Some of the other VBox code recognizes the unusable bit, but
3192     * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3193 * safe side here, we'll strip off P and other bits we don't care about. If
3194 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3195 *
3196 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3197 */
3198#ifdef VBOX_STRICT
3199 uint32_t const uAttr = pSelReg->Attr.u;
3200#endif
3201
3202 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3203 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3204 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3205
3206#ifdef VBOX_STRICT
3207# ifndef IN_NEM_DARWIN
3208 VMMRZCallRing3Disable(pVCpu);
3209# endif
3210 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3211# ifdef DEBUG_bird
3212 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3213 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3214 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3215# endif
3216# ifndef IN_NEM_DARWIN
3217 VMMRZCallRing3Enable(pVCpu);
3218# endif
3219 NOREF(uAttr);
3220#endif
3221 RT_NOREF2(pVCpu, pszRegName);
3222}
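
/*
 * vmxHCFixUnusableSegRegAttr keeps only the attribute bits that remain meaningful for
 * an unusable selector and, importantly, strips the present bit. A minimal sketch of
 * that masking follows, with local constants that mirror the VMX access-rights bit
 * positions (type 3:0, S bit 4, DPL 6:5, P bit 7, L bit 13, D/B bit 14, G bit 15,
 * unusable bit 16); it is compiled out and only illustrates the idea.
 */
#if 0 /* Standalone illustration with local names; compiled out. */
# include <stdint.h>

# define EXAMPLE_ATTR_TYPE      UINT32_C(0x0000000f)  /* Bits 3:0  - segment type. */
# define EXAMPLE_ATTR_S         UINT32_C(0x00000010)  /* Bit  4    - code/data vs system. */
# define EXAMPLE_ATTR_DPL       UINT32_C(0x00000060)  /* Bits 6:5  - DPL. */
# define EXAMPLE_ATTR_P         UINT32_C(0x00000080)  /* Bit  7    - present. */
# define EXAMPLE_ATTR_L         UINT32_C(0x00002000)  /* Bit  13   - 64-bit code segment. */
# define EXAMPLE_ATTR_DB        UINT32_C(0x00004000)  /* Bit  14   - default operation size. */
# define EXAMPLE_ATTR_G         UINT32_C(0x00008000)  /* Bit  15   - granularity. */
# define EXAMPLE_ATTR_UNUSABLE  UINT32_C(0x00010000)  /* Bit  16   - segment unusable. */

/* Keeps only the bits that stay meaningful for an unusable selector; everything else,
   in particular the present bit, is stripped just like in the function above. */
static uint32_t ExampleStripUnusableAttr(uint32_t uAttr)
{
    return uAttr & (  EXAMPLE_ATTR_UNUSABLE | EXAMPLE_ATTR_L | EXAMPLE_ATTR_DB | EXAMPLE_ATTR_G
                    | EXAMPLE_ATTR_DPL | EXAMPLE_ATTR_TYPE | EXAMPLE_ATTR_S);
}
#endif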
3223
3224
3225/**
3226 * Imports a guest segment register from the current VMCS into the guest-CPU
3227 * context.
3228 *
3229 * @param pVCpu The cross context virtual CPU structure.
3230 * @param iSegReg The segment register number (X86_SREG_XXX).
3231 *
3232 * @remarks Called with interrupts and/or preemption disabled.
3233 */
3234static void vmxHCImportGuestSegReg(PVMCPUCC pVCpu, uint32_t iSegReg)
3235{
3236 Assert(iSegReg < X86_SREG_COUNT);
3237 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
3238 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
3239 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
3240 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
3241
3242 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
3243
3244 uint16_t u16Sel;
3245 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), &u16Sel); AssertRC(rc);
3246 pSelReg->Sel = u16Sel;
3247 pSelReg->ValidSel = u16Sel;
3248
3249 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3250 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), &pSelReg->u64Base); AssertRC(rc);
3251
3252 uint32_t u32Attr;
3253 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), &u32Attr); AssertRC(rc);
3254 pSelReg->Attr.u = u32Attr;
3255 if (u32Attr & X86DESCATTR_UNUSABLE)
3256 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + iSegReg * 3);
3257
3258 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3259}
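
/*
 * The register-name argument passed above comes from the packed string
 * "ES\0CS\0SS\0DS\0FS\0GS": each name is exactly two characters plus its own
 * terminator, so advancing by iSegReg * 3 always lands on a valid C string in
 * the X86_SREG_XXX order (ES=0 ... GS=5). A tiny standalone sketch of the same
 * trick follows; it is compiled out.
 */
#if 0 /* Standalone illustration; compiled out. */
static const char *ExampleSegRegName(unsigned iSegReg)
{
    /* Six 2-character names, each followed by its '\0'; the stride is 3 bytes. */
    return iSegReg < 6 ? "ES\0CS\0SS\0DS\0FS\0GS" + iSegReg * 3 : "??";
}

/* ExampleSegRegName(1) yields "CS", ExampleSegRegName(5) yields "GS". */
#endif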
3260
3261
3262/**
3263 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
3264 *
3265 * @param pVCpu The cross context virtual CPU structure.
3266 *
3267 * @remarks Called with interrupts and/or preemption disabled.
3268 */
3269static void vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3270{
3271 uint16_t u16Sel;
3272 uint64_t u64Base;
3273 uint32_t u32Limit, u32Attr;
3274 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3275 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3276 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3277 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3278
3279 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3280 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3281 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3282 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3283 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3284 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3285 if (u32Attr & X86DESCATTR_UNUSABLE)
3286 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3287}
3288
3289
3290/**
3291 * Imports the guest TR from the current VMCS into the guest-CPU context.
3292 *
3293 * @param pVCpu The cross context virtual CPU structure.
3294 *
3295 * @remarks Called with interrupts and/or preemption disabled.
3296 */
3297static void vmxHCImportGuestTr(PVMCPUCC pVCpu)
3298{
3299 uint16_t u16Sel;
3300 uint64_t u64Base;
3301 uint32_t u32Limit, u32Attr;
3302 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3303 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3304 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3305 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3306
3307 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3308 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3309 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3310 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3311 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3312 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3313 /* TR is the only selector that can never be unusable. */
3314 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3315}
3316
3317
3318/**
3319 * Imports the guest RIP from the VMCS back into the guest-CPU context.
3320 *
3321 * @param pVCpu The cross context virtual CPU structure.
3322 *
3323 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3324 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3325 * instead!!!
3326 */
3327static void vmxHCImportGuestRip(PVMCPUCC pVCpu)
3328{
3329 uint64_t u64Val;
3330 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3331 if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
3332 {
3333 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3334 AssertRC(rc);
3335
3336 pCtx->rip = u64Val;
3337 EMHistoryUpdatePC(pVCpu, pCtx->rip, false);
3338 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
3339 }
3340}
3341
3342
3343/**
3344 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3345 *
3346 * @param pVCpu The cross context virtual CPU structure.
3347 * @param pVmcsInfo The VMCS info. object.
3348 *
3349 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3350 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3351 * instead!!!
3352 */
3353static void vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3354{
3355 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3356 if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS)
3357 {
3358 uint64_t u64Val;
3359 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
3360 AssertRC(rc);
3361
3362 pCtx->rflags.u64 = u64Val;
3363#ifndef IN_NEM_DARWIN
3364 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3365 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
3366 {
3367 pCtx->eflags.Bits.u1VM = 0;
3368 pCtx->eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3369 }
3370#else
3371 RT_NOREF(pVmcsInfo);
3372#endif
3373 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3374 }
3375}
3376
3377
3378/**
3379 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
3380 * context.
3381 *
3382 * @param pVCpu The cross context virtual CPU structure.
3383 * @param pVmcsInfo The VMCS info. object.
3384 *
3385 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3386 * do not log!
3387 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3388 * instead!!!
3389 */
3390static void vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3391{
3392 uint32_t u32Val;
3393 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3394 if (!u32Val)
3395 {
3396 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3397 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3398 CPUMSetGuestNmiBlocking(pVCpu, false);
3399 }
3400 else
3401 {
3402 /*
3403 * We must import RIP here to set our EM interrupt-inhibited state.
3404 * We also import RFLAGS as our code that evaluates pending interrupts
3405 * before VM-entry requires it.
3406 */
3407 vmxHCImportGuestRip(pVCpu);
3408 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3409
3410 if (u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3411 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
3412 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3413 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3414
3415 bool const fNmiBlocking = RT_BOOL(u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
3416 CPUMSetGuestNmiBlocking(pVCpu, fNmiBlocking);
3417 }
3418}
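
/*
 * vmxHCImportGuestIntrState above interprets the guest interruptibility-state field,
 * whose low bits are blocking-by-STI (bit 0), blocking-by-MOV SS (bit 1),
 * blocking-by-SMI (bit 2) and blocking-by-NMI (bit 3) per the Intel SDM. A minimal
 * decode sketch with plain masks follows; constant and function names are local to
 * the example and the snippet is compiled out.
 */
#if 0 /* Standalone illustration with local names; compiled out. */
# include <stdbool.h>
# include <stdint.h>

# define EXAMPLE_INT_STATE_BLOCK_STI    UINT32_C(0x1)   /* Bit 0 - blocking by STI. */
# define EXAMPLE_INT_STATE_BLOCK_MOVSS  UINT32_C(0x2)   /* Bit 1 - blocking by MOV SS / POP SS. */
# define EXAMPLE_INT_STATE_BLOCK_SMI    UINT32_C(0x4)   /* Bit 2 - blocking by SMI. */
# define EXAMPLE_INT_STATE_BLOCK_NMI    UINT32_C(0x8)   /* Bit 3 - blocking by NMI. */

/* Interrupts are inhibited for one instruction after STI or a MOV SS / POP SS. */
static bool ExampleIsIntInhibited(uint32_t uIntrState)
{
    return (uIntrState & (EXAMPLE_INT_STATE_BLOCK_STI | EXAMPLE_INT_STATE_BLOCK_MOVSS)) != 0;
}

static bool ExampleIsNmiBlocked(uint32_t uIntrState)
{
    return (uIntrState & EXAMPLE_INT_STATE_BLOCK_NMI) != 0;
}
#endif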
3419
3420
3421/**
3422 * Worker for VMXR0ImportStateOnDemand.
3423 *
3424 * @returns VBox status code.
3425 * @param pVCpu The cross context virtual CPU structure.
3426 * @param pVmcsInfo The VMCS info. object.
3427 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3428 */
3429static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3430{
3431 int rc = VINF_SUCCESS;
3432 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3433 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3434 uint32_t u32Val;
3435
3436 /*
3437     * Note! This is a hack to work around a mysterious BSOD observed with release builds
3438 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3439 * neither are other host platforms.
3440 *
3441 * Committing this temporarily as it prevents BSOD.
3442 *
3443 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3444 */
3445# ifdef RT_OS_WINDOWS
3446 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3447 return VERR_HM_IPE_1;
3448# endif
3449
3450 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3451
3452#ifndef IN_NEM_DARWIN
3453 /*
3454 * We disable interrupts to make the updating of the state and in particular
3455     * the fExtrn modification atomic wrt preemption hooks.
3456 */
3457 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3458#endif
3459
3460 fWhat &= pCtx->fExtrn;
3461 if (fWhat)
3462 {
3463 do
3464 {
3465 if (fWhat & CPUMCTX_EXTRN_RIP)
3466 vmxHCImportGuestRip(pVCpu);
3467
3468 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3469 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3470
3471 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3472 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3473
3474 if (fWhat & CPUMCTX_EXTRN_RSP)
3475 {
3476 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3477 AssertRC(rc);
3478 }
3479
3480 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3481 {
3482 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3483#ifndef IN_NEM_DARWIN
3484 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3485#else
3486 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3487#endif
3488 if (fWhat & CPUMCTX_EXTRN_CS)
3489 {
3490 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
3491 vmxHCImportGuestRip(pVCpu);
3492 if (fRealOnV86Active)
3493 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3494 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3495 }
3496 if (fWhat & CPUMCTX_EXTRN_SS)
3497 {
3498 vmxHCImportGuestSegReg(pVCpu, X86_SREG_SS);
3499 if (fRealOnV86Active)
3500 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3501 }
3502 if (fWhat & CPUMCTX_EXTRN_DS)
3503 {
3504 vmxHCImportGuestSegReg(pVCpu, X86_SREG_DS);
3505 if (fRealOnV86Active)
3506 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3507 }
3508 if (fWhat & CPUMCTX_EXTRN_ES)
3509 {
3510 vmxHCImportGuestSegReg(pVCpu, X86_SREG_ES);
3511 if (fRealOnV86Active)
3512 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3513 }
3514 if (fWhat & CPUMCTX_EXTRN_FS)
3515 {
3516 vmxHCImportGuestSegReg(pVCpu, X86_SREG_FS);
3517 if (fRealOnV86Active)
3518 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3519 }
3520 if (fWhat & CPUMCTX_EXTRN_GS)
3521 {
3522 vmxHCImportGuestSegReg(pVCpu, X86_SREG_GS);
3523 if (fRealOnV86Active)
3524 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3525 }
3526 }
3527
3528 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3529 {
3530 if (fWhat & CPUMCTX_EXTRN_LDTR)
3531 vmxHCImportGuestLdtr(pVCpu);
3532
3533 if (fWhat & CPUMCTX_EXTRN_GDTR)
3534 {
3535 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3536 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3537 pCtx->gdtr.cbGdt = u32Val;
3538 }
3539
3540 /* Guest IDTR. */
3541 if (fWhat & CPUMCTX_EXTRN_IDTR)
3542 {
3543 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3544 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3545 pCtx->idtr.cbIdt = u32Val;
3546 }
3547
3548 /* Guest TR. */
3549 if (fWhat & CPUMCTX_EXTRN_TR)
3550 {
3551#ifndef IN_NEM_DARWIN
3552                /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR;
3553                   we don't need to import that one. */
3554 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3555#endif
3556 vmxHCImportGuestTr(pVCpu);
3557 }
3558 }
3559
3560 if (fWhat & CPUMCTX_EXTRN_DR7)
3561 {
3562#ifndef IN_NEM_DARWIN
3563 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3564#endif
3565 {
3566 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3567 AssertRC(rc);
3568 }
3569 }
3570
3571 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3572 {
3573 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3574 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3575 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3576 pCtx->SysEnter.cs = u32Val;
3577 }
3578
3579#ifndef IN_NEM_DARWIN
3580 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3581 {
3582 if ( pVM->hmr0.s.fAllow64BitGuests
3583 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3584 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3585 }
3586
3587 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3588 {
3589 if ( pVM->hmr0.s.fAllow64BitGuests
3590 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3591 {
3592 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3593 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3594 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3595 }
3596 }
3597
3598 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3599 {
3600 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3601 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3602 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3603 Assert(pMsrs);
3604 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3605 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3606 for (uint32_t i = 0; i < cMsrs; i++)
3607 {
3608 uint32_t const idMsr = pMsrs[i].u32Msr;
3609 switch (idMsr)
3610 {
3611 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3612 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3613 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3614 default:
3615 {
3616 uint32_t idxLbrMsr;
3617 if (VM_IS_VMX_LBR(pVM))
3618 {
3619 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3620 {
3621 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3622 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3623 break;
3624 }
3625 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3626 {
3627                                Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3628 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3629 break;
3630 }
3631 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3632 {
3633 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3634 break;
3635 }
3636 /* Fallthru (no break) */
3637 }
3638 pCtx->fExtrn = 0;
3639 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
3640 ASMSetFlags(fEFlags);
3641 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3642 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3643 }
3644 }
3645 }
3646 }
3647#endif
3648
3649 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3650 {
3651 if (fWhat & CPUMCTX_EXTRN_CR0)
3652 {
3653 uint64_t u64Cr0;
3654 uint64_t u64Shadow;
3655 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3656 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3657#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3658 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3659 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3660#else
3661 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3662 {
3663 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3664 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3665 }
3666 else
3667 {
3668 /*
3669 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3670 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3671 * re-construct CR0. See @bugref{9180#c95} for details.
3672 */
3673 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3674 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3675 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3676 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3677 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3678 }
3679#endif
3680#ifndef IN_NEM_DARWIN
3681 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3682#endif
3683 CPUMSetGuestCR0(pVCpu, u64Cr0);
3684#ifndef IN_NEM_DARWIN
3685 VMMRZCallRing3Enable(pVCpu);
3686#endif
3687 }
3688
3689 if (fWhat & CPUMCTX_EXTRN_CR4)
3690 {
3691 uint64_t u64Cr4;
3692 uint64_t u64Shadow;
3693 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3694 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3695#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3696 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3697 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3698#else
3699 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3700 {
3701 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3702 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3703 }
3704 else
3705 {
3706 /*
3707 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3708 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3709 * re-construct CR4. See @bugref{9180#c95} for details.
3710 */
3711 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3712 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3713 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3714 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3715 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3716 }
3717#endif
3718 pCtx->cr4 = u64Cr4;
3719 }
3720
3721 if (fWhat & CPUMCTX_EXTRN_CR3)
3722 {
3723 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3724 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3725 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3726 && CPUMIsGuestPagingEnabledEx(pCtx)))
3727 {
3728 uint64_t u64Cr3;
3729 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3730 if (pCtx->cr3 != u64Cr3)
3731 {
3732 pCtx->cr3 = u64Cr3;
3733 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3734 }
3735
3736 /*
3737                     * If the guest is in PAE mode, sync back the PDPEs into the guest state.
3738 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3739 */
3740 if (CPUMIsGuestInPAEModeEx(pCtx))
3741 {
3742 X86PDPE aPaePdpes[4];
3743 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3744 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3745 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3746 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3747 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3748 {
3749 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3750 /* PGM now updates PAE PDPTEs while updating CR3. */
3751 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3752 }
3753 }
3754 }
3755 }
3756 }
3757
3758#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3759 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3760 {
3761 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3762 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3763 {
3764 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3765 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3766 if (RT_SUCCESS(rc))
3767 { /* likely */ }
3768 else
3769 break;
3770 }
3771 }
3772#endif
3773 } while (0);
3774
3775 if (RT_SUCCESS(rc))
3776 {
3777 /* Update fExtrn. */
3778 pCtx->fExtrn &= ~fWhat;
3779
3780 /* If everything has been imported, clear the HM keeper bit. */
3781 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3782 {
3783#ifndef IN_NEM_DARWIN
3784 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3785#else
3786 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3787#endif
3788 Assert(!pCtx->fExtrn);
3789 }
3790 }
3791 }
3792#ifndef IN_NEM_DARWIN
3793 else
3794 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3795
3796 /*
3797 * Restore interrupts.
3798 */
3799 ASMSetFlags(fEFlags);
3800#endif
3801
3802 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3803
3804 if (RT_SUCCESS(rc))
3805 { /* likely */ }
3806 else
3807 return rc;
3808
3809 /*
3810 * Honor any pending CR3 updates.
3811 *
3812 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3813 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3814 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3815 *
3816 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3817 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3818 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3819 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3820 *
3821 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3822 *
3823 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3824 */
3825 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3826#ifndef IN_NEM_DARWIN
3827 && VMMRZCallRing3IsEnabled(pVCpu)
3828#endif
3829 )
3830 {
3831 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3832 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3833 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3834 }
3835
3836 return VINF_SUCCESS;
3837}
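
/*
 * For CR0 and CR4 the importer above combines three inputs: the guest value in the
 * VMCS, the guest/host mask and the read shadow. Bits owned by the guest (mask bit
 * clear) come from the VMCS guest field, while bits owned by the host (mask bit set)
 * are reported from the read shadow, i.e. what the guest believes it wrote. A minimal
 * sketch of that combination for the non-nested case follows; it is compiled out and
 * uses local names only.
 */
#if 0 /* Standalone illustration with local names; compiled out. */
# include <stdint.h>

/* uVmcsCr:   the guest CR0/CR4 value currently in the VMCS.
   uShadow:   the CR0/CR4 read shadow.
   uHostMask: the guest/host mask; a set bit means the host owns that bit. */
static uint64_t ExampleReconstructGuestCr(uint64_t uVmcsCr, uint64_t uShadow, uint64_t uHostMask)
{
    return (uVmcsCr & ~uHostMask)   /* Guest-owned bits live in the VMCS guest field.   */
         | (uShadow &  uHostMask);  /* Host-owned bits are reported from the shadow.    */
}
#endif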
3838
3839
3840/**
3841 * Check per-VM and per-VCPU force flag actions that require us to go back to
3842 * ring-3 for one reason or another.
3843 *
3844 * @returns Strict VBox status code (i.e. informational status codes too)
3845 * @retval VINF_SUCCESS if we don't have any actions that require going back to
3846 * ring-3.
3847 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
3848 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
3849 * interrupts)
3850 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
3851 * all EMTs to be in ring-3.
3852 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
3853 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
3854 * to the EM loop.
3855 *
3856 * @param pVCpu The cross context virtual CPU structure.
3857 * @param fIsNestedGuest Flag whether this is for a pending nested guest event.
3858 * @param fStepping Whether we are single-stepping the guest using the
3859 * hypervisor debugger.
3860 *
3861 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
3862 * is no longer in VMX non-root mode.
3863 */
3864static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
3865{
3866#ifndef IN_NEM_DARWIN
3867 Assert(VMMRZCallRing3IsEnabled(pVCpu));
3868#endif
3869
3870 /*
3871 * Update pending interrupts into the APIC's IRR.
3872 */
3873 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3874 APICUpdatePendingInterrupts(pVCpu);
3875
3876 /*
3877 * Anything pending? Should be more likely than not if we're doing a good job.
3878 */
3879 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3880 if ( !fStepping
3881 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
3882 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
3883 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
3884 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
3885 return VINF_SUCCESS;
3886
3887    /* Pending PGM CR3 sync. */
3888 if (VMCPU_FF_IS_ANY_SET(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
3889 {
3890 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3891 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
3892 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
3893 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
3894 if (rcStrict != VINF_SUCCESS)
3895 {
3896 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
3897 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
3898 return rcStrict;
3899 }
3900 }
3901
3902 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
3903 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
3904 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
3905 {
3906 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
3907 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
3908 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
3909 return rc;
3910 }
3911
3912 /* Pending VM request packets, such as hardware interrupts. */
3913 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
3914 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
3915 {
3916 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
3917 Log4Func(("Pending VM request forcing us back to ring-3\n"));
3918 return VINF_EM_PENDING_REQUEST;
3919 }
3920
3921 /* Pending PGM pool flushes. */
3922 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
3923 {
3924 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
3925 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
3926 return VINF_PGM_POOL_FLUSH_PENDING;
3927 }
3928
3929 /* Pending DMA requests. */
3930 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
3931 {
3932 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
3933 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
3934 return VINF_EM_RAW_TO_R3;
3935 }
3936
3937#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3938 /*
3939 * Pending nested-guest events.
3940 *
3941     * Please note that the priority of these events is specified and important.
3942 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
3943 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
3944 */
3945 if (fIsNestedGuest)
3946 {
3947 /* Pending nested-guest APIC-write. */
3948 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
3949 {
3950 Log4Func(("Pending nested-guest APIC-write\n"));
3951 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
3952 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3953 return rcStrict;
3954 }
3955
3956 /* Pending nested-guest monitor-trap flag (MTF). */
3957 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
3958 {
3959 Log4Func(("Pending nested-guest MTF\n"));
3960 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
3961 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3962 return rcStrict;
3963 }
3964
3965 /* Pending nested-guest VMX-preemption timer expired. */
3966 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
3967 {
3968 Log4Func(("Pending nested-guest preempt timer\n"));
3969 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
3970 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3971 return rcStrict;
3972 }
3973 }
3974#else
3975 NOREF(fIsNestedGuest);
3976#endif
3977
3978 return VINF_SUCCESS;
3979}
3980
3981
3982/**
3983 * Converts any TRPM trap into a pending HM event. This is typically used when
3984 * entering from ring-3 (not longjmp returns).
3985 *
3986 * @param pVCpu The cross context virtual CPU structure.
3987 */
3988static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
3989{
3990 Assert(TRPMHasTrap(pVCpu));
3991 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3992
3993 uint8_t uVector;
3994 TRPMEVENT enmTrpmEvent;
3995 uint32_t uErrCode;
3996 RTGCUINTPTR GCPtrFaultAddress;
3997 uint8_t cbInstr;
3998 bool fIcebp;
3999
4000 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4001 AssertRC(rc);
4002
4003 uint32_t u32IntInfo;
4004 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4005 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
4006
4007 rc = TRPMResetTrap(pVCpu);
4008 AssertRC(rc);
4009 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4010 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4011
4012 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4013}
4014
4015
4016/**
4017 * Converts the pending HM event into a TRPM trap.
4018 *
4019 * @param pVCpu The cross context virtual CPU structure.
4020 */
4021static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4022{
4023 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4024
4025 /* If a trap was already pending, we did something wrong! */
4026 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4027
4028 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4029 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4030 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4031
4032 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4033
4034 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4035 AssertRC(rc);
4036
4037 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4038 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4039
4040 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4041 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4042 else
4043 {
4044 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4045 switch (uVectorType)
4046 {
4047 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4048 TRPMSetTrapDueToIcebp(pVCpu);
4049 RT_FALL_THRU();
4050 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4051 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4052 {
4053 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4054 || ( uVector == X86_XCPT_BP /* INT3 */
4055 || uVector == X86_XCPT_OF /* INTO */
4056 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4057 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4058 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4059 break;
4060 }
4061 }
4062 }
4063
4064 /* We're now done converting the pending event. */
4065 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4066}
4067
4068
4069/**
4070 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4071 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4072 *
4073 * @param pVCpu The cross context virtual CPU structure.
4074 * @param pVmcsInfo The VMCS info. object.
4075 */
4076static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4077{
4078 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4079 {
4080 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4081 {
4082 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4083 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4084 AssertRC(rc);
4085 }
4086    } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4087}
4088
4089
4090/**
4091 * Clears the interrupt-window exiting control in the VMCS.
4092 *
4093 * @param pVCpu The cross context virtual CPU structure.
4094 * @param pVmcsInfo The VMCS info. object.
4095 */
4096DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4097{
4098 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4099 {
4100 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4101 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4102 AssertRC(rc);
4103 }
4104}
4105
4106
4107/**
4108 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4109 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4110 *
4111 * @param pVCpu The cross context virtual CPU structure.
4112 * @param pVmcsInfo The VMCS info. object.
4113 */
4114static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4115{
4116 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4117 {
4118 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4119 {
4120 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4121 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4122 AssertRC(rc);
4123 Log4Func(("Setup NMI-window exiting\n"));
4124 }
4125 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4126}
4127
4128
4129/**
4130 * Clears the NMI-window exiting control in the VMCS.
4131 *
4132 * @param pVCpu The cross context virtual CPU structure.
4133 * @param pVmcsInfo The VMCS info. object.
4134 */
4135DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4136{
4137 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4138 {
4139 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4140 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4141 AssertRC(rc);
4142 }
4143}
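
/*
 * The four window-exiting helpers above share one pattern: mutate the cached copy of
 * the processor-based VM-execution controls and only issue a VMCS write when the bit
 * actually changes (and, for the setters, only when the CPU advertises the control).
 * A minimal sketch of that read-modify-write pattern follows; the write-back function
 * is a local stand-in for the real VMCS write and the snippet is compiled out.
 */
#if 0 /* Standalone illustration with local names; compiled out. */
# include <stdint.h>

static void ExampleWriteProcCtls(uint32_t uProcCtls)
{
    (void)uProcCtls; /* The VMCS write of the processor-based controls would go here. */
}

/* Sets fCtlBit in the cached controls, writing back only on an actual change and only
   if the control is allowed to be 1 on this CPU. */
static void ExampleSetProcCtlBit(uint32_t *puCachedProcCtls, uint32_t fCtlBit, uint32_t fAllowed1)
{
    if (   (fAllowed1 & fCtlBit)
        && !(*puCachedProcCtls & fCtlBit))
    {
        *puCachedProcCtls |= fCtlBit;
        ExampleWriteProcCtls(*puCachedProcCtls);
    }
}

static void ExampleClearProcCtlBit(uint32_t *puCachedProcCtls, uint32_t fCtlBit)
{
    if (*puCachedProcCtls & fCtlBit)
    {
        *puCachedProcCtls &= ~fCtlBit;
        ExampleWriteProcCtls(*puCachedProcCtls);
    }
}
#endif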
4144
4145
4146/**
4147 * Injects an event into the guest upon VM-entry by updating the relevant fields
4148 * in the VM-entry area in the VMCS.
4149 *
4150 * @returns Strict VBox status code (i.e. informational status codes too).
4151 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4152 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4153 *
4154 * @param pVCpu The cross context virtual CPU structure.
4155 * @param pVmcsInfo The VMCS info object.
4156 * @param fIsNestedGuest Flag whether this is for a pending nested guest event.
4157 * @param pEvent The event being injected.
4158 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4159 * will be updated if necessary. This cannot be NULL.
4160 * @param fStepping Whether we're single-stepping guest execution and should
4161 * return VINF_EM_DBG_STEPPED if the event is injected
4162 * directly (registers modified by us, not by hardware on
4163 * VM-entry).
4164 */
4165static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4166 bool fStepping, uint32_t *pfIntrState)
4167{
4168 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4169 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4170 Assert(pfIntrState);
4171
4172#ifdef IN_NEM_DARWIN
4173 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4174#endif
4175
4176 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4177 uint32_t u32IntInfo = pEvent->u64IntInfo;
4178 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4179 uint32_t const cbInstr = pEvent->cbInstr;
4180 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4181 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4182 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4183
4184#ifdef VBOX_STRICT
4185 /*
4186 * Validate the error-code-valid bit for hardware exceptions.
4187 * No error codes for exceptions in real-mode.
4188 *
4189 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4190 */
4191 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4192 && !CPUMIsGuestInRealModeEx(pCtx))
4193 {
4194 switch (uVector)
4195 {
4196 case X86_XCPT_PF:
4197 case X86_XCPT_DF:
4198 case X86_XCPT_TS:
4199 case X86_XCPT_NP:
4200 case X86_XCPT_SS:
4201 case X86_XCPT_GP:
4202 case X86_XCPT_AC:
4203 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4204 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4205 RT_FALL_THRU();
4206 default:
4207 break;
4208 }
4209 }
4210
4211 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4212 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4213 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4214#endif
4215
4216 RT_NOREF(uVector);
4217 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4218 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4219 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4220 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4221 {
4222 Assert(uVector <= X86_XCPT_LAST);
4223 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4224 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4225 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4226 }
4227 else
4228 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4229
4230 /*
4231 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4232 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4233 * interrupt handler in the (real-mode) guest.
4234 *
4235 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4236 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4237 */
4238 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4239 {
4240#ifndef IN_NEM_DARWIN
4241 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4242#endif
4243 {
4244 /*
4245 * For CPUs with unrestricted guest execution enabled and with the guest
4246 * in real-mode, we must not set the deliver-error-code bit.
4247 *
4248 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4249 */
4250 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4251 }
4252#ifndef IN_NEM_DARWIN
4253 else
4254 {
4255 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4256 Assert(PDMVmmDevHeapIsEnabled(pVM));
4257 Assert(pVM->hm.s.vmx.pRealModeTSS);
4258 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4259
4260 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4261 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4262 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4263 AssertRCReturn(rc2, rc2);
4264
4265 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4266 size_t const cbIdtEntry = sizeof(X86IDTR16);
4267 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4268 {
4269 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4270 if (uVector == X86_XCPT_DF)
4271 return VINF_EM_RESET;
4272
4273 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4274 No error codes for exceptions in real-mode. */
4275 if (uVector == X86_XCPT_GP)
4276 {
4277 uint32_t const uXcptDfInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4278 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4279 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4280 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4281 HMEVENT EventXcptDf;
4282 RT_ZERO(EventXcptDf);
4283 EventXcptDf.u64IntInfo = uXcptDfInfo;
4284 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptDf, fStepping, pfIntrState);
4285 }
4286
4287 /*
4288 * If we're injecting an event with no valid IDT entry, inject a #GP.
4289 * No error codes for exceptions in real-mode.
4290 *
4291 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4292 */
4293 uint32_t const uXcptGpInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4294 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4295 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4296 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4297 HMEVENT EventXcptGp;
4298 RT_ZERO(EventXcptGp);
4299 EventXcptGp.u64IntInfo = uXcptGpInfo;
4300 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptGp, fStepping, pfIntrState);
4301 }
4302
4303 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4304 uint16_t uGuestIp = pCtx->ip;
4305 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4306 {
4307 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4308                /* #BP and #OF are both benign traps; we need to resume at the next instruction. */
4309 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4310 }
4311 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4312 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4313
4314 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4315 X86IDTR16 IdtEntry;
4316 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4317 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4318 AssertRCReturn(rc2, rc2);
4319
4320 /* Construct the stack frame for the interrupt/exception handler. */
4321 VBOXSTRICTRC rcStrict;
4322 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
4323 if (rcStrict == VINF_SUCCESS)
4324 {
4325 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4326 if (rcStrict == VINF_SUCCESS)
4327 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4328 }
4329
4330 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4331 if (rcStrict == VINF_SUCCESS)
4332 {
4333 pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4334 pCtx->rip = IdtEntry.offSel;
4335 pCtx->cs.Sel = IdtEntry.uSel;
4336 pCtx->cs.ValidSel = IdtEntry.uSel;
4337 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
4338 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4339 && uVector == X86_XCPT_PF)
4340 pCtx->cr2 = GCPtrFault;
4341
4342 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4343 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4344 | HM_CHANGED_GUEST_RSP);
4345
4346 /*
4347 * If we delivered a hardware exception (other than an NMI) and if there was
4348 * block-by-STI in effect, we should clear it.
4349 */
4350 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4351 {
4352 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4353 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4354 Log4Func(("Clearing inhibition due to STI\n"));
4355 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4356 }
4357
4358 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4359 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4360
4361 /*
4362 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4363 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4364 */
4365 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4366
4367 /*
4368 * If we eventually support nested-guest execution without unrestricted guest execution,
4369 * we should set fInterceptEvents here.
4370 */
4371 Assert(!fIsNestedGuest);
4372
4373 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4374 if (fStepping)
4375 rcStrict = VINF_EM_DBG_STEPPED;
4376 }
4377 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4378 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4379 return rcStrict;
4380 }
4381#else
4382 RT_NOREF(pVmcsInfo);
4383#endif
4384 }
4385
4386 /*
4387 * Validate.
4388 */
4389 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4390 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4391
4392 /*
4393 * Inject the event into the VMCS.
4394 */
4395 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4396 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4397 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4398 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4399 AssertRC(rc);
4400
4401 /*
4402 * Update guest CR2 if this is a page-fault.
4403 */
4404 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4405 pCtx->cr2 = GCPtrFault;
4406
4407 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4408 return VINF_SUCCESS;
4409}
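
/*
 * The real-mode path above dispatches the event the way the CPU itself would: it
 * locates the 4-byte IVT entry at vector * 4, checks that the IDT limit covers it,
 * pushes FLAGS, CS and the return IP on the 16-bit stack, clears IF/TF/RF/AC and
 * loads CS:IP from the entry (the real-mode CS base being selector * 16). A minimal
 * sketch of the entry lookup and the base computation follows, kept to plain
 * arithmetic with local names; it is compiled out.
 */
#if 0 /* Standalone illustration with local names; compiled out. */
# include <stdbool.h>
# include <stdint.h>

/* A real-mode IVT entry is 4 bytes: handler offset (IP) followed by its segment (CS). */
typedef struct EXAMPLEIVTENTRY { uint16_t offHandler; uint16_t uSelCs; } EXAMPLEIVTENTRY;

/* The IDT limit must cover the whole entry, i.e. vector * 4 + 3 <= limit. */
static bool ExampleIsIvtEntryPresent(uint8_t uVector, uint16_t cbIdtLimit)
{
    return (uint32_t)uVector * sizeof(EXAMPLEIVTENTRY) + (sizeof(EXAMPLEIVTENTRY) - 1) <= cbIdtLimit;
}

/* Address of the entry given the (normally zero) real-mode IDT base. */
static uint32_t ExampleIvtEntryAddr(uint32_t uIdtBase, uint8_t uVector)
{
    return uIdtBase + (uint32_t)uVector * sizeof(EXAMPLEIVTENTRY);
}

/* In real mode the CS base is simply the selector shifted left by 4 (selector * 16). */
static uint32_t ExampleRealModeCsBase(uint16_t uSelCs)
{
    return (uint32_t)uSelCs << 4;
}
#endif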
4410
4411
4412/**
4413 * Evaluates the event to be delivered to the guest and sets it as the pending
4414 * event.
4415 *
4416 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4417 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4418 * NOT restore these force-flags.
4419 *
4420 * @returns Strict VBox status code (i.e. informational status codes too).
4421 * @param pVCpu The cross context virtual CPU structure.
4422 * @param pVmcsInfo The VMCS information structure.
4423 * @param fIsNestedGuest Flag whether the evaluation happens for a nested guest.
4424 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4425 */
4426static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4427{
4428 Assert(pfIntrState);
4429 Assert(!TRPMHasTrap(pVCpu));
4430
4431 /*
4432 * Compute/update guest-interruptibility state related FFs.
4433 * The FFs will be used below while evaluating events to be injected.
4434 */
4435 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4436
4437 /*
4438 * Evaluate if a new event needs to be injected.
4439 * An event that's already pending has already performed all necessary checks.
4440 */
4441 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4442 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
4443 {
4444 /** @todo SMI. SMIs take priority over NMIs. */
4445
4446 /*
4447 * NMIs.
4448 * NMIs take priority over external interrupts.
4449 */
4450#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4451 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4452#endif
4453 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4454 {
4455 /*
4456 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4457 *
4458 * For a nested-guest, the FF always indicates the outer guest's ability to
4459 * receive an NMI while the guest-interruptibility state bit depends on whether
4460 * the nested-hypervisor is using virtual-NMIs.
4461 */
4462 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
4463 {
4464#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4465 if ( fIsNestedGuest
4466 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4467 return IEMExecVmxVmexitXcptNmi(pVCpu);
4468#endif
4469 vmxHCSetPendingXcptNmi(pVCpu);
4470 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4471 Log4Func(("NMI pending injection\n"));
4472
4473 /* We've injected the NMI, bail. */
4474 return VINF_SUCCESS;
4475 }
4476 else if (!fIsNestedGuest)
4477 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4478 }
4479
4480 /*
4481 * External interrupts (PIC/APIC).
4482 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4483 * We cannot re-request the interrupt from the controller again.
4484 */
4485 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4486 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4487 {
4488 Assert(!DBGFIsStepping(pVCpu));
4489 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4490 AssertRC(rc);
4491
4492 /*
4493 * We must not check EFLAGS directly when executing a nested-guest, use
4494 * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
4495 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4496 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4497 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4498 *
4499 * See Intel spec. 25.4.1 "Event Blocking".
4500 */
4501 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
4502 {
4503#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4504 if ( fIsNestedGuest
4505 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4506 {
4507 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
4508 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4509 return rcStrict;
4510 }
4511#endif
4512 uint8_t u8Interrupt;
4513 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4514 if (RT_SUCCESS(rc))
4515 {
4516#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4517 if ( fIsNestedGuest
4518 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4519 {
4520 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
4521 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4522 return rcStrict;
4523 }
4524#endif
4525 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
4526 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
4527 }
4528 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4529 {
4530 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
4531
4532 if ( !fIsNestedGuest
4533 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
4534 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
4535 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
4536
4537 /*
4538 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
4539 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
4540 * need to re-set this force-flag here.
4541 */
4542 }
4543 else
4544 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
4545
4546 /* We've injected the interrupt or taken necessary action, bail. */
4547 return VINF_SUCCESS;
4548 }
4549 if (!fIsNestedGuest)
4550 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4551 }
4552 }
4553 else if (!fIsNestedGuest)
4554 {
4555 /*
4556 * An event is being injected or we are in an interrupt shadow. Check if another event is
4557 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
4558 * the pending event.
4559 */
4560 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4561 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4562 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4563 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4564 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4565 }
4566 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
4567
4568 return VINF_SUCCESS;
4569}
4570
4571
4572/**
4573 * Injects any pending events into the guest if the guest is in a state to
4574 * receive them.
4575 *
4576 * @returns Strict VBox status code (i.e. informational status codes too).
4577 * @param pVCpu The cross context virtual CPU structure.
4578 * @param pVmcsInfo The VMCS information structure.
4579 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
4580 * @param fIntrState The VT-x guest-interruptibility state.
4581 * @param fStepping Whether we are single-stepping the guest using the
4582 * hypervisor debugger and should return
4583 * VINF_EM_DBG_STEPPED if the event was dispatched
4584 * directly.
4585 */
4586static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
4587 uint32_t fIntrState, bool fStepping)
4588{
4589 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
4590#ifndef IN_NEM_DARWIN
4591 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4592#endif
4593
4594#ifdef VBOX_STRICT
4595 /*
4596 * Verify guest-interruptibility state.
4597 *
4598 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
4599 * since injecting an event may modify the interruptibility state and we must thus always
4600 * use fIntrState.
4601 */
4602 {
4603 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
4604 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
4605 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
4606 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
4607 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
4608 Assert(!TRPMHasTrap(pVCpu));
4609 NOREF(fBlockMovSS); NOREF(fBlockSti);
4610 }
4611#endif
4612
4613 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
4614 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
4615 {
4616 /*
4617 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
4618 * pending even while injecting an event and in this case, we want a VM-exit as soon as
4619 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
4620 *
4621 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
4622 */
4623 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
4624#ifdef VBOX_STRICT
4625 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4626 {
4627 Assert(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
4628 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4629 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4630 }
4631 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
4632 {
4633 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
4634 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4635 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4636 }
4637#endif
4638 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
4639 uIntType));
4640
4641 /*
4642 * Inject the event and get any changes to the guest-interruptibility state.
4643 *
4644 * The guest-interruptibility state may need to be updated if we inject the event
4645 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
4646 */
4647 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
4648 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
4649
4650 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4651 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
4652 else
4653 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
4654 }
4655
4656 /*
4657 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
4658 * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
4659 */
4660 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
4661 && !fIsNestedGuest)
4662 {
4663 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
4664
4665 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4666 {
4667 /*
4668 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
4669 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
4670 */
4671 Assert(!DBGFIsStepping(pVCpu));
4672 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_TF);
4673 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
4674 AssertRC(rc);
4675 }
4676 else
4677 {
4678 /*
4679 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
4680 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
4681 * we take care of this case in vmxHCExportSharedDebugState and also the case if
4682 * we use MTF, so just make sure it's called before executing guest-code.
4683 */
4684 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
4685 }
4686 }
4687 /* else: for nested-guests, this is currently handled while merging VMCS controls. */
4688
4689 /*
4690 * Finally, update the guest-interruptibility state.
4691 *
4692 * This is required for the real-on-v86 software interrupt injection, for
4693 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
4694 */
4695 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
4696 AssertRC(rc);
4697
4698 /*
4699 * There's no need to clear the VM-entry interruption-information field here if we're not
4700 * injecting anything. VT-x clears the valid bit on every VM-exit.
4701 *
4702 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
4703 */
4704
4705 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
4706 return rcStrict;
4707}
4708
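/*
 * Minimal pairing sketch (illustrative only; the actual run loop lives elsewhere): the two
 * helpers above are intended to be used back-to-back before VM-entry -- first evaluate what,
 * if anything, is pending, then inject it.  The 'false' arguments assume a non-nested guest
 * that is not being single-stepped.
 *
 *     uint32_t fIntrState = 0;
 *     VBOXSTRICTRC rcStrict = vmxHCEvaluatePendingEvent(pVCpu, pVmcsInfo, false /*fIsNestedGuest*/, &fIntrState);
 *     if (rcStrict == VINF_SUCCESS)
 *         rcStrict = vmxHCInjectPendingEvent(pVCpu, pVmcsInfo, false /*fIsNestedGuest*/, fIntrState, false /*fStepping*/);
 */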
4709
4710/**
4711 * Tries to determine what part of the guest-state VT-x has deemed as invalid
4712 * and update error record fields accordingly.
4713 *
4714 * @returns VMX_IGS_* error codes.
4715 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
4716 * wrong with the guest state.
4717 *
4718 * @param pVCpu The cross context virtual CPU structure.
4719 * @param pVmcsInfo The VMCS info. object.
4720 *
4721 * @remarks This function assumes our cache of the VMCS controls
4722 * is valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
4723 */
4724static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
4725{
4726#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
4727#define HMVMX_CHECK_BREAK(expr, err) do { \
4728 if (!(expr)) { uError = (err); break; } \
4729 } while (0)
4730
4731 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4732 uint32_t uError = VMX_IGS_ERROR;
4733 uint32_t u32IntrState = 0;
4734#ifndef IN_NEM_DARWIN
4735 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4736 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
4737#else
4738 bool const fUnrestrictedGuest = true;
4739#endif
4740 do
4741 {
4742 int rc;
4743
4744 /*
4745 * Guest-interruptibility state.
4746 *
4747 * Read this first so that any check that fails prior to those that actually
4748 * require the guest-interruptibility state still reflects the correct VMCS
4749 * value and avoids causing further confusion.
4750 */
4751 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
4752 AssertRC(rc);
4753
4754 uint32_t u32Val;
4755 uint64_t u64Val;
4756
4757 /*
4758 * CR0.
4759 */
4760 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4761 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
4762 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
4763 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
4764 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
4765 if (fUnrestrictedGuest)
4766 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
4767
4768 uint64_t u64GuestCr0;
4769 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
4770 AssertRC(rc);
4771 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
4772 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
4773 if ( !fUnrestrictedGuest
4774 && (u64GuestCr0 & X86_CR0_PG)
4775 && !(u64GuestCr0 & X86_CR0_PE))
4776 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
4777
4778 /*
4779 * CR4.
4780 */
4781 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4782 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
4783 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
4784
4785 uint64_t u64GuestCr4;
4786 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
4787 AssertRC(rc);
4788 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
4789 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
4790
4791 /*
4792 * IA32_DEBUGCTL MSR.
4793 */
4794 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
4795 AssertRC(rc);
4796 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4797 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
4798 {
4799 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
4800 }
4801 uint64_t u64DebugCtlMsr = u64Val;
4802
4803#ifdef VBOX_STRICT
4804 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
4805 AssertRC(rc);
4806 Assert(u32Val == pVmcsInfo->u32EntryCtls);
4807#endif
4808 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4809
4810 /*
4811 * RIP and RFLAGS.
4812 */
4813 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
4814 AssertRC(rc);
4815 /* pCtx->rip can differ from the value in the VMCS (e.g. after running guest code and taking VM-exits that don't update it). */
4816 if ( !fLongModeGuest
4817 || !pCtx->cs.Attr.n.u1Long)
4818 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
4819 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
4820 * must be identical if the "IA-32e mode guest" VM-entry
4821 * control is 1 and CS.L is 1. No check applies if the
4822 * CPU supports 64 linear-address bits. */
4823
4824 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
4825 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
4826 AssertRC(rc);
4827 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
4828 VMX_IGS_RFLAGS_RESERVED);
4829 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
4830 uint32_t const u32Eflags = u64Val;
4831
4832 if ( fLongModeGuest
4833 || ( fUnrestrictedGuest
4834 && !(u64GuestCr0 & X86_CR0_PE)))
4835 {
4836 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
4837 }
4838
4839 uint32_t u32EntryInfo;
4840 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
4841 AssertRC(rc);
4842 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
4843 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
4844
4845 /*
4846 * 64-bit checks.
4847 */
4848 if (fLongModeGuest)
4849 {
4850 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
4851 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
4852 }
4853
4854 if ( !fLongModeGuest
4855 && (u64GuestCr4 & X86_CR4_PCIDE))
4856 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
4857
4858 /** @todo CR3 field must be such that bits 63:52 and bits in the range
4859 * 51:32 beyond the processor's physical-address width are 0. */
4860
4861 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4862 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
4863 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
4864
4865#ifndef IN_NEM_DARWIN
4866 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
4867 AssertRC(rc);
4868 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
4869
4870 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
4871 AssertRC(rc);
4872 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
4873#endif
4874
4875 /*
4876 * PERF_GLOBAL MSR.
4877 */
4878 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
4879 {
4880 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
4881 AssertRC(rc);
4882 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
4883 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
4884 }
4885
4886 /*
4887 * PAT MSR.
4888 */
4889 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
4890 {
4891 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
4892 AssertRC(rc);
4893 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xf8f8f8f8f8f8f8f8)), VMX_IGS_PAT_MSR_RESERVED); /* Bits 7:3 of each PAT entry MBZ. */
4894 for (unsigned i = 0; i < 8; i++)
4895 {
4896 uint8_t u8Val = (u64Val & 0xff);
4897 if ( u8Val != 0 /* UC */
4898 && u8Val != 1 /* WC */
4899 && u8Val != 4 /* WT */
4900 && u8Val != 5 /* WP */
4901 && u8Val != 6 /* WB */
4902 && u8Val != 7 /* UC- */)
4903 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
4904 u64Val >>= 8;
4905 }
4906 }
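 /*
 * Worked example (editorial note, not upstream code): the architectural PAT reset value
 * 0x0007040600070406 decodes byte-by-byte (low byte first) to WB, WT, UC-, UC and then
 * repeats, i.e. every entry is one of the memory types accepted by the loop above.
 */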
4907
4908 /*
4909 * EFER MSR.
4910 */
4911 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
4912 {
4913 Assert(g_fHmVmxSupportsVmcsEfer);
4914 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
4915 AssertRC(rc);
4916 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
4917 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
4918 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
4919 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
4920 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
4921 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
4922 * iemVmxVmentryCheckGuestState(). */
4923 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4924 || !(u64GuestCr0 & X86_CR0_PG)
4925 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
4926 VMX_IGS_EFER_LMA_LME_MISMATCH);
4927 }
4928
4929 /*
4930 * Segment registers.
4931 */
4932 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
4933 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
4934 if (!(u32Eflags & X86_EFL_VM))
4935 {
4936 /* CS */
4937 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
4938 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
4939 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
4940 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
4941 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4942 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
4943 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4944 /* CS cannot be loaded with NULL in protected mode. */
4945 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
4946 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
4947 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
4948 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
4949 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
4950 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
4951 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
4952 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
4953 else
4954 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
4955
4956 /* SS */
4957 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4958 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
4959 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
4960 if ( !(pCtx->cr0 & X86_CR0_PE)
4961 || pCtx->cs.Attr.n.u4Type == 3)
4962 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
4963
4964 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
4965 {
4966 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
4967 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
4968 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
4969 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
4970 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
4971 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4972 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
4973 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4974 }
4975
4976 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
4977 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
4978 {
4979 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
4980 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
4981 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4982 || pCtx->ds.Attr.n.u4Type > 11
4983 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
4984 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
4985 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
4986 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
4987 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4988 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
4989 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4990 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4991 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
4992 }
4993 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
4994 {
4995 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
4996 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
4997 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4998 || pCtx->es.Attr.n.u4Type > 11
4999 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5000 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
5001 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5002 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5003 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5004 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5005 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5006 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5007 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5008 }
5009 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5010 {
5011 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5012 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5013 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5014 || pCtx->fs.Attr.n.u4Type > 11
5015 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5016 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5017 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5018 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5019 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5020 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5021 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5022 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5023 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5024 }
5025 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5026 {
5027 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5028 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5029 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5030 || pCtx->gs.Attr.n.u4Type > 11
5031 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5032 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5033 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5034 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5035 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5036 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5037 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5038 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5039 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5040 }
5041 /* 64-bit capable CPUs. */
5042 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5043 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5044 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5045 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5046 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5047 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5048 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5049 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5050 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5051 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5052 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5053 }
5054 else
5055 {
5056 /* V86 mode checks. */
5057 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5058 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5059 {
5060 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5061 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5062 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5063 }
5064 else
5065 {
5066 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5067 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5068 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5069 }
5070
5071 /* CS */
5072 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5073 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5074 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5075 /* SS */
5076 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5077 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5078 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5079 /* DS */
5080 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5081 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5082 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5083 /* ES */
5084 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5085 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5086 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5087 /* FS */
5088 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5089 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5090 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5091 /* GS */
5092 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5093 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5094 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5095 /* 64-bit capable CPUs. */
5096 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5097 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5098 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5099 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5100 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5101 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5102 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5103 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5104 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5105 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5106 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5107 }
5108
5109 /*
5110 * TR.
5111 */
5112 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5113 /* 64-bit capable CPUs. */
5114 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5115 if (fLongModeGuest)
5116 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5117 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5118 else
5119 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5120 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5121 VMX_IGS_TR_ATTR_TYPE_INVALID);
5122 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5123 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5124 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5125 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5126 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5127 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5128 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5129 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5130
5131 /*
5132 * GDTR and IDTR (64-bit capable checks).
5133 */
5134 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5135 AssertRC(rc);
5136 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5137
5138 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5139 AssertRC(rc);
5140 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5141
5142 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5143 AssertRC(rc);
5144 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5145
5146 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5147 AssertRC(rc);
5148 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5149
5150 /*
5151 * Guest Non-Register State.
5152 */
5153 /* Activity State. */
5154 uint32_t u32ActivityState;
5155 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5156 AssertRC(rc);
5157 HMVMX_CHECK_BREAK( !u32ActivityState
5158 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5159 VMX_IGS_ACTIVITY_STATE_INVALID);
5160 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5161 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5162
5163 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5164 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5165 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5166
5167 /** @todo Activity state and injecting interrupts. Left as a todo since we
5168 * currently don't use any activity state other than ACTIVE. */
5169
5170 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5171 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5172
5173 /* Guest interruptibility-state. */
5174 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5175 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5176 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5177 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5178 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5179 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5180 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5181 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5182 {
5183 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5184 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5185 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5186 }
5187 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5188 {
5189 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5190 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5191 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5192 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5193 }
5194 /** @todo Assumes the processor is not in SMM. */
5195 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5196 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5197 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5198 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5199 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5200 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5201 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5202 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5203
5204 /* Pending debug exceptions. */
5205 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5206 AssertRC(rc);
5207 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5208 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5209 u32Val = u64Val; /* For pending debug exceptions checks below. */
5210
5211 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5212 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5213 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5214 {
5215 if ( (u32Eflags & X86_EFL_TF)
5216 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5217 {
5218 /* Bit 14 is PendingDebug.BS. */
5219 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5220 }
5221 if ( !(u32Eflags & X86_EFL_TF)
5222 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5223 {
5224 /* Bit 14 is PendingDebug.BS. */
5225 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5226 }
5227 }
5228
5229#ifndef IN_NEM_DARWIN
5230 /* VMCS link pointer. */
5231 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5232 AssertRC(rc);
5233 if (u64Val != UINT64_C(0xffffffffffffffff))
5234 {
5235 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5236 /** @todo Bits beyond the processor's physical-address width MBZ. */
5237 /** @todo SMM checks. */
5238 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5239 Assert(pVmcsInfo->pvShadowVmcs);
5240 VMXVMCSREVID VmcsRevId;
5241 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5242 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5243 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5244 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5245 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5246 }
5247
5248 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5249 * not using nested paging? */
5250 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5251 && !fLongModeGuest
5252 && CPUMIsGuestInPAEModeEx(pCtx))
5253 {
5254 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5255 AssertRC(rc);
5256 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5257
5258 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5259 AssertRC(rc);
5260 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5261
5262 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5263 AssertRC(rc);
5264 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5265
5266 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5267 AssertRC(rc);
5268 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5269 }
5270#endif
5271
5272 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5273 if (uError == VMX_IGS_ERROR)
5274 uError = VMX_IGS_REASON_NOT_FOUND;
5275 } while (0);
5276
5277 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5278 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5279 return uError;
5280
5281#undef HMVMX_ERROR_BREAK
5282#undef HMVMX_CHECK_BREAK
5283}
5284
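/*
 * Caller-side sketch (illustrative only; the caller shown here is hypothetical): after a
 * VM-entry failure with VMX_EXIT_ERR_INVALID_GUEST_STATE, the diagnostic above is typically
 * reduced to a single VMX_IGS_* value which also ends up in VCPU_2_VMXSTATE(pVCpu).u32HMError.
 *
 *     uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
 *     Log4(("Invalid guest-state diagnostic: %#x\n", uInvalidReason));
 */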
5285
5286#ifndef HMVMX_USE_FUNCTION_TABLE
5287/**
5288 * Handles a guest VM-exit from hardware-assisted VMX execution.
5289 *
5290 * @returns Strict VBox status code (i.e. informational status codes too).
5291 * @param pVCpu The cross context virtual CPU structure.
5292 * @param pVmxTransient The VMX-transient structure.
5293 */
5294DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5295{
5296#ifdef DEBUG_ramshankar
5297# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5298 do { \
5299 if (a_fSave != 0) \
5300 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); \
5301 VBOXSTRICTRC rcStrict = a_CallExpr; \
5302 if (a_fSave != 0) \
5303 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5304 return rcStrict; \
5305 } while (0)
5306#else
5307# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5308#endif
5309 uint32_t const uExitReason = pVmxTransient->uExitReason;
5310 switch (uExitReason)
5311 {
5312 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5313 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5314 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5315 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5316 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5317 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5318 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5319 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5320 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5321 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5322 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5323 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5324 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5325 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5326 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5327 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5328 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5329 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5330 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5331 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5332 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5333 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5334 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5335 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5336 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5337 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5338 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5339 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5340 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5341 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5342#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5343 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5344 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5345 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5346 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5347 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5348 case VMX_EXIT_VMRESUME: VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5349 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5350 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5351 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5352 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5353#else
5354 case VMX_EXIT_VMCLEAR:
5355 case VMX_EXIT_VMLAUNCH:
5356 case VMX_EXIT_VMPTRLD:
5357 case VMX_EXIT_VMPTRST:
5358 case VMX_EXIT_VMREAD:
5359 case VMX_EXIT_VMRESUME:
5360 case VMX_EXIT_VMWRITE:
5361 case VMX_EXIT_VMXOFF:
5362 case VMX_EXIT_VMXON:
5363 case VMX_EXIT_INVVPID:
5364 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5365#endif
5366#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5367 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5368#else
5369 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5370#endif
5371
5372 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5373 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5374 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5375
5376 case VMX_EXIT_INIT_SIGNAL:
5377 case VMX_EXIT_SIPI:
5378 case VMX_EXIT_IO_SMI:
5379 case VMX_EXIT_SMI:
5380 case VMX_EXIT_ERR_MSR_LOAD:
5381 case VMX_EXIT_ERR_MACHINE_CHECK:
5382 case VMX_EXIT_PML_FULL:
5383 case VMX_EXIT_VIRTUALIZED_EOI:
5384 case VMX_EXIT_GDTR_IDTR_ACCESS:
5385 case VMX_EXIT_LDTR_TR_ACCESS:
5386 case VMX_EXIT_APIC_WRITE:
5387 case VMX_EXIT_RDRAND:
5388 case VMX_EXIT_RSM:
5389 case VMX_EXIT_VMFUNC:
5390 case VMX_EXIT_ENCLS:
5391 case VMX_EXIT_RDSEED:
5392 case VMX_EXIT_XSAVES:
5393 case VMX_EXIT_XRSTORS:
5394 case VMX_EXIT_UMWAIT:
5395 case VMX_EXIT_TPAUSE:
5396 case VMX_EXIT_LOADIWKEY:
5397 default:
5398 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5399 }
5400#undef VMEXIT_CALL_RET
5401}
5402#endif /* !HMVMX_USE_FUNCTION_TABLE */
5403
5404
5405#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5406/**
5407 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5408 *
5409 * @returns Strict VBox status code (i.e. informational status codes too).
5410 * @param pVCpu The cross context virtual CPU structure.
5411 * @param pVmxTransient The VMX-transient structure.
5412 */
5413DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5414{
5415 uint32_t const uExitReason = pVmxTransient->uExitReason;
5416 switch (uExitReason)
5417 {
5418# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5419 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient);
5420 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolationNested(pVCpu, pVmxTransient);
5421# else
5422 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
5423 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolation(pVCpu, pVmxTransient);
5424# endif
5425 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
5426 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
5427 case VMX_EXIT_HLT: return vmxHCExitHltNested(pVCpu, pVmxTransient);
5428
5429 /*
5430 * We shouldn't direct host physical interrupts to the nested-guest.
5431 */
5432 case VMX_EXIT_EXT_INT:
5433 return vmxHCExitExtInt(pVCpu, pVmxTransient);
5434
5435 /*
5436 * Instructions that cause VM-exits unconditionally or whose exit condition is
5437 * always determined solely by the nested hypervisor (meaning that if the VM-exit
5438 * happens, it's guaranteed to be a nested-guest VM-exit).
5439 *
5440 * - Provides VM-exit instruction length ONLY.
5441 */
5442 case VMX_EXIT_CPUID: /* Unconditional. */
5443 case VMX_EXIT_VMCALL:
5444 case VMX_EXIT_GETSEC:
5445 case VMX_EXIT_INVD:
5446 case VMX_EXIT_XSETBV:
5447 case VMX_EXIT_VMLAUNCH:
5448 case VMX_EXIT_VMRESUME:
5449 case VMX_EXIT_VMXOFF:
5450 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5451 case VMX_EXIT_VMFUNC:
5452 return vmxHCExitInstrNested(pVCpu, pVmxTransient);
5453
5454 /*
5455 * Instructions that cause VM-exits unconditionally or whose exit condition is
5456 * always determined solely by the nested hypervisor (meaning that if the VM-exit
5457 * happens, it's guaranteed to be a nested-guest VM-exit).
5458 *
5459 * - Provides VM-exit instruction length.
5460 * - Provides VM-exit information.
5461 * - Optionally provides Exit qualification.
5462 *
5463 * Since Exit qualification is 0 for all VM-exits where it is not
5464 * applicable, reading and passing it to the guest should produce
5465 * defined behavior.
5466 *
5467 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5468 */
5469 case VMX_EXIT_INVEPT: /* Unconditional. */
5470 case VMX_EXIT_INVVPID:
5471 case VMX_EXIT_VMCLEAR:
5472 case VMX_EXIT_VMPTRLD:
5473 case VMX_EXIT_VMPTRST:
5474 case VMX_EXIT_VMXON:
5475 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5476 case VMX_EXIT_LDTR_TR_ACCESS:
5477 case VMX_EXIT_RDRAND:
5478 case VMX_EXIT_RDSEED:
5479 case VMX_EXIT_XSAVES:
5480 case VMX_EXIT_XRSTORS:
5481 case VMX_EXIT_UMWAIT:
5482 case VMX_EXIT_TPAUSE:
5483 return vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient);
5484
5485 case VMX_EXIT_RDTSC: return vmxHCExitRdtscNested(pVCpu, pVmxTransient);
5486 case VMX_EXIT_RDTSCP: return vmxHCExitRdtscpNested(pVCpu, pVmxTransient);
5487 case VMX_EXIT_RDMSR: return vmxHCExitRdmsrNested(pVCpu, pVmxTransient);
5488 case VMX_EXIT_WRMSR: return vmxHCExitWrmsrNested(pVCpu, pVmxTransient);
5489 case VMX_EXIT_INVLPG: return vmxHCExitInvlpgNested(pVCpu, pVmxTransient);
5490 case VMX_EXIT_INVPCID: return vmxHCExitInvpcidNested(pVCpu, pVmxTransient);
5491 case VMX_EXIT_TASK_SWITCH: return vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient);
5492 case VMX_EXIT_WBINVD: return vmxHCExitWbinvdNested(pVCpu, pVmxTransient);
5493 case VMX_EXIT_MTF: return vmxHCExitMtfNested(pVCpu, pVmxTransient);
5494 case VMX_EXIT_APIC_ACCESS: return vmxHCExitApicAccessNested(pVCpu, pVmxTransient);
5495 case VMX_EXIT_APIC_WRITE: return vmxHCExitApicWriteNested(pVCpu, pVmxTransient);
5496 case VMX_EXIT_VIRTUALIZED_EOI: return vmxHCExitVirtEoiNested(pVCpu, pVmxTransient);
5497 case VMX_EXIT_MOV_CRX: return vmxHCExitMovCRxNested(pVCpu, pVmxTransient);
5498 case VMX_EXIT_INT_WINDOW: return vmxHCExitIntWindowNested(pVCpu, pVmxTransient);
5499 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindowNested(pVCpu, pVmxTransient);
5500 case VMX_EXIT_TPR_BELOW_THRESHOLD: return vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient);
5501 case VMX_EXIT_MWAIT: return vmxHCExitMwaitNested(pVCpu, pVmxTransient);
5502 case VMX_EXIT_MONITOR: return vmxHCExitMonitorNested(pVCpu, pVmxTransient);
5503 case VMX_EXIT_PAUSE: return vmxHCExitPauseNested(pVCpu, pVmxTransient);
5504
5505 case VMX_EXIT_PREEMPT_TIMER:
5506 {
5507 /** @todo NSTVMX: Preempt timer. */
5508 return vmxHCExitPreemptTimer(pVCpu, pVmxTransient);
5509 }
5510
5511 case VMX_EXIT_MOV_DRX: return vmxHCExitMovDRxNested(pVCpu, pVmxTransient);
5512 case VMX_EXIT_RDPMC: return vmxHCExitRdpmcNested(pVCpu, pVmxTransient);
5513
5514 case VMX_EXIT_VMREAD:
5515 case VMX_EXIT_VMWRITE: return vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient);
5516
5517 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFaultNested(pVCpu, pVmxTransient);
5518 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
5519
5520 case VMX_EXIT_INIT_SIGNAL:
5521 case VMX_EXIT_SIPI:
5522 case VMX_EXIT_IO_SMI:
5523 case VMX_EXIT_SMI:
5524 case VMX_EXIT_ERR_MSR_LOAD:
5525 case VMX_EXIT_ERR_MACHINE_CHECK:
5526 case VMX_EXIT_PML_FULL:
5527 case VMX_EXIT_RSM:
5528 default:
5529 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5530 }
5531}
5532#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5533
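/*
 * Dispatch sketch (illustrative; the calling run loop is not part of this file and the exact
 * call sites are assumptions of this note): when the switch-based dispatchers above are used
 * (i.e. without HMVMX_USE_FUNCTION_TABLE), the transient structure's fIsNestedGuest flag
 * selects between the two of them after each VM-exit.
 *
 *     VBOXSTRICTRC rcStrict = !pVmxTransient->fIsNestedGuest
 *                           ? vmxHCHandleExit(pVCpu, pVmxTransient)
 *                           : vmxHCHandleExitNested(pVCpu, pVmxTransient);
 */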
5534
5535/** @name VM-exit helpers.
5536 * @{
5537 */
5538/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5539/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
5540/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5541
5542/** Macro for VM-exits called unexpectedly. */
5543#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
5544 do { \
5545 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
5546 return VERR_VMX_UNEXPECTED_EXIT; \
5547 } while (0)
5548
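/*
 * Usage sketch (illustrative; the handler body is hypothetical): a VM-exit handler that
 * considers its exit reason impossible in the current configuration records the reason as
 * the HM error and fails the exit in one statement:
 *
 *     HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
 */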
5549#ifdef VBOX_STRICT
5550# ifndef IN_NEM_DARWIN
5551/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
5552# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
5553 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
5554
5555# define HMVMX_ASSERT_PREEMPT_CPUID() \
5556 do { \
5557 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
5558 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
5559 } while (0)
5560
5561# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5562 do { \
5563 AssertPtr((a_pVCpu)); \
5564 AssertPtr((a_pVmxTransient)); \
5565 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
5566 Assert((a_pVmxTransient)->pVmcsInfo); \
5567 Assert(ASMIntAreEnabled()); \
5568 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5569 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
5570 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5571 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5572 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
5573 HMVMX_ASSERT_PREEMPT_CPUID(); \
5574 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5575 } while (0)
5576# else
5577# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
5578# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
5579# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5580 do { \
5581 AssertPtr((a_pVCpu)); \
5582 AssertPtr((a_pVmxTransient)); \
5583 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
5584 Assert((a_pVmxTransient)->pVmcsInfo); \
5585 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5586 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5587 } while (0)
5588# endif
5589
5590# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5591 do { \
5592 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
5593 Assert((a_pVmxTransient)->fIsNestedGuest); \
5594 } while (0)
5595
5596# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5597 do { \
5598 Log4Func(("\n")); \
5599 } while (0)
5600#else
5601# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5602 do { \
5603 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5604 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
5605 } while (0)
5606
5607# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5608 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
5609
5610# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
5611#endif
5612
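/*
 * Usage sketch (illustrative; the handler shown is hypothetical): every VM-exit handler is
 * expected to start with the matching validation macro so the strict-build checks above run
 * before any guest state is touched:
 *
 *     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
 */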
5613#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5614/** Macro that does the necessary privilege checks and intercepted VM-exits for
5615 * guests that attempted to execute a VMX instruction. */
5616# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
5617 do \
5618 { \
5619 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
5620 if (rcStrictTmp == VINF_SUCCESS) \
5621 { /* likely */ } \
5622 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5623 { \
5624 Assert((a_pVCpu)->hm.s.Event.fPending); \
5625 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
5626 return VINF_SUCCESS; \
5627 } \
5628 else \
5629 { \
5630 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
5631 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
5632 } \
5633 } while (0)
5634
5635/** Macro that decodes a memory operand for an VM-exit caused by an instruction. */
5636# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
5637 do \
5638 { \
5639 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
5640 (a_pGCPtrEffAddr)); \
5641 if (rcStrictTmp == VINF_SUCCESS) \
5642 { /* likely */ } \
5643 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5644 { \
5645 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
5646 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
5647 NOREF(uXcptTmp); \
5648 return VINF_SUCCESS; \
5649 } \
5650 else \
5651 { \
5652 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
5653 return rcStrictTmp; \
5654 } \
5655 } while (0)
5656#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5657
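/*
 * Combined usage sketch (illustrative only; the handler, field names and VMXMEMACCESS_READ
 * constant are written from memory and should be treated as assumptions of this note): a
 * VMX-instruction exit handler with a memory operand typically performs the privilege and
 * intercept checks first and then decodes the operand, with both macros returning early
 * when an exception becomes pending.
 *
 *     HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
 *     RTGCPTR GCPtrOperand;
 *     HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual,
 *                              VMXMEMACCESS_READ, &GCPtrOperand);
 */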
5658
5659/**
5660 * Advances the guest RIP by the specified number of bytes.
5661 *
5662 * @param pVCpu The cross context virtual CPU structure.
5663 * @param cbInstr Number of bytes to advance the RIP by.
5664 *
5665 * @remarks No-long-jump zone!!!
5666 */
5667DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
5668{
5669 /* Advance the RIP. */
5670 pVCpu->cpum.GstCtx.rip += cbInstr;
5671 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
5672
5673 /* Update interrupt inhibition. */
5674 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
5675 && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
5676 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
5677}
5678
5679
5680/**
5681 * Advances the guest RIP after reading it from the VMCS.
5682 *
5683 * @returns VBox status code, no informational status codes.
5684 * @param pVCpu The cross context virtual CPU structure.
5685 * @param pVmxTransient The VMX-transient structure.
5686 *
5687 * @remarks No-long-jump zone!!!
5688 */
5689static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5690{
5691 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
5692 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
5693 AssertRCReturn(rc, rc);
5694
5695 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
5696 return VINF_SUCCESS;
5697}
5698
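/*
 * Usage sketch (illustrative; the surrounding handler is hypothetical): exit handlers for
 * trivially emulated instructions advance RIP only after the emulation work is done and
 * then return to the run loop.
 *
 *     // ... emulate the instruction ...
 *     int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
 *     AssertRCReturn(rc, rc);
 *     return VINF_SUCCESS;
 */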
5699
5700/**
5701 * Handles a condition that occurred while delivering an event through the guest or
5702 * nested-guest IDT.
5703 *
5704 * @returns Strict VBox status code (i.e. informational status codes too).
5705 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5706 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
5707 * to continue execution of the guest which will deliver the \#DF.
5708 * @retval VINF_EM_RESET if we detected a triple-fault condition.
5709 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
5710 *
5711 * @param pVCpu The cross context virtual CPU structure.
5712 * @param pVmxTransient The VMX-transient structure.
5713 *
5714 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
5715 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
5716 * is due to an EPT violation, PML full or SPP-related event.
5717 *
5718 * @remarks No-long-jump zone!!!
5719 */
5720static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5721{
5722 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
5723 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
5724 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5725 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5726 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5727 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
5728
5729 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5730 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5731 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
5732 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
5733 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
5734 {
5735 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
5736 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
5737
5738 /*
5739 * If the event was a software interrupt (generated with INT n) or a software exception
5740 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
5741 * can handle the VM-exit and continue guest execution which will re-execute the
5742 * instruction rather than re-injecting the exception, as that can cause premature
5743 * trips to ring-3 before injection and involve TRPM which currently has no way of
5744 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
5745 * the problem).
5746 */
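        /*
         * For instance (illustrative): if the guest executes INT3 and delivery of the
         * resulting #BP through the IDT causes an EPT violation, the IDT-vectoring
         * type is "software exception", so we let the guest re-execute the INT3 once
         * the EPT violation has been handled instead of re-injecting the #BP.
         */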
5747 IEMXCPTRAISE enmRaise;
5748 IEMXCPTRAISEINFO fRaiseInfo;
5749 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5750 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5751 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
5752 {
5753 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
5754 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5755 }
5756 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
5757 {
5758 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
5759 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
5760 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
5761
5762 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
5763 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
5764
5765 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
5766
5767 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
5768 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
5769 {
5770 pVmxTransient->fVectoringPF = true;
5771 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5772 }
5773 }
5774 else
5775 {
5776 /*
5777 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
5778 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
5779 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
5780 */
5781 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5782 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5783 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
5784 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5785 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5786 }
5787
5788 /*
5789 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
5790 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
5791 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
5792 * subsequent VM-entry would fail, see @bugref{7445}.
5793 *
5794 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
5795 */
5796 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5797 && enmRaise == IEMXCPTRAISE_PREV_EVENT
5798 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5799 && CPUMIsGuestNmiBlocking(pVCpu))
5800 {
5801 CPUMSetGuestNmiBlocking(pVCpu, false);
5802 }
5803
5804 switch (enmRaise)
5805 {
5806 case IEMXCPTRAISE_CURRENT_XCPT:
5807 {
5808 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
5809 Assert(rcStrict == VINF_SUCCESS);
5810 break;
5811 }
5812
5813 case IEMXCPTRAISE_PREV_EVENT:
5814 {
5815 uint32_t u32ErrCode;
5816 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
5817 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
5818 else
5819 u32ErrCode = 0;
5820
5821 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
5822 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
5823 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */,
5824 u32ErrCode, pVCpu->cpum.GstCtx.cr2);
5825
5826 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5827 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
5828 Assert(rcStrict == VINF_SUCCESS);
5829 break;
5830 }
5831
5832 case IEMXCPTRAISE_REEXEC_INSTR:
5833 Assert(rcStrict == VINF_SUCCESS);
5834 break;
5835
5836 case IEMXCPTRAISE_DOUBLE_FAULT:
5837 {
5838 /*
5839 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
5840 * second #PF as a guest #PF (and not a shadow #PF), which then needs to be converted into a #DF.
5841 */
5842 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
5843 {
5844 pVmxTransient->fVectoringDoublePF = true;
5845 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5846 pVCpu->cpum.GstCtx.cr2));
5847 rcStrict = VINF_SUCCESS;
5848 }
5849 else
5850 {
5851 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
5852 vmxHCSetPendingXcptDF(pVCpu);
5853 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5854 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5855 rcStrict = VINF_HM_DOUBLE_FAULT;
5856 }
5857 break;
5858 }
5859
5860 case IEMXCPTRAISE_TRIPLE_FAULT:
5861 {
5862 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
5863 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5864 rcStrict = VINF_EM_RESET;
5865 break;
5866 }
5867
5868 case IEMXCPTRAISE_CPU_HANG:
5869 {
5870 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
5871 rcStrict = VERR_EM_GUEST_CPU_HANG;
5872 break;
5873 }
5874
5875 default:
5876 {
5877 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
5878 rcStrict = VERR_VMX_IPE_2;
5879 break;
5880 }
5881 }
5882 }
5883 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5884 && !CPUMIsGuestNmiBlocking(pVCpu))
5885 {
5886 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
5887 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
5888 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
5889 {
5890 /*
5891 * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
5892 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5893 * that virtual NMIs remain blocked until the IRET execution is completed.
5894 *
5895 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
5896 */
5897 CPUMSetGuestNmiBlocking(pVCpu, true);
5898 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5899 }
5900 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5901 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5902 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5903 {
5904 /*
5905 * Execution of IRET caused an EPT violation, page-modification log-full event or
5906 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
5907 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5908 * that virtual NMIs remain blocked until the IRET execution is completed.
5909 *
5910 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
5911 */
5912 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
5913 {
5914 CPUMSetGuestNmiBlocking(pVCpu, true);
5915 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5916 }
5917 }
5918 }
5919
5920 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
5921 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
5922 return rcStrict;
5923}
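
/*
 * Worked example (illustrative): the guest takes a #PF and, while delivering it
 * through the IDT, the CPU hits a second #PF (e.g. the handler's stack page is
 * not present). Both exceptions are in the page-fault/contributory class, so
 * IEMEvaluateRecursiveXcpt() returns IEMXCPTRAISE_DOUBLE_FAULT. The function
 * above then either records a vectoring double #PF (IEMXCPTRAISEINFO_PF_PF, so
 * vmxHCExitXcptPF() can convert it to a #DF if PGM confirms a guest #PF) or
 * queues a #DF immediately and returns VINF_HM_DOUBLE_FAULT.
 */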
5924
5925
5926#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5927/**
5928 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
5929 * guest attempting to execute a VMX instruction.
5930 *
5931 * @returns Strict VBox status code (i.e. informational status codes too).
5932 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5933 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
5934 *
5935 * @param pVCpu The cross context virtual CPU structure.
5936 * @param uExitReason The VM-exit reason.
5937 *
5938 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
5939 * @remarks No-long-jump zone!!!
5940 */
5941static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
5942{
5943 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
5944 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
5945
5946 /*
5947 * The physical CPU would have already checked the CPU mode/code segment.
5948 * We shall just assert here for paranoia.
5949 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
5950 */
5951 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
5952 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
5953 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
5954
5955 if (uExitReason == VMX_EXIT_VMXON)
5956 {
5957 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
5958
5959 /*
5960 * We check CR4.VMXE because it is required to be always set while in VMX operation
5961 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
5962 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
5963 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
5964 */
5965 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
5966 {
5967 Log4Func(("CR4.VMXE is not set -> #UD\n"));
5968 vmxHCSetPendingXcptUD(pVCpu);
5969 return VINF_HM_PENDING_XCPT;
5970 }
5971 }
5972 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
5973 {
5974 /*
5975 * The guest has not entered VMX operation but attempted to execute a VMX instruction
5976 * (other than VMXON), we need to raise a #UD.
5977 */
5978 Log4Func(("Not in VMX root mode -> #UD\n"));
5979 vmxHCSetPendingXcptUD(pVCpu);
5980 return VINF_HM_PENDING_XCPT;
5981 }
5982
5983 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
5984 return VINF_SUCCESS;
5985}
5986
5987
5988/**
5989 * Decodes the memory operand of an instruction that caused a VM-exit.
5990 *
5991 * The Exit qualification field provides the displacement field for memory
5992 * operand instructions, if any.
5993 *
5994 * @returns Strict VBox status code (i.e. informational status codes too).
5995 * @retval VINF_SUCCESS if the operand was successfully decoded.
5996 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
5997 * operand.
5998 * @param pVCpu The cross context virtual CPU structure.
5999 * @param uExitInstrInfo The VM-exit instruction information field.
6000 * @param enmMemAccess The memory operand's access type (read or write).
6001 * @param GCPtrDisp The instruction displacement field, if any. For
6002 * RIP-relative addressing pass RIP + displacement here.
6003 * @param pGCPtrMem Where to store the effective destination memory address.
6004 *
6005 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6006 * virtual-8086 mode hence skips those checks while verifying if the
6007 * segment is valid.
6008 */
6009static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6010 PRTGCPTR pGCPtrMem)
6011{
6012 Assert(pGCPtrMem);
6013 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6014 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6015 | CPUMCTX_EXTRN_CR0);
6016
6017 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6018 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6019 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6020
6021 VMXEXITINSTRINFO ExitInstrInfo;
6022 ExitInstrInfo.u = uExitInstrInfo;
6023 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6024 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6025 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6026 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6027 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6028 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6029 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6030 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6031 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6032
6033 /*
6034 * Validate instruction information.
6035 * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6036 */
6037 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6038 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6039 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6040 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6041 AssertLogRelMsgReturn(fIsMemOperand,
6042 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6043
6044 /*
6045 * Compute the complete effective address.
6046 *
6047 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6048 * See AMD spec. 4.5.2 "Segment Registers".
6049 */
6050 RTGCPTR GCPtrMem = GCPtrDisp;
6051 if (fBaseRegValid)
6052 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6053 if (fIdxRegValid)
6054 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6055
6056 RTGCPTR const GCPtrOff = GCPtrMem;
6057 if ( !fIsLongMode
6058 || iSegReg >= X86_SREG_FS)
6059 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6060 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
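    /*
     * Worked example (illustrative): for "vmptrld [rbx + rsi*8 + 0x10]" in 64-bit
     * mode the displacement 0x10 arrives in GCPtrDisp (via the Exit qualification),
     * fBaseRegValid/fIdxRegValid select RBX and RSI, uScale is 3 (index * 8), and
     * no segment base is added since DS is used in long mode; the final mask with
     * s_auAddrSizeMasks[2] keeps the full 64-bit address.
     */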
6061
6062 /*
6063 * Validate effective address.
6064 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6065 */
6066 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6067 Assert(cbAccess > 0);
6068 if (fIsLongMode)
6069 {
6070 if (X86_IS_CANONICAL(GCPtrMem))
6071 {
6072 *pGCPtrMem = GCPtrMem;
6073 return VINF_SUCCESS;
6074 }
6075
6076 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6077 * "Data Limit Checks in 64-bit Mode". */
6078 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6079 vmxHCSetPendingXcptGP(pVCpu, 0);
6080 return VINF_HM_PENDING_XCPT;
6081 }
6082
6083 /*
6084 * This is a watered down version of iemMemApplySegment().
6085 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6086 * and segment CPL/DPL checks are skipped.
6087 */
6088 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6089 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6090 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6091
6092 /* Check if the segment is present and usable. */
6093 if ( pSel->Attr.n.u1Present
6094 && !pSel->Attr.n.u1Unusable)
6095 {
6096 Assert(pSel->Attr.n.u1DescType);
6097 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6098 {
6099 /* Check permissions for the data segment. */
6100 if ( enmMemAccess == VMXMEMACCESS_WRITE
6101 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6102 {
6103 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6104 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6105 return VINF_HM_PENDING_XCPT;
6106 }
6107
6108 /* Check limits if it's a normal data segment. */
6109 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6110 {
6111 if ( GCPtrFirst32 > pSel->u32Limit
6112 || GCPtrLast32 > pSel->u32Limit)
6113 {
6114 Log4Func(("Data segment limit exceeded. "
6115 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6116 GCPtrLast32, pSel->u32Limit));
6117 if (iSegReg == X86_SREG_SS)
6118 vmxHCSetPendingXcptSS(pVCpu, 0);
6119 else
6120 vmxHCSetPendingXcptGP(pVCpu, 0);
6121 return VINF_HM_PENDING_XCPT;
6122 }
6123 }
6124 else
6125 {
6126 /* Check limits if it's an expand-down data segment.
6127 Note! The upper boundary is defined by the B bit, not the G bit! */
6128 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6129 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6130 {
6131 Log4Func(("Expand-down data segment limit exceeded. "
6132 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6133 GCPtrLast32, pSel->u32Limit));
6134 if (iSegReg == X86_SREG_SS)
6135 vmxHCSetPendingXcptSS(pVCpu, 0);
6136 else
6137 vmxHCSetPendingXcptGP(pVCpu, 0);
6138 return VINF_HM_PENDING_XCPT;
6139 }
6140 }
6141 }
6142 else
6143 {
6144 /* Check permissions for the code segment. */
6145 if ( enmMemAccess == VMXMEMACCESS_WRITE
6146 || ( enmMemAccess == VMXMEMACCESS_READ
6147 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6148 {
6149 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6150 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6151 vmxHCSetPendingXcptGP(pVCpu, 0);
6152 return VINF_HM_PENDING_XCPT;
6153 }
6154
6155 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6156 if ( GCPtrFirst32 > pSel->u32Limit
6157 || GCPtrLast32 > pSel->u32Limit)
6158 {
6159 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6160 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6161 if (iSegReg == X86_SREG_SS)
6162 vmxHCSetPendingXcptSS(pVCpu, 0);
6163 else
6164 vmxHCSetPendingXcptGP(pVCpu, 0);
6165 return VINF_HM_PENDING_XCPT;
6166 }
6167 }
6168 }
6169 else
6170 {
6171 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6172 vmxHCSetPendingXcptGP(pVCpu, 0);
6173 return VINF_HM_PENDING_XCPT;
6174 }
6175
6176 *pGCPtrMem = GCPtrMem;
6177 return VINF_SUCCESS;
6178}
6179#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6180
6181
6182/**
6183 * VM-exit helper for LMSW.
6184 */
6185static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6186{
6187 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6188 AssertRCReturn(rc, rc);
6189
6190 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6191 AssertMsg( rcStrict == VINF_SUCCESS
6192 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6193
6194 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6195 if (rcStrict == VINF_IEM_RAISED_XCPT)
6196 {
6197 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6198 rcStrict = VINF_SUCCESS;
6199 }
6200
6201 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6202 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6203 return rcStrict;
6204}
6205
6206
6207/**
6208 * VM-exit helper for CLTS.
6209 */
6210static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6211{
6212 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6213 AssertRCReturn(rc, rc);
6214
6215 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6216 AssertMsg( rcStrict == VINF_SUCCESS
6217 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6218
6219 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6220 if (rcStrict == VINF_IEM_RAISED_XCPT)
6221 {
6222 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6223 rcStrict = VINF_SUCCESS;
6224 }
6225
6226 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6227 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6228 return rcStrict;
6229}
6230
6231
6232/**
6233 * VM-exit helper for MOV from CRx (CRx read).
6234 */
6235static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6236{
6237 Assert(iCrReg < 16);
6238 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6239
6240 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6241 AssertRCReturn(rc, rc);
6242
6243 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6244 AssertMsg( rcStrict == VINF_SUCCESS
6245 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6246
6247 if (iGReg == X86_GREG_xSP)
6248 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6249 else
6250 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6251#ifdef VBOX_WITH_STATISTICS
6252 switch (iCrReg)
6253 {
6254 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6255 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6256 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6257 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6258 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6259 }
6260#endif
6261 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6262 return rcStrict;
6263}
6264
6265
6266/**
6267 * VM-exit helper for MOV to CRx (CRx write).
6268 */
6269static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6270{
6271 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6272
6273 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6274 AssertMsg( rcStrict == VINF_SUCCESS
6275 || rcStrict == VINF_IEM_RAISED_XCPT
6276 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6277
6278 switch (iCrReg)
6279 {
6280 case 0:
6281 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6282 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6283 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6284 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6285 break;
6286
6287 case 2:
6288 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6289 /* Nothing to do here, CR2 is not part of the VMCS. */
6290 break;
6291
6292 case 3:
6293 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6294 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6295 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6296 break;
6297
6298 case 4:
6299 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6300 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6301#ifndef IN_NEM_DARWIN
6302 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6303 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6304#else
6305 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6306#endif
6307 break;
6308
6309 case 8:
6310 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6311 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6312 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6313 break;
6314
6315 default:
6316 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6317 break;
6318 }
6319
6320 if (rcStrict == VINF_IEM_RAISED_XCPT)
6321 {
6322 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6323 rcStrict = VINF_SUCCESS;
6324 }
6325 return rcStrict;
6326}
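
/*
 * Illustrative sketch (assumption: the VMX_EXIT_QUAL_CRX_* accessor names are
 * taken from their use by the MOV CRx VM-exit handler later in this file; this
 * is not a verbatim copy of it): the CRx-access exit handler decodes the Exit
 * qualification and dispatches to the helpers above roughly as follows.
 *
 *     uint64_t const uExitQual = pVmxTransient->uExitQual;
 *     switch (VMX_EXIT_QUAL_CRX_ACCESS(uExitQual))
 *     {
 *         case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
 *             rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr,
 *                                          VMX_EXIT_QUAL_CRX_GENREG(uExitQual),
 *                                          VMX_EXIT_QUAL_CRX_REGISTER(uExitQual));
 *             break;
 *         case VMX_EXIT_QUAL_CRX_ACCESS_READ:
 *             rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr,
 *                                            VMX_EXIT_QUAL_CRX_GENREG(uExitQual),
 *                                            VMX_EXIT_QUAL_CRX_REGISTER(uExitQual));
 *             break;
 *         // CLTS and LMSW accesses go to vmxHCExitClts()/vmxHCExitLmsw() similarly.
 *     }
 */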
6327
6328
6329/**
6330 * VM-exit exception handler for \#PF (Page-fault exception).
6331 *
6332 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6333 */
6334static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6335{
6336 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6337 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
6338
6339#ifndef IN_NEM_DARWIN
6340 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6341 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6342 { /* likely */ }
6343 else
6344#endif
6345 {
6346#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6347 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6348#endif
6349 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6350 if (!pVmxTransient->fVectoringDoublePF)
6351 {
6352 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6353 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6354 }
6355 else
6356 {
6357 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6358 Assert(!pVmxTransient->fIsNestedGuest);
6359 vmxHCSetPendingXcptDF(pVCpu);
6360 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6361 }
6362 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6363 return VINF_SUCCESS;
6364 }
6365
6366 Assert(!pVmxTransient->fIsNestedGuest);
6367
6368 /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
6369 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6370 if (pVmxTransient->fVectoringPF)
6371 {
6372 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6373 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6374 }
6375
6376 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6377 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6378 AssertRCReturn(rc, rc);
6379
6380 Log4Func(("#PF: cs:rip=%#04x:%#RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pCtx->cs.Sel, pCtx->rip,
6381 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pCtx->cr3));
6382
6383 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6384 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
6385
6386 Log4Func(("#PF: rc=%Rrc\n", rc));
6387 if (rc == VINF_SUCCESS)
6388 {
6389 /*
6390 * This is typically a shadow page table sync or a MMIO instruction. But we may have
6391 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6392 */
6393 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6394 TRPMResetTrap(pVCpu);
6395 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6396 return rc;
6397 }
6398
6399 if (rc == VINF_EM_RAW_GUEST_TRAP)
6400 {
6401 if (!pVmxTransient->fVectoringDoublePF)
6402 {
6403 /* It's a guest page fault and needs to be reflected to the guest. */
6404 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6405 TRPMResetTrap(pVCpu);
6406 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6407 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6408 uGstErrorCode, pVmxTransient->uExitQual);
6409 }
6410 else
6411 {
6412 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6413 TRPMResetTrap(pVCpu);
6414 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6415 vmxHCSetPendingXcptDF(pVCpu);
6416 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6417 }
6418
6419 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6420 return VINF_SUCCESS;
6421 }
6422
6423 TRPMResetTrap(pVCpu);
6424 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6425 return rc;
6426}
6427
6428
6429/**
6430 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6431 *
6432 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6433 */
6434static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6435{
6436 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6437 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6438
6439 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0);
6440 AssertRCReturn(rc, rc);
6441
6442 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6443 {
6444 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
6445 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6446
6447 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6448 * provides VM-exit instruction length. If this causes problems later,
6449 * disassemble the instruction like it's done on AMD-V. */
6450 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6451 AssertRCReturn(rc2, rc2);
6452 return rc;
6453 }
6454
6455 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6456 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6457 return VINF_SUCCESS;
6458}
6459
6460
6461/**
6462 * VM-exit exception handler for \#BP (Breakpoint exception).
6463 *
6464 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6465 */
6466static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6467{
6468 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6469 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
6470
6471 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6472 AssertRCReturn(rc, rc);
6473
6474 VBOXSTRICTRC rcStrict;
6475 if (!pVmxTransient->fIsNestedGuest)
6476 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx));
6477 else
6478 rcStrict = VINF_EM_RAW_GUEST_TRAP;
6479
6480 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6481 {
6482 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6483 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6484 rcStrict = VINF_SUCCESS;
6485 }
6486
6487 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
6488 return rcStrict;
6489}
6490
6491
6492/**
6493 * VM-exit exception handler for \#AC (Alignment-check exception).
6494 *
6495 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6496 */
6497static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6498{
6499 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6500
6501 /*
6502 * Detect #ACs caused by host having enabled split-lock detection.
6503 * Emulate such instructions.
6504 */
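    /*
     * Background (illustrative): a "split lock" is a LOCK-prefixed read-modify-write
     * whose operand straddles a cache-line boundary, e.g. with RBX cache-line aligned:
     *
     *     lock add dword [rbx + 0x3e], 1   ; 4-byte operand crossing a 64-byte line
     *
     * With host split-lock detection enabled such an access raises #AC at any CPL and
     * regardless of CR0.AM/EFLAGS.AC, which is what the checks below use to separate
     * it from the architectural 486-style ring-3 alignment-check #AC.
     */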
6505 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo,
6506 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS);
6507 AssertRCReturn(rc, rc);
6508 /** @todo detect split lock in cpu feature? */
6509 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
6510 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6511 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
6512 || CPUMGetGuestCPL(pVCpu) != 3
6513 /* 3. When the EFLAGS.AC != 0 this can only be a split-lock case. */
6514 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
6515 {
6516 /*
6517 * Check for debug/trace events and import state accordingly.
6518 */
6519 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
6520 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6521 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
6522#ifndef IN_NEM_DARWIN
6523 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
6524#endif
6525 )
6526 {
6527 if (pVM->cCpus == 1)
6528 {
6529#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6530 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6531#else
6532 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6533#endif
6534 AssertRCReturn(rc, rc);
6535 }
6536 }
6537 else
6538 {
6539 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6540 AssertRCReturn(rc, rc);
6541
6542 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
6543
6544 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
6545 {
6546 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
6547 if (rcStrict != VINF_SUCCESS)
6548 return rcStrict;
6549 }
6550 }
6551
6552 /*
6553 * Emulate the instruction.
6554 *
6555 * We have to ignore the LOCK prefix here as we must not retrigger the
6556 * detection on the host. This isn't all that satisfactory, though...
6557 */
6558 if (pVM->cCpus == 1)
6559 {
6560 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
6561 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6562
6563 /** @todo For SMP configs we should do a rendezvous here. */
6564 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
6565 if (rcStrict == VINF_SUCCESS)
6566#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6567 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6568 HM_CHANGED_GUEST_RIP
6569 | HM_CHANGED_GUEST_RFLAGS
6570 | HM_CHANGED_GUEST_GPRS_MASK
6571 | HM_CHANGED_GUEST_CS
6572 | HM_CHANGED_GUEST_SS);
6573#else
6574 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6575#endif
6576 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6577 {
6578 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6579 rcStrict = VINF_SUCCESS;
6580 }
6581 return rcStrict;
6582 }
6583 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
6584 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6585 return VINF_EM_EMULATE_SPLIT_LOCK;
6586 }
6587
6588 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
6589 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
6590 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
6591
6592 /* Re-inject it. We'll detect any nesting before getting here. */
6593 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6594 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6595 return VINF_SUCCESS;
6596}
6597
6598
6599/**
6600 * VM-exit exception handler for \#DB (Debug exception).
6601 *
6602 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6603 */
6604static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6605{
6606 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6607 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
6608
6609 /*
6610 * Get the DR6-like values from the Exit qualification and pass it to DBGF for processing.
6611 */
6612 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
6613
6614 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
6615 uint64_t const uDR6 = X86_DR6_INIT_VAL
6616 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
6617 | X86_DR6_BD | X86_DR6_BS));
6618
6619 int rc;
6620 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6621 if (!pVmxTransient->fIsNestedGuest)
6622 {
6623 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6624
6625 /*
6626 * Prevents stepping twice over the same instruction when the guest is stepping using
6627 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
6628 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
6629 */
6630 if ( rc == VINF_EM_DBG_STEPPED
6631 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
6632 {
6633 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6634 rc = VINF_EM_RAW_GUEST_TRAP;
6635 }
6636 }
6637 else
6638 rc = VINF_EM_RAW_GUEST_TRAP;
6639 Log6Func(("rc=%Rrc\n", rc));
6640 if (rc == VINF_EM_RAW_GUEST_TRAP)
6641 {
6642 /*
6643 * The exception was for the guest. Update DR6, DR7.GD and
6644 * IA32_DEBUGCTL.LBR before forwarding it.
6645 * See Intel spec. 27.1 "Architectural State before a VM-Exit".
6646 */
6647#ifndef IN_NEM_DARWIN
6648 VMMRZCallRing3Disable(pVCpu);
6649 HM_DISABLE_PREEMPT(pVCpu);
6650
6651 pCtx->dr[6] &= ~X86_DR6_B_MASK;
6652 pCtx->dr[6] |= uDR6;
6653 if (CPUMIsGuestDebugStateActive(pVCpu))
6654 ASMSetDR6(pCtx->dr[6]);
6655
6656 HM_RESTORE_PREEMPT();
6657 VMMRZCallRing3Enable(pVCpu);
6658#else
6659 /** @todo */
6660#endif
6661
6662 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_DR7);
6663 AssertRCReturn(rc, rc);
6664
6665 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
6666 pCtx->dr[7] &= ~(uint64_t)X86_DR7_GD;
6667
6668 /* Paranoia. */
6669 pCtx->dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
6670 pCtx->dr[7] |= X86_DR7_RA1_MASK;
6671
6672 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
6673 AssertRC(rc);
6674
6675 /*
6676 * Raise #DB in the guest.
6677 *
6678 * It is important to reflect exactly what the VM-exit gave us (preserving the
6679 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
6680 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
6681 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
6682 *
6683 * Intel re-documented ICEBP/INT1 in May 2018; it was previously only documented
6684 * as part of the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
6685 */
6686 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6687 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6688 return VINF_SUCCESS;
6689 }
6690
6691 /*
6692 * Not a guest trap, must be a hypervisor related debug event then.
6693 * Update DR6 in case someone is interested in it.
6694 */
6695 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
6696 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
6697 CPUMSetHyperDR6(pVCpu, uDR6);
6698
6699 return rc;
6700}
6701
6702
6703/**
6704 * Hacks its way around the lovely mesa driver's backdoor accesses.
6705 *
6706 * @sa hmR0SvmHandleMesaDrvGp.
6707 */
6708static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6709{
6710 LogFunc(("cs:rip=%#04x:%#RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
6711 RT_NOREF(pCtx);
6712
6713 /* For now we'll just skip the instruction. */
6714 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6715}
6716
6717
6718/**
6719 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
6720 * backdoor logging w/o checking what it is running inside.
6721 *
6722 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
6723 * backdoor port and magic numbers loaded in registers.
6724 *
6725 * @returns true if it is, false if it isn't.
6726 * @sa hmR0SvmIsMesaDrvGp.
6727 */
6728DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6729{
6730 /* 0xed: IN eAX,dx */
6731 uint8_t abInstr[1];
6732 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
6733 return false;
6734
6735 /* Check that it is #GP(0). */
6736 if (pVmxTransient->uExitIntErrorCode != 0)
6737 return false;
6738
6739 /* Check magic and port. */
6740 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
6741 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
6742 if (pCtx->rax != UINT32_C(0x564d5868))
6743 return false;
6744 if (pCtx->dx != UINT32_C(0x5658))
6745 return false;
6746
6747 /* Flat ring-3 CS. */
6748 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
6749 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
6750 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
6751 if (pCtx->cs.Attr.n.u2Dpl != 3)
6752 return false;
6753 if (pCtx->cs.u64Base != 0)
6754 return false;
6755
6756 /* Check opcode. */
6757 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
6758 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
6759 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
6760 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
6761 if (RT_FAILURE(rc))
6762 return false;
6763 if (abInstr[0] != 0xed)
6764 return false;
6765
6766 return true;
6767}
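
/*
 * Note (illustrative): 0x564d5868 is ASCII 'VMXh' and 0x5658 ('VX') is the
 * well-known VMware backdoor I/O port, so the sequence recognized above is the
 * guest-side probe the Mesa/SVGA user-mode driver issues from flat ring-3:
 *
 *     mov eax, 0x564d5868   ; backdoor magic
 *     mov dx,  0x5658       ; backdoor port
 *     in  eax, dx           ; opcode 0xed - #GP(0) when IOPL/TSS do not permit it
 */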
6768
6769
6770/**
6771 * VM-exit exception handler for \#GP (General-protection exception).
6772 *
6773 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6774 */
6775static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6776{
6777 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6778 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
6779
6780 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6781 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6782#ifndef IN_NEM_DARWIN
6783 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
6784 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
6785 { /* likely */ }
6786 else
6787#endif
6788 {
6789#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6790# ifndef IN_NEM_DARWIN
6791 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6792# else
6793 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6794# endif
6795#endif
6796 /*
6797 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
6798 * executing a nested-guest, reflect #GP to the guest or nested-guest.
6799 */
6800 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6801 AssertRCReturn(rc, rc);
6802 Log4Func(("Gst: cs:rip=%#04x:%#RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
6803 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
6804
6805 if ( pVmxTransient->fIsNestedGuest
6806 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
6807 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
6808 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6809 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6810 else
6811 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
6812 return rc;
6813 }
6814
6815#ifndef IN_NEM_DARWIN
6816 Assert(CPUMIsGuestInRealModeEx(pCtx));
6817 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
6818 Assert(!pVmxTransient->fIsNestedGuest);
6819
6820 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6821 AssertRCReturn(rc, rc);
6822
6823 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
6824 if (rcStrict == VINF_SUCCESS)
6825 {
6826 if (!CPUMIsGuestInRealModeEx(pCtx))
6827 {
6828 /*
6829 * The guest is no longer in real-mode, check if we can continue executing the
6830 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
6831 */
6832 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
6833 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
6834 {
6835 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
6836 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6837 }
6838 else
6839 {
6840 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
6841 rcStrict = VINF_EM_RESCHEDULE;
6842 }
6843 }
6844 else
6845 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6846 }
6847 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6848 {
6849 rcStrict = VINF_SUCCESS;
6850 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6851 }
6852 return VBOXSTRICTRC_VAL(rcStrict);
6853#endif
6854}
6855
6856
6857/**
6858 * VM-exit exception handler for \#DE (Divide Error).
6859 *
6860 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6861 */
6862static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6863{
6864 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6865 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
6866
6867 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6868 AssertRCReturn(rc, rc);
6869
6870 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
6871 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
6872 {
6873 uint8_t cbInstr = 0;
6874 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
6875 if (rc2 == VINF_SUCCESS)
6876 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
6877 else if (rc2 == VERR_NOT_FOUND)
6878 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
6879 else
6880 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
6881 }
6882 else
6883 rcStrict = VINF_SUCCESS; /* Do nothing. */
6884
6885 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
6886 if (RT_FAILURE(rcStrict))
6887 {
6888 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6889 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6890 rcStrict = VINF_SUCCESS;
6891 }
6892
6893 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
6894 return VBOXSTRICTRC_VAL(rcStrict);
6895}
6896
6897
6898/**
6899 * VM-exit exception handler wrapper for all other exceptions that are not handled
6900 * by a specific handler.
6901 *
6902 * This simply re-injects the exception back into the VM without any special
6903 * processing.
6904 *
6905 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6906 */
6907static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6908{
6909 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6910
6911#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6912# ifndef IN_NEM_DARWIN
6913 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6914 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
6915 ("uVector=%#x u32XcptBitmap=%#X32\n",
6916 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
6917 NOREF(pVmcsInfo);
6918# endif
6919#endif
6920
6921 /*
6922 * Re-inject the exception into the guest. This cannot be a double-fault condition which
6923 * would have been handled while checking exits due to event delivery.
6924 */
6925 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6926
6927#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6928 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6929 AssertRCReturn(rc, rc);
6930 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6931#endif
6932
6933#ifdef VBOX_WITH_STATISTICS
6934 switch (uVector)
6935 {
6936 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
6937 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
6938 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
6939 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6940 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
6941 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
6942 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6943 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
6944 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
6945 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
6946 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
6947 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
6948 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
6949 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
6950 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
6951 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
6952 default:
6953 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
6954 break;
6955 }
6956#endif
6957
6958 /* We should never call this function for a page-fault, we'd need to pass on the fault address below otherwise. */
6959 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
6960 NOREF(uVector);
6961
6962 /* Re-inject the original exception into the guest. */
6963 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6964 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6965 return VINF_SUCCESS;
6966}
6967
6968
6969/**
6970 * VM-exit exception handler for all exceptions (except NMIs!).
6971 *
6972 * @remarks This may be called for both guests and nested-guests. Take care to not
6973 * make assumptions and avoid doing anything that is not relevant when
6974 * executing a nested-guest (e.g., Mesa driver hacks).
6975 */
6976static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6977{
6978 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6979
6980 /*
6981 * If this VM-exit occurred while delivering an event through the guest IDT, take
6982 * action based on the return code and additional hints (e.g. for page-faults)
6983 * that will be updated in the VMX transient structure.
6984 */
6985 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
6986 if (rcStrict == VINF_SUCCESS)
6987 {
6988 /*
6989 * If an exception caused a VM-exit due to delivery of an event, the original
6990 * event may have to be re-injected into the guest. We shall reinject it and
6991 * continue guest execution. However, page-fault is a complicated case and
6992 * needs additional processing done in vmxHCExitXcptPF().
6993 */
6994 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
6995 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6996 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
6997 || uVector == X86_XCPT_PF)
6998 {
6999 switch (uVector)
7000 {
7001 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
7002 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
7003 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
7004 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
7005 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
7006 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
7007 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
7008 default:
7009 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
7010 }
7011 }
7012 /* else: inject pending event before resuming guest execution. */
7013 }
7014 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
7015 {
7016 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7017 rcStrict = VINF_SUCCESS;
7018 }
7019
7020 return rcStrict;
7021}
7022/** @} */
7023
7024
7025/** @name VM-exit handlers.
7026 * @{
7027 */
7028/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7029/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7030/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7031
7032/**
7033 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7034 */
7035HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7036{
7037 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7038 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7039
7040#ifndef IN_NEM_DARWIN
7041 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7042 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7043 return VINF_SUCCESS;
7044 return VINF_EM_RAW_INTERRUPT;
7045#else
7046 return VINF_SUCCESS;
7047#endif
7048}
7049
7050
7051/**
7052 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7053 * VM-exit.
7054 */
7055HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7056{
7057 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7058 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7059
7060 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
7061
7062 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7063 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7064 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7065
7066 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7067 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7068 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7069 NOREF(pVmcsInfo);
7070
7071 VBOXSTRICTRC rcStrict;
7072 switch (uExitIntType)
7073 {
7074#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7075 /*
7076 * Host physical NMIs:
7077 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7078 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7079 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7080 *
7081 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7082 * See Intel spec. 27.5.5 "Updating Non-Register State".
7083 */
7084 case VMX_EXIT_INT_INFO_TYPE_NMI:
7085 {
7086 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7087 break;
7088 }
7089#endif
7090
7091 /*
7092 * Privileged software exceptions (#DB from ICEBP),
7093 * Software exceptions (#BP and #OF),
7094 * Hardware exceptions:
7095 * Process the required exceptions and resume guest execution if possible.
7096 */
7097 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7098 Assert(uVector == X86_XCPT_DB);
7099 RT_FALL_THRU();
7100 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7101 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7102 RT_FALL_THRU();
7103 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7104 {
7105 NOREF(uVector);
7106 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
7107 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7108 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
7109 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
7110
7111 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7112 break;
7113 }
7114
7115 default:
7116 {
7117 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7118 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7119 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7120 break;
7121 }
7122 }
7123
7124 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7125 return rcStrict;
7126}
7127
7128
7129/**
7130 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7131 */
7132HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7133{
7134 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7135
7136 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts, as it is now ready. */
7137 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7138 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7139
7140 /* Evaluate and deliver pending events and resume guest execution. */
7141 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7142 return VINF_SUCCESS;
7143}
7144
7145
7146/**
7147 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7148 */
7149HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7150{
7151 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7152
7153 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7154 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7155 {
7156 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7157 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7158 }
7159
7160 Assert(!CPUMIsGuestNmiBlocking(pVCpu));
7161
7162 /*
7163 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7164 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7165 */
7166 uint32_t fIntrState;
7167 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7168 AssertRC(rc);
7169 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7170 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7171 {
7172 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
7173 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
7174
7175 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7176 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7177 AssertRC(rc);
7178 }
7179
7180 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs, as it is now ready. */
7181 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7182
7183 /* Evaluate and deliver pending events and resume guest execution. */
7184 return VINF_SUCCESS;
7185}
7186
7187
7188/**
7189 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7190 */
7191HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7192{
7193 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7194 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7195}
7196
7197
7198/**
7199 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7200 */
7201HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7202{
7203 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7204 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7205}
7206
7207
7208/**
7209 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7210 */
7211HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7212{
7213 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7214
7215 /*
7216 * Get the state we need and update the exit history entry.
7217 */
7218 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7219 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7220
7221 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7222 AssertRCReturn(rc, rc);
7223
7224 VBOXSTRICTRC rcStrict;
7225 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7226 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7227 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
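    /* No exit record means the exit history isn't probing this RIP, so emulate the CPUID inline
       via IEM; otherwise hand the work to EMHistoryExec below. */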
7228 if (!pExitRec)
7229 {
7230 /*
7231 * Regular CPUID instruction execution.
7232 */
7233 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7234 if (rcStrict == VINF_SUCCESS)
7235 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7236 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7237 {
7238 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7239 rcStrict = VINF_SUCCESS;
7240 }
7241 }
7242 else
7243 {
7244 /*
7245 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7246 */
7247 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7248 AssertRCReturn(rc2, rc2);
7249
7250 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7251 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7252
7253 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7254 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7255
7256 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7257 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7258 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7259 }
7260 return rcStrict;
7261}
7262
7263
7264/**
7265 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7266 */
7267HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7268{
7269 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7270
7271 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7272 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4);
7273 AssertRCReturn(rc, rc);
7274
7275 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7276 return VINF_EM_RAW_EMULATE_INSTR;
7277
7278 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7279 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7280}
7281
7282
7283/**
7284 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7285 */
7286HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7287{
7288 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7289
7290 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7291 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7292 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
7293 AssertRCReturn(rc, rc);
7294
7295 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7296 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7297 {
7298 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7299 we must reset offsetting on VM-entry. See @bugref{6634}. */
7300 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7301 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7302 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7303 }
7304 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7305 {
7306 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7307 rcStrict = VINF_SUCCESS;
7308 }
7309 return rcStrict;
7310}
7311
7312
7313/**
7314 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7315 */
7316HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7317{
7318 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7319
7320 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7321 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7322 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);
7323 AssertRCReturn(rc, rc);
7324
7325 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7326 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7327 {
7328 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7329 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7330 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7331 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7332 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7333 }
7334 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7335 {
7336 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7337 rcStrict = VINF_SUCCESS;
7338 }
7339 return rcStrict;
7340}
7341
7342
7343/**
7344 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7345 */
7346HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7347{
7348 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7349
7350 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7351 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0
7352 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
7353 AssertRCReturn(rc, rc);
7354
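    /* Let EM interpret RDPMC. Note: the instruction is always 2 bytes long (0F 33), hence the assertion below. */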
7355 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7356 rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
7357 if (RT_LIKELY(rc == VINF_SUCCESS))
7358 {
7359 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7360 Assert(pVmxTransient->cbExitInstr == 2);
7361 }
7362 else
7363 {
7364 AssertMsgFailed(("vmxHCExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
7365 rc = VERR_EM_INTERPRETER;
7366 }
7367 return rc;
7368}
7369
7370
7371/**
7372 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7373 */
7374HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7375{
7376 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7377
7378 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7379 if (EMAreHypercallInstructionsEnabled(pVCpu))
7380 {
7381 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7382 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0
7383 | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
7384 AssertRCReturn(rc, rc);
7385
7386 /* Perform the hypercall. */
7387 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7388 if (rcStrict == VINF_SUCCESS)
7389 {
7390 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7391 AssertRCReturn(rc, rc);
7392 }
7393 else
7394 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7395 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7396 || RT_FAILURE(rcStrict));
7397
7398 /* If the hypercall changes anything other than the guest's general-purpose registers,
7399 we would need to reload the changed guest bits here before VM-entry. */
7400 }
7401 else
7402 Log4Func(("Hypercalls not enabled\n"));
7403
7404 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7405 if (RT_FAILURE(rcStrict))
7406 {
7407 vmxHCSetPendingXcptUD(pVCpu);
7408 rcStrict = VINF_SUCCESS;
7409 }
7410
7411 return rcStrict;
7412}
7413
7414
7415/**
7416 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7417 */
7418HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7419{
7420 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7421#ifndef IN_NEM_DARWIN
7422 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7423#endif
7424
7425 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7426 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7427 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7428 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7429 AssertRCReturn(rc, rc);
7430
7431 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7432
7433 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7434 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7435 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7436 {
7437 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7438 rcStrict = VINF_SUCCESS;
7439 }
7440 else
7441 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7442 VBOXSTRICTRC_VAL(rcStrict)));
7443 return rcStrict;
7444}
7445
7446
7447/**
7448 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7449 */
7450HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7451{
7452 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7453
7454 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7455 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7456 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
7457 AssertRCReturn(rc, rc);
7458
7459 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7460 if (rcStrict == VINF_SUCCESS)
7461 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7462 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7463 {
7464 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7465 rcStrict = VINF_SUCCESS;
7466 }
7467
7468 return rcStrict;
7469}
7470
7471
7472/**
7473 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7474 */
7475HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7476{
7477 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7478
7479 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7480 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7481 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7482 AssertRCReturn(rc, rc);
7483
7484 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
7485 if (RT_SUCCESS(rcStrict))
7486 {
7487 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
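        /* IEM may return VINF_EM_HALT here; keep the VCPU running if EM determines the MWAIT need not block. */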
7488 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
7489 rcStrict = VINF_SUCCESS;
7490 }
7491
7492 return rcStrict;
7493}
7494
7495
7496/**
7497 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7498 * VM-exit.
7499 */
7500HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7501{
7502 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7503 return VINF_EM_RESET;
7504}
7505
7506
7507/**
7508 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7509 */
7510HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7511{
7512 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7513
7514 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7515 AssertRCReturn(rc, rc);
7516
7517 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
7518 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
7519 rc = VINF_SUCCESS;
7520 else
7521 rc = VINF_EM_HALT;
7522
7523 if (rc != VINF_SUCCESS)
7524 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
7525 return rc;
7526}
7527
7528
7529#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7530/**
7531 * VM-exit handler for instructions that result in a \#UD exception delivered to
7532 * the guest.
7533 */
7534HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7535{
7536 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7537 vmxHCSetPendingXcptUD(pVCpu);
7538 return VINF_SUCCESS;
7539}
7540#endif
7541
7542
7543/**
7544 * VM-exit handler for expiry of the VMX-preemption timer.
7545 */
7546HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7547{
7548 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7549
7550 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
7551 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7552 Log12(("vmxHCExitPreemptTimer:\n"));
7553
7554 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
7555 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7556 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
7557 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
7558 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
7559}
7560
7561
7562/**
7563 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
7564 */
7565HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7566{
7567 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7568
7569 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7570 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7571 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);
7572 AssertRCReturn(rc, rc);
7573
7574 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
7575 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
7576 : HM_CHANGED_RAISED_XCPT_MASK);
7577
7578#ifndef IN_NEM_DARWIN
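    /* XSETBV may have changed the guest XCR0, so re-evaluate whether we need to swap XCR0 around
       VM-entry and update the start-VM function selection if that changed. */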
7579 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7580 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
7581 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
7582 {
7583 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
7584 hmR0VmxUpdateStartVmFunction(pVCpu);
7585 }
7586#endif
7587
7588 return rcStrict;
7589}
7590
7591
7592/**
7593 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
7594 */
7595HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7596{
7597 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7598
7599 /** @todo Enable the new code after finding a reliable guest test-case. */
7600#if 1
7601 return VERR_EM_INTERPRETER;
7602#else
7603 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7604 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
7605 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7606 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
7607 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7608 AssertRCReturn(rc, rc);
7609
7610 /* Paranoia. Ensure this has a memory operand. */
7611 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
7612
7613 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
7614 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
7615 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
7616 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
7617
7618 RTGCPTR GCPtrDesc;
7619 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
7620
7621 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
7622 GCPtrDesc, uType);
7623 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7624 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7625 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7626 {
7627 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7628 rcStrict = VINF_SUCCESS;
7629 }
7630 return rcStrict;
7631#endif
7632}
7633
7634
7635/**
7636 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
7637 * VM-exit.
7638 */
7639HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7640{
7641 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7642 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7643 AssertRCReturn(rc, rc);
7644
7645 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
7646 if (RT_FAILURE(rc))
7647 return rc;
7648
7649 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
7650 NOREF(uInvalidReason);
7651
7652#ifdef VBOX_STRICT
7653 uint32_t fIntrState;
7654 uint64_t u64Val;
7655 vmxHCReadEntryIntInfoVmcs(pVCpu, pVmxTransient);
7656 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
7657 vmxHCReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
7658
7659 Log4(("uInvalidReason %u\n", uInvalidReason));
7660 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
7661 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
7662 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
7663
7664 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
7665 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
7666 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
7667 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
7668 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
7669 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
7670 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
7671 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
7672 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
7673 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
7674 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
7675 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
7676# ifndef IN_NEM_DARWIN
7677 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
7678 {
7679 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
7680 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
7681 }
7682
7683 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
7684# endif
7685#endif
7686
7687 return VERR_VMX_INVALID_GUEST_STATE;
7688}
7689
7690/**
7691 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
7692 */
7693HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7694{
7695 /*
7696 * Cumulative notes of all recognized but unexpected VM-exits.
7697 *
7698 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
7699 * nested-paging is used.
7700 *
7701 * 2. Any instruction that causes a VM-exit unconditionally (for e.g. VMXON) must be
7702 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
7703 * this function (and thereby stop VM execution) for handling such instructions.
7704 *
7705 *
7706 * VMX_EXIT_INIT_SIGNAL:
7707 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
7708 * They are -NOT- blocked in VMX non-root operation so we can, in theory, still get these
7709 * VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
7710 *
7711 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
7712 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
7713 * See Intel spec. "23.8 Restrictions on VMX operation".
7714 *
7715 * VMX_EXIT_SIPI:
7716 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
7717 * activity state is used. We don't make use of it as our guests don't have direct
7718 * access to the host local APIC.
7719 *
7720 * See Intel spec. 25.3 "Other Causes of VM-exits".
7721 *
7722 * VMX_EXIT_IO_SMI:
7723 * VMX_EXIT_SMI:
7724 * This can only happen if we support dual-monitor treatment of SMI, which can be
7725 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
7726 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
7727 * VMX root mode or receive an SMI. If we get here, something funny is going on.
7728 *
7729 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
7730 * See Intel spec. 25.3 "Other Causes of VM-Exits"
7731 *
7732 * VMX_EXIT_ERR_MSR_LOAD:
7733 * Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
7734 * and typically indicate a bug in the hypervisor code. We thus cannot resume guest
7735 * execution.
7736 *
7737 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
7738 *
7739 * VMX_EXIT_ERR_MACHINE_CHECK:
7740 * Machine-check exceptions indicate a fatal/unrecoverable hardware condition,
7741 * including but not limited to system bus, ECC, parity, cache and TLB errors. An
7742 * abort-class #MC exception is raised. We thus cannot assume a
7743 * reasonable chance of continuing any sort of execution and we bail.
7744 *
7745 * See Intel spec. 15.1 "Machine-check Architecture".
7746 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
7747 *
7748 * VMX_EXIT_PML_FULL:
7749 * VMX_EXIT_VIRTUALIZED_EOI:
7750 * VMX_EXIT_APIC_WRITE:
7751 * We do not currently support any of these features and thus they are all unexpected
7752 * VM-exits.
7753 *
7754 * VMX_EXIT_GDTR_IDTR_ACCESS:
7755 * VMX_EXIT_LDTR_TR_ACCESS:
7756 * VMX_EXIT_RDRAND:
7757 * VMX_EXIT_RSM:
7758 * VMX_EXIT_VMFUNC:
7759 * VMX_EXIT_ENCLS:
7760 * VMX_EXIT_RDSEED:
7761 * VMX_EXIT_XSAVES:
7762 * VMX_EXIT_XRSTORS:
7763 * VMX_EXIT_UMWAIT:
7764 * VMX_EXIT_TPAUSE:
7765 * VMX_EXIT_LOADIWKEY:
7766 * These VM-exits are -not- caused unconditionally by execution of the corresponding
7767 * instruction. Any VM-exit for these instructions indicates a hardware problem,
7768 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
7769 *
7770 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
7771 */
7772 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7773 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
7774 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7775}
7776
7777
7778/**
7779 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
7780 */
7781HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7782{
7783 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7784
7785 /** @todo Optimize this: We currently drag in the whole MSR state
7786 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
7787 * MSRs required. That would require changes to IEM and possibly CPUM too.
7788 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7789 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7790 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7791 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
7792 switch (idMsr)
7793 {
7794 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7795 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7796 }
7797
7798 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7799 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7800 AssertRCReturn(rc, rc);
7801
7802 Log4Func(("ecx=%#RX32\n", idMsr));
7803
7804#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
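    /* Strict builds: with MSR bitmaps active we don't expect RDMSR exits for MSRs the guest may read
       directly, i.e. auto-load/store MSRs other than EFER or read-passthru lazy-restore MSRs. */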
7805 Assert(!pVmxTransient->fIsNestedGuest);
7806 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
7807 {
7808 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
7809 && idMsr != MSR_K6_EFER)
7810 {
7811 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
7812 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7813 }
7814 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7815 {
7816 Assert(pVmcsInfo->pvMsrBitmap);
7817 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7818 if (fMsrpm & VMXMSRPM_ALLOW_RD)
7819 {
7820 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
7821 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7822 }
7823 }
7824 }
7825#endif
7826
7827 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
7828 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
7829 if (rcStrict == VINF_SUCCESS)
7830 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7831 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7832 {
7833 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7834 rcStrict = VINF_SUCCESS;
7835 }
7836 else
7837 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
7838 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7839
7840 return rcStrict;
7841}
7842
7843
7844/**
7845 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
7846 */
7847HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7848{
7849 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7850
7851 /** @todo Optimize this: We currently drag in the whole MSR state
7852 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
7853 * MSRs required. That would require changes to IEM and possibly CPUM too.
7854 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7855 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7856 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
7857
7858 /*
7859 * The FS and GS base MSRs are not part of the above all-MSRs mask.
7860 * Although we don't need to fetch the base as it will be overwritten shortly, while
7861 * Although we don't need to fetch the base as it will be overwritten shortly, when
7862 * loading guest-state we would also load the entire segment register including limit
7863 * and attributes, and thus we need to import them here.
7864 switch (idMsr)
7865 {
7866 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7867 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7868 }
7869
7870 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7871 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7872 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7873 AssertRCReturn(rc, rc);
7874
7875 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
7876
7877 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
7878 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
7879
7880 if (rcStrict == VINF_SUCCESS)
7881 {
7882 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7883
7884 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
7885 if ( idMsr == MSR_IA32_APICBASE
7886 || ( idMsr >= MSR_IA32_X2APIC_START
7887 && idMsr <= MSR_IA32_X2APIC_END))
7888 {
7889 /*
7890 * We've already saved the APIC related guest-state (TPR) in post-run phase.
7891 * When full APIC register virtualization is implemented we'll have to make
7892 * sure APIC state is saved from the VMCS before IEM changes it.
7893 */
7894 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
7895 }
7896 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
7897 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7898 else if (idMsr == MSR_K6_EFER)
7899 {
7900 /*
7901 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
7902 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
7903 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
7904 */
7905 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
7906 }
7907
7908 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
7909 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
7910 {
7911 switch (idMsr)
7912 {
7913 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
7914 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
7915 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
7916 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
7917 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
7918 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
7919 default:
7920 {
7921#ifndef IN_NEM_DARWIN
7922 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7923 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
7924 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7925 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
7926#else
7927 AssertMsgFailed(("TODO\n"));
7928#endif
7929 break;
7930 }
7931 }
7932 }
7933#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7934 else
7935 {
7936 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
7937 switch (idMsr)
7938 {
7939 case MSR_IA32_SYSENTER_CS:
7940 case MSR_IA32_SYSENTER_EIP:
7941 case MSR_IA32_SYSENTER_ESP:
7942 case MSR_K8_FS_BASE:
7943 case MSR_K8_GS_BASE:
7944 {
7945 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
7946 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7947 }
7948
7949 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
7950 default:
7951 {
7952 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7953 {
7954 /* EFER MSR writes are always intercepted. */
7955 if (idMsr != MSR_K6_EFER)
7956 {
7957 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
7958 idMsr));
7959 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7960 }
7961 }
7962
7963 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7964 {
7965 Assert(pVmcsInfo->pvMsrBitmap);
7966 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7967 if (fMsrpm & VMXMSRPM_ALLOW_WR)
7968 {
7969 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
7970 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7971 }
7972 }
7973 break;
7974 }
7975 }
7976 }
7977#endif /* VBOX_STRICT */
7978 }
7979 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7980 {
7981 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7982 rcStrict = VINF_SUCCESS;
7983 }
7984 else
7985 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
7986 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7987
7988 return rcStrict;
7989}
7990
7991
7992/**
7993 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
7994 */
7995HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7996{
7997 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7998
7999 /** @todo The guest has likely hit a contended spinlock. We might want to
8000 * poke or schedule a different guest VCPU. */
8001 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8002 if (RT_SUCCESS(rc))
8003 return VINF_EM_RAW_INTERRUPT;
8004
8005 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8006 return rc;
8007}
8008
8009
8010/**
8011 * VM-exit handler for when the TPR value is lowered below the specified
8012 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8013 */
8014HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8015{
8016 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8017 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8018
8019 /*
8020 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8021 * We'll re-evaluate pending interrupts and inject them before the next VM
8022 * entry so we can just continue execution here.
8023 */
8024 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8025 return VINF_SUCCESS;
8026}
8027
8028
8029/**
8030 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8031 * VM-exit.
8032 *
8033 * @retval VINF_SUCCESS when guest execution can continue.
8034 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8035 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8036 * incompatible guest state for VMX execution (real-on-v86 case).
8037 */
8038HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8039{
8040 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8041 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8042
8043 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8044 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8045 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8046
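    /* The exit qualification tells us the access type (MOV to/from CRx, CLTS or LMSW), the control
       register involved and, where applicable, the general-purpose register and LMSW source data. */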
8047 VBOXSTRICTRC rcStrict;
8048 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8049 uint64_t const uExitQual = pVmxTransient->uExitQual;
8050 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
8051 switch (uAccessType)
8052 {
8053 /*
8054 * MOV to CRx.
8055 */
8056 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8057 {
8058 /*
8059 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8060 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
8061 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8062 * PAE PDPTEs as well.
8063 */
8064 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8065 AssertRCReturn(rc, rc);
8066
8067 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8068#ifndef IN_NEM_DARWIN
8069 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8070#endif
8071 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8072 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8073
8074 /*
8075 * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8076 * - When nested paging isn't used.
8077 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8078 * - We are executing in the VM debug loop.
8079 */
8080#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8081# ifndef IN_NEM_DARWIN
8082 Assert( iCrReg != 3
8083 || !VM_IS_VMX_NESTED_PAGING(pVM)
8084 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8085 || pVCpu->hmr0.s.fUsingDebugLoop);
8086# else
8087 Assert( iCrReg != 3
8088 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8089# endif
8090#endif
8091
8092 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8093 Assert( iCrReg != 8
8094 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8095
8096 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8097 AssertMsg( rcStrict == VINF_SUCCESS
8098 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8099
8100#ifndef IN_NEM_DARWIN
8101 /*
8102 * This is a kludge for handling switches back to real mode when we try to use
8103 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8104 * deal with special selector values, so we have to return to ring-3 and run
8105 * there till the selector values are V86 mode compatible.
8106 *
8107 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8108 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8109 * this function.
8110 */
8111 if ( iCrReg == 0
8112 && rcStrict == VINF_SUCCESS
8113 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8114 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8115 && (uOldCr0 & X86_CR0_PE)
8116 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8117 {
8118 /** @todo Check selectors rather than returning all the time. */
8119 Assert(!pVmxTransient->fIsNestedGuest);
8120 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8121 rcStrict = VINF_EM_RESCHEDULE_REM;
8122 }
8123#endif
8124
8125 break;
8126 }
8127
8128 /*
8129 * MOV from CRx.
8130 */
8131 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8132 {
8133 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8134 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8135
8136 /*
8137 * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8138 * - When nested paging isn't used.
8139 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8140 * - We are executing in the VM debug loop.
8141 */
8142#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8143# ifndef IN_NEM_DARWIN
8144 Assert( iCrReg != 3
8145 || !VM_IS_VMX_NESTED_PAGING(pVM)
8146 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8147 || pVCpu->hmr0.s.fLeaveDone);
8148# else
8149 Assert( iCrReg != 3
8150 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8151# endif
8152#endif
8153
8154 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8155 Assert( iCrReg != 8
8156 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8157
8158 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8159 break;
8160 }
8161
8162 /*
8163 * CLTS (Clear Task-Switch Flag in CR0).
8164 */
8165 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8166 {
8167 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8168 break;
8169 }
8170
8171 /*
8172 * LMSW (Load Machine-Status Word into CR0).
8173 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8174 */
8175 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8176 {
8177 RTGCPTR GCPtrEffDst;
8178 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8179 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8180 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8181 if (fMemOperand)
8182 {
8183 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
8184 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8185 }
8186 else
8187 GCPtrEffDst = NIL_RTGCPTR;
8188 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8189 break;
8190 }
8191
8192 default:
8193 {
8194 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8195 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8196 }
8197 }
8198
8199 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8200 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8201 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8202
8203 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8204 NOREF(pVM);
8205 return rcStrict;
8206}
8207
8208
8209/**
8210 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8211 * VM-exit.
8212 */
8213HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8214{
8215 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8216 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8217
8218 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8219 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8220 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8221 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8222 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK
8223 | CPUMCTX_EXTRN_EFER);
8224 /* EFER MSR also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8225 AssertRCReturn(rc, rc);
8226
8227 /* See Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
8228 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8229 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8230 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8231 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8232 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8233 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
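    /* The I/O size field in the exit qualification encodes 0 = 1 byte, 1 = 2 bytes and 3 = 4 bytes;
       the value 2 is not used, hence the assertion and the gaps in the lookup tables below. */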
8234 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8235
8236 /*
8237 * Update exit history to see if this exit can be optimized.
8238 */
8239 VBOXSTRICTRC rcStrict;
8240 PCEMEXITREC pExitRec = NULL;
8241 if ( !fGstStepping
8242 && !fDbgStepping)
8243 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8244 !fIOString
8245 ? !fIOWrite
8246 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8247 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8248 : !fIOWrite
8249 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8250 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8251 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8252 if (!pExitRec)
8253 {
8254 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8255 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
8256
8257 uint32_t const cbValue = s_aIOSizes[uIOSize];
8258 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8259 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8260 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8261 if (fIOString)
8262 {
8263 /*
8264 * INS/OUTS - I/O String instruction.
8265 *
8266 * Use instruction-information if available, otherwise fall back on
8267 * interpreting the instruction.
8268 */
8269 Log4Func(("cs:rip=%#04x:%#RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8270 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8271 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8272 if (fInsOutsInfo)
8273 {
8274 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8275 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8276 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8277 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8278 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8279 if (fIOWrite)
8280 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8281 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8282 else
8283 {
8284 /*
8285 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8286 * Hence the "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8287 * See Intel Instruction spec. for "INS".
8288 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8289 */
8290 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8291 }
8292 }
8293 else
8294 rcStrict = IEMExecOne(pVCpu);
8295
8296 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8297 fUpdateRipAlready = true;
8298 }
8299 else
8300 {
8301 /*
8302 * IN/OUT - I/O instruction.
8303 */
8304 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8305 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8306 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8307 if (fIOWrite)
8308 {
8309 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8310 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8311#ifndef IN_NEM_DARWIN
8312 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8313 && !pCtx->eflags.Bits.u1TF)
8314 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8315#endif
8316 }
8317 else
8318 {
8319 uint32_t u32Result = 0;
8320 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8321 if (IOM_SUCCESS(rcStrict))
8322 {
8323 /* Save result of I/O IN instr. in AL/AX/EAX. */
8324 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8325 }
8326#ifndef IN_NEM_DARWIN
8327 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8328 && !pCtx->eflags.Bits.u1TF)
8329 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8330#endif
8331 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8332 }
8333 }
8334
8335 if (IOM_SUCCESS(rcStrict))
8336 {
8337 if (!fUpdateRipAlready)
8338 {
8339 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8340 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8341 }
8342
8343 /*
8344 * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault guru
8345 * meditation while booting a Fedora 17 64-bit guest.
8346 *
8347 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8348 */
8349 if (fIOString)
8350 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8351
8352 /*
8353 * If any I/O breakpoints are armed, we need to check if one triggered
8354 * and take appropriate action.
8355 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8356 */
8357 rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_DR7);
8358 AssertRCReturn(rc, rc);
8359
8360 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8361 * execution engines about whether hyper BPs and such are pending. */
8362 uint32_t const uDr7 = pCtx->dr[7];
8363 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8364 && X86_DR7_ANY_RW_IO(uDr7)
8365 && (pCtx->cr4 & X86_CR4_DE))
8366 || DBGFBpIsHwIoArmed(pVM)))
8367 {
8368 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8369
8370#ifndef IN_NEM_DARWIN
8371 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8372 VMMRZCallRing3Disable(pVCpu);
8373 HM_DISABLE_PREEMPT(pVCpu);
8374
8375 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8376
8377 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8378 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8379 {
8380 /* Raise #DB. */
8381 if (fIsGuestDbgActive)
8382 ASMSetDR6(pCtx->dr[6]);
8383 if (pCtx->dr[7] != uDr7)
8384 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8385
8386 vmxHCSetPendingXcptDB(pVCpu);
8387 }
8388 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8389 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8390 else if ( rcStrict2 != VINF_SUCCESS
8391 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8392 rcStrict = rcStrict2;
8393 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8394
8395 HM_RESTORE_PREEMPT();
8396 VMMRZCallRing3Enable(pVCpu);
8397#else
8398 /** @todo */
8399#endif
8400 }
8401 }
8402
8403#ifdef VBOX_STRICT
8404 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8405 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8406 Assert(!fIOWrite);
8407 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8408 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8409 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8410 Assert(fIOWrite);
8411 else
8412 {
8413# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8414 * statuses, that the VMM device and some others may return. See
8415 * IOM_SUCCESS() for guidance. */
8416 AssertMsg( RT_FAILURE(rcStrict)
8417 || rcStrict == VINF_SUCCESS
8418 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8419 || rcStrict == VINF_EM_DBG_BREAKPOINT
8420 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8421 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8422# endif
8423 }
8424#endif
8425 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8426 }
8427 else
8428 {
8429 /*
8430 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8431 */
8432 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8433 AssertRCReturn(rc2, rc2);
8434 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8435 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8436 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8437 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8438 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
8439 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
8440
8441 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8442 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8443
8444 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8445 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8446 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8447 }
8448 return rcStrict;
8449}
8450
8451
8452/**
8453 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8454 * VM-exit.
8455 */
8456HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8457{
8458 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8459
8460 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8461 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8462 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
8463 {
8464 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8465 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
8466 {
8467 uint32_t uErrCode;
8468 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
8469 {
8470 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8471 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
8472 }
8473 else
8474 uErrCode = 0;
8475
8476 RTGCUINTPTR GCPtrFaultAddress;
8477 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
8478 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
8479 else
8480 GCPtrFaultAddress = 0;
8481
8482 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8483
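    /* Re-queue the event that was being delivered (with its error code and, for #PF, the faulting
       address) so it is re-delivered when we return VINF_EM_RAW_INJECT_TRPM_EVENT below. */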
8484 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
8485 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
8486
8487 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
8488 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
8489 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8490 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8491 }
8492 }
8493
8494 /* Fall back to the interpreter to emulate the task-switch. */
8495 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8496 return VERR_EM_INTERPRETER;
8497}
8498
8499
8500/**
8501 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
8502 */
8503HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8504{
8505 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8506
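    /* The monitor-trap-flag is armed for single-stepping the guest; disarm it and report the
       completed step to the debugger. */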
8507 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8508 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
8509 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8510 AssertRC(rc);
8511 return VINF_EM_DBG_STEPPED;
8512}
8513
8514
8515/**
8516 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
8517 */
8518HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8519{
8520 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8521 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
8522
8523 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8524 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8525 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8526 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8527 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8528
8529 /*
8530 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8531 */
8532 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8533 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8534 {
8535 /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
8536 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
8537 {
8538 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8539 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8540 }
8541 }
8542 else
8543 {
8544 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8545 return rcStrict;
8546 }
8547
8548 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
8549 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8550 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8551 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8552 AssertRCReturn(rc, rc);
8553
8554 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses". */
8555 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
8556 switch (uAccessType)
8557 {
8558#ifndef IN_NEM_DARWIN
8559 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
8560 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
8561 {
8562 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
8563 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
8564 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
8565
8566 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
8567 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
8568 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
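/* The page-aligned APIC base plus the access offset from the exit qualification yields the
   guest-physical address of the access, which is handed to IOM as an MMIO access below. */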
8569 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
8570 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
8571
8572 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
8573 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
8574 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8575 if ( rcStrict == VINF_SUCCESS
8576 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8577 || rcStrict == VERR_PAGE_NOT_PRESENT)
8578 {
8579 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8580 | HM_CHANGED_GUEST_APIC_TPR);
8581 rcStrict = VINF_SUCCESS;
8582 }
8583 break;
8584 }
8585#else
8586 /** @todo */
8587#endif
8588
8589 default:
8590 {
8591 Log4Func(("uAccessType=%#x\n", uAccessType));
8592 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
8593 break;
8594 }
8595 }
8596
8597 if (rcStrict != VINF_SUCCESS)
8598 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
8599 return rcStrict;
8600}
8601
8602
8603/**
8604 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
8605 * VM-exit.
8606 */
8607HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8608{
8609 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8610 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8611
8612 /*
8613 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
8614 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
8615 * must emulate the MOV DRx access.
8616 */
8617 if (!pVmxTransient->fIsNestedGuest)
8618 {
8619 /* We should -not- get this VM-exit if the guest's debug registers were active. */
8620 if (pVmxTransient->fWasGuestDebugStateActive)
8621 {
8622 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
8623 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8624 }
8625
8626 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
8627 && !pVmxTransient->fWasHyperDebugStateActive)
8628 {
8629 Assert(!DBGFIsStepping(pVCpu));
8630 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
8631
8632 /* Don't intercept MOV DRx any more. */
8633 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
8634 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8635 AssertRC(rc);
8636
8637#ifndef IN_NEM_DARWIN
8638 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
8639 VMMRZCallRing3Disable(pVCpu);
8640 HM_DISABLE_PREEMPT(pVCpu);
8641
8642 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
8643 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
8644 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8645
8646 HM_RESTORE_PREEMPT();
8647 VMMRZCallRing3Enable(pVCpu);
8648#else
8649 CPUMR3NemActivateGuestDebugState(pVCpu);
8650 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8651 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
8652#endif
8653
8654#ifdef VBOX_WITH_STATISTICS
8655 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8656 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8657 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8658 else
8659 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8660#endif
8661 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
8662 return VINF_SUCCESS;
8663 }
8664 }
8665
8666 /*
8667 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires the EFER MSR and CS.
8668 * The EFER MSR is always up-to-date.
8669 * Import the segment registers and DR7 from the VMCS.
8670 */
8671 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8672 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8673 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
8674 AssertRCReturn(rc, rc);
8675 Log4Func(("cs:rip=%#04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
8676
8677 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8678 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8679 {
8680 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8681 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
8682 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
8683 if (RT_SUCCESS(rc))
8684 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7);
8685 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8686 }
8687 else
8688 {
8689 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8690 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
8691 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
8692 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8693 }
8694
8695 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
8696 if (RT_SUCCESS(rc))
8697 {
8698 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8699 AssertRCReturn(rc2, rc2);
8700 return VINF_SUCCESS;
8701 }
8702 return rc;
8703}
8704
8705
8706/**
8707 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
8708 * Conditional VM-exit.
8709 */
8710HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8711{
8712 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8713
8714#ifndef IN_NEM_DARWIN
8715 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8716
8717 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8718 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8719 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8720 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8721 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8722
8723 /*
8724 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8725 */
8726 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8727 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8728 {
8729 /*
8730 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
8731 * instruction emulation to inject the original event. Otherwise, injecting the original event
8732 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
8733 */
8734 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8735 { /* likely */ }
8736 else
8737 {
8738 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8739#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8740 /** @todo NSTVMX: Think about how this should be handled. */
8741 if (pVmxTransient->fIsNestedGuest)
8742 return VERR_VMX_IPE_3;
8743#endif
8744 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8745 }
8746 }
8747 else
8748 {
8749 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8750 return rcStrict;
8751 }
8752
8753 /*
8754 * Get sufficient state and update the exit history entry.
8755 */
8756 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8757 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8758 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8759 AssertRCReturn(rc, rc);
8760
8761 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
8762 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8763 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
8764 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8765 if (!pExitRec)
8766 {
8767 /*
8768 * If we succeed, resume guest execution.
8769 * If we fail in interpreting the instruction because we couldn't get the guest physical address
8770 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
8771 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
8772 * weird case. See @bugref{6043}.
8773 */
8774 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8775 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8776/** @todo bird: We can probably just go straight to IOM here and assume that
8777 * it's MMIO, then fall back on PGM if that hunch didn't work out so
8778 * well. However, we need to address that aliasing workarounds that
8779 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
8780 *
8781 * Might also be interesting to see if we can get this done more or
8782 * less locklessly inside IOM. Need to consider the lookup table
8783 * updating and use a bit more carefully first (or do all updates via
8784 * rendezvous) */
8785 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
8786 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
8787 if ( rcStrict == VINF_SUCCESS
8788 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8789 || rcStrict == VERR_PAGE_NOT_PRESENT)
8790 {
8791 /* Successfully handled MMIO operation. */
8792 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8793 | HM_CHANGED_GUEST_APIC_TPR);
8794 rcStrict = VINF_SUCCESS;
8795 }
8796 }
8797 else
8798 {
8799 /*
8800 * Frequent exit or something needing probing. Call EMHistoryExec.
8801 */
8802 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
8803 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
8804
8805 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8806 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8807
8808 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8809 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8810 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8811 }
8812 return rcStrict;
8813#else
8814 AssertFailed();
8815 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
8816#endif
8817}
8818
8819
8820/**
8821 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
8822 * VM-exit.
8823 */
8824HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8825{
8826 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8827#ifndef IN_NEM_DARWIN
8828 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8829
8830 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8831 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8832 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8833 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8834 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8835 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8836
8837 /*
8838 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8839 */
8840 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8841 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8842 {
8843 /*
8844 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
8845 * we shall resolve the nested #PF and re-inject the original event.
8846 */
8847 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8848 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
8849 }
8850 else
8851 {
8852 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8853 return rcStrict;
8854 }
8855
8856 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8857 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8858 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8859 AssertRCReturn(rc, rc);
8860
8861 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
8862 uint64_t const uExitQual = pVmxTransient->uExitQual;
8863 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
8864
8865 RTGCUINT uErrorCode = 0;
8866 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
8867 uErrorCode |= X86_TRAP_PF_ID;
8868 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8869 uErrorCode |= X86_TRAP_PF_RW;
8870 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
8871 uErrorCode |= X86_TRAP_PF_P;
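/* The EPT-violation exit qualification is folded into a #PF-style error code for PGM:
     instruction fetch                          -> X86_TRAP_PF_ID
     write access                               -> X86_TRAP_PF_RW
     any R/W/X permission present in the entry  -> X86_TRAP_PF_P (translation exists, access not permitted). */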
8872
8873 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8874 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%#RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
8875
8876 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8877
8878 /*
8879 * Handle the pagefault trap for the nested shadow table.
8880 */
8881 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
8882 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
8883 TRPMResetTrap(pVCpu);
8884
8885 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
8886 if ( rcStrict == VINF_SUCCESS
8887 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8888 || rcStrict == VERR_PAGE_NOT_PRESENT)
8889 {
8890 /* Successfully synced our nested page tables. */
8891 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
8892 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
8893 return VINF_SUCCESS;
8894 }
8895#else
8896 PVM pVM = pVCpu->CTX_SUFF(pVM);
8897 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
8898 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8899 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8900 vmxHCImportGuestRip(pVCpu);
8901 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
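/* RIP and CS are imported up front so the exit-history entries and log statements below can be
   keyed on CS:RIP. */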
8902
8903 /*
8904 * Ask PGM for information about the given GCPhys. We need to check if we're
8905 * out of sync first.
8906 */
8907 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE), false, false };
8908 PGMPHYSNEMPAGEINFO Info;
8909 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
8910 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
8911 if (RT_SUCCESS(rc))
8912 {
8913 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8914 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
8915 {
8916 if (State.fCanResume)
8917 {
8918 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
8919 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8920 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8921 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8922 State.fDidSomething ? "" : " no-change"));
8923 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
8924 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8925 return VINF_SUCCESS;
8926 }
8927 }
8928
8929 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
8930 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8931 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8932 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8933 State.fDidSomething ? "" : " no-change"));
8934 }
8935 else
8936 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
8937 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8938 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
8939
8940 /*
8941 * Emulate the memory access, either access handler or special memory.
8942 */
8943 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
8944 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8945 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
8946 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
8947 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8948
8949 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8950 AssertRCReturn(rc, rc);
8951
8952 VBOXSTRICTRC rcStrict;
8953 if (!pExitRec)
8954 rcStrict = IEMExecOne(pVCpu);
8955 else
8956 {
8957 /* Frequent access or probing. */
8958 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8959 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8960 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8961 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8962 }
8963
8964 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8965#endif
8966
8967 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8968 return rcStrict;
8969}
8970
8971
8972#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
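/*
 * Note: the VMX-instruction exit handlers below share the same basic shape: read the VM-exit
 * instruction length/information/qualification, import the minimal guest state IEM needs to decode
 * the instruction (RSP, segment registers, the hardware-virtualization state and the usual decode
 * mask), decode any memory operand, and let IEM emulate the instruction.  A VINF_IEM_RAISED_XCPT
 * result is folded into fCtxChanged and converted to VINF_SUCCESS since the exception has already
 * been queued for the guest.
 */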
8973/**
8974 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
8975 */
8976HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8977{
8978 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8979
8980 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8981 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8982 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8983 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8984 | CPUMCTX_EXTRN_HWVIRT
8985 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8986 AssertRCReturn(rc, rc);
8987
8988 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8989
8990 VMXVEXITINFO ExitInfo;
8991 RT_ZERO(ExitInfo);
8992 ExitInfo.uReason = pVmxTransient->uExitReason;
8993 ExitInfo.u64Qual = pVmxTransient->uExitQual;
8994 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
8995 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
8996 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
8997
8998 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
8999 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9000 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9001 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9002 {
9003 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9004 rcStrict = VINF_SUCCESS;
9005 }
9006 return rcStrict;
9007}
9008
9009
9010/**
9011 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9012 */
9013HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9014{
9015 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9016
9017 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
9018 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9019 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9020 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9021 AssertRCReturn(rc, rc);
9022
9023 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9024
9025 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9026 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9027 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9028 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9029 {
9030 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9031 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9032 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9033 }
9034 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9035 return rcStrict;
9036}
9037
9038
9039/**
9040 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9041 */
9042HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9043{
9044 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9045
9046 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9047 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9048 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9049 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9050 | CPUMCTX_EXTRN_HWVIRT
9051 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9052 AssertRCReturn(rc, rc);
9053
9054 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9055
9056 VMXVEXITINFO ExitInfo;
9057 RT_ZERO(ExitInfo);
9058 ExitInfo.uReason = pVmxTransient->uExitReason;
9059 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9060 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9061 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9062 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9063
9064 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9065 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9066 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9067 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9068 {
9069 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9070 rcStrict = VINF_SUCCESS;
9071 }
9072 return rcStrict;
9073}
9074
9075
9076/**
9077 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9078 */
9079HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9080{
9081 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9082
9083 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9084 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9085 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9086 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9087 | CPUMCTX_EXTRN_HWVIRT
9088 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9089 AssertRCReturn(rc, rc);
9090
9091 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9092
9093 VMXVEXITINFO ExitInfo;
9094 RT_ZERO(ExitInfo);
9095 ExitInfo.uReason = pVmxTransient->uExitReason;
9096 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9097 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9098 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9099 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9100
9101 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9102 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9103 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9104 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9105 {
9106 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9107 rcStrict = VINF_SUCCESS;
9108 }
9109 return rcStrict;
9110}
9111
9112
9113/**
9114 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9115 */
9116HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9117{
9118 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9119
9120 /*
9121 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9122 * thus might not need to import the shadow VMCS state. But it's safer to do so in case
9123 * code elsewhere dares look at unsynced VMCS fields.
9124 */
9125 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9126 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9127 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9128 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9129 | CPUMCTX_EXTRN_HWVIRT
9130 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9131 AssertRCReturn(rc, rc);
9132
9133 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9134
9135 VMXVEXITINFO ExitInfo;
9136 RT_ZERO(ExitInfo);
9137 ExitInfo.uReason = pVmxTransient->uExitReason;
9138 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9139 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9140 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9141 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9142 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9143
9144 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9145 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9146 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9147 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9148 {
9149 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9150 rcStrict = VINF_SUCCESS;
9151 }
9152 return rcStrict;
9153}
9154
9155
9156/**
9157 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9158 */
9159HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9160{
9161 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9162
9163 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME,
9164 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9165 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9166 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9167 AssertRCReturn(rc, rc);
9168
9169 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9170
9171 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9172 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9173 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9174 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9175 {
9176 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9177 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9178 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9179 }
9180 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9181 return rcStrict;
9182}
9183
9184
9185/**
9186 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9187 */
9188HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9189{
9190 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9191
9192 /*
9193 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, our HM hook is
9194 * invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and flags
9195 * re-loading the entire shadow VMCS, so we save the entire shadow VMCS here.
9196 */
9197 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9198 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9199 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9200 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9201 | CPUMCTX_EXTRN_HWVIRT
9202 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9203 AssertRCReturn(rc, rc);
9204
9205 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9206
9207 VMXVEXITINFO ExitInfo;
9208 RT_ZERO(ExitInfo);
9209 ExitInfo.uReason = pVmxTransient->uExitReason;
9210 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9211 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9212 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9213 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9214 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9215
9216 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9217 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9218 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9219 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9220 {
9221 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9222 rcStrict = VINF_SUCCESS;
9223 }
9224 return rcStrict;
9225}
9226
9227
9228/**
9229 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9230 */
9231HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9232{
9233 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9234
9235 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9236 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR4
9237 | CPUMCTX_EXTRN_HWVIRT
9238 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
9239 AssertRCReturn(rc, rc);
9240
9241 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9242
9243 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9244 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9245 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9246 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9247 {
9248 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9249 rcStrict = VINF_SUCCESS;
9250 }
9251 return rcStrict;
9252}
9253
9254
9255/**
9256 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9257 */
9258HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9259{
9260 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9261
9262 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9263 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9264 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9265 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9266 | CPUMCTX_EXTRN_HWVIRT
9267 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9268 AssertRCReturn(rc, rc);
9269
9270 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9271
9272 VMXVEXITINFO ExitInfo;
9273 RT_ZERO(ExitInfo);
9274 ExitInfo.uReason = pVmxTransient->uExitReason;
9275 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9276 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9277 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9278 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9279
9280 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9281 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9282 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9283 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9284 {
9285 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9286 rcStrict = VINF_SUCCESS;
9287 }
9288 return rcStrict;
9289}
9290
9291
9292/**
9293 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9294 */
9295HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9296{
9297 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9298
9299 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9300 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9301 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9302 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9303 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9304 AssertRCReturn(rc, rc);
9305
9306 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9307
9308 VMXVEXITINFO ExitInfo;
9309 RT_ZERO(ExitInfo);
9310 ExitInfo.uReason = pVmxTransient->uExitReason;
9311 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9312 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9313 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9314 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9315
9316 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9317 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9318 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9319 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9320 {
9321 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9322 rcStrict = VINF_SUCCESS;
9323 }
9324 return rcStrict;
9325}
9326
9327
9328# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9329/**
9330 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9331 */
9332HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9333{
9334 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9335
9336 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9337 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9338 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9339 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9340 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9341 AssertRCReturn(rc, rc);
9342
9343 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9344
9345 VMXVEXITINFO ExitInfo;
9346 RT_ZERO(ExitInfo);
9347 ExitInfo.uReason = pVmxTransient->uExitReason;
9348 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9349 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9350 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9351 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9352
9353 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9354 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9355 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9356 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9357 {
9358 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9359 rcStrict = VINF_SUCCESS;
9360 }
9361 return rcStrict;
9362}
9363# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9364#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9365/** @} */
9366
9367
9368#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9369/** @name Nested-guest VM-exit handlers.
9370 * @{
9371 */
9372/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9373/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9374/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
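/*
 * Note: the nested-guest VM-exit handlers below follow a common pattern: check whether the guest
 * hypervisor has the corresponding intercept enabled in the nested-guest VMCS controls.  If so,
 * the VM-exit is reflected to the guest hypervisor via the IEMExecVmxVmexit* APIs; otherwise the
 * exit is handled on its behalf by the regular (non-nested) handler.
 */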
9375
9376/**
9377 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9378 * Conditional VM-exit.
9379 */
9380HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9381{
9382 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9383
9384 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
9385
9386 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9387 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9388 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9389
9390 switch (uExitIntType)
9391 {
9392#ifndef IN_NEM_DARWIN
9393 /*
9394 * Physical NMIs:
9395 * We shouldn't direct host physical NMIs to the nested-guest. Dispatch them to the host.
9396 */
9397 case VMX_EXIT_INT_INFO_TYPE_NMI:
9398 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
9399#endif
9400
9401 /*
9402 * Hardware exceptions,
9403 * Software exceptions,
9404 * Privileged software exceptions:
9405 * Figure out if the exception must be delivered to the guest or the nested-guest.
9406 */
9407 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
9408 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
9409 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
9410 {
9411 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
9412 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9413 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9414 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9415
9416 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9417 bool const fIntercept = CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo),
9418 pVmxTransient->uExitIntErrorCode);
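/* CPUMIsGuestVmxXcptInterceptSet consults the nested-guest exception bitmap (and, for #PF, the
   page-fault error-code mask/match controls) to decide whether the guest hypervisor intercepts
   this exception. */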
9419 if (fIntercept)
9420 {
9421 /* Exit qualification is required for debug and page-fault exceptions. */
9422 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9423
9424 /*
9425 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
9426 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
9427 * length. However, if delivery of a software interrupt, software exception or privileged
9428 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
9429 */
9430 VMXVEXITINFO ExitInfo;
9431 RT_ZERO(ExitInfo);
9432 ExitInfo.uReason = pVmxTransient->uExitReason;
9433 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9434 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9435
9436 VMXVEXITEVENTINFO ExitEventInfo;
9437 RT_ZERO(ExitEventInfo);
9438 ExitEventInfo.uExitIntInfo = pVmxTransient->uExitIntInfo;
9439 ExitEventInfo.uExitIntErrCode = pVmxTransient->uExitIntErrorCode;
9440 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9441 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9442
9443#ifdef DEBUG_ramshankar
9444 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9445 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n", pVmxTransient->uExitIntInfo,
9446 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
9447 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9448 {
9449 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n", pVmxTransient->uIdtVectoringInfo,
9450 pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
9451 }
9452#endif
9453 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
9454 }
9455
9456 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
9457 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9458 return vmxHCExitXcpt(pVCpu, pVmxTransient);
9459 }
9460
9461 /*
9462 * Software interrupts:
9463 * VM-exits cannot be caused by software interrupts.
9464 *
9465 * External interrupts:
9466 * This should only happen when "acknowledge external interrupts on VM-exit"
9467 * control is set. However, we never set this when executing a guest or
9468 * nested-guest. For nested-guests it is emulated while injecting interrupts into
9469 * the guest.
9470 */
9471 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
9472 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
9473 default:
9474 {
9475 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
9476 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
9477 }
9478 }
9479}
9480
9481
9482/**
9483 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
9484 * Unconditional VM-exit.
9485 */
9486HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9487{
9488 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9489 return IEMExecVmxVmexitTripleFault(pVCpu);
9490}
9491
9492
9493/**
9494 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
9495 */
9496HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9497{
9498 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9499
9500 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
9501 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9502 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9503}
9504
9505
9506/**
9507 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
9508 */
9509HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9510{
9511 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9512
9513 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
9514 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9515 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9516}
9517
9518
9519/**
9520 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
9521 * Unconditional VM-exit.
9522 */
9523HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9524{
9525 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9526
9527 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9528 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9529 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9530 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9531
9532 VMXVEXITINFO ExitInfo;
9533 RT_ZERO(ExitInfo);
9534 ExitInfo.uReason = pVmxTransient->uExitReason;
9535 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9536 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9537
9538 VMXVEXITEVENTINFO ExitEventInfo;
9539 RT_ZERO(ExitEventInfo);
9540 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9541 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9542 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
9543}
9544
9545
9546/**
9547 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
9548 */
9549HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9550{
9551 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9552
9553 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
9554 {
9555 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9556 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9557 }
9558 return vmxHCExitHlt(pVCpu, pVmxTransient);
9559}
9560
9561
9562/**
9563 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
9564 */
9565HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9566{
9567 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9568
9569 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
9570 {
9571 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9572 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9573
9574 VMXVEXITINFO ExitInfo;
9575 RT_ZERO(ExitInfo);
9576 ExitInfo.uReason = pVmxTransient->uExitReason;
9577 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9578 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9579 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9580 }
9581 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
9582}
9583
9584
9585/**
9586 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
9587 */
9588HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9589{
9590 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9591
9592 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
9593 {
9594 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9595 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9596 }
9597 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
9598}
9599
9600
9601/**
9602 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
9603 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
9604 */
9605HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9606{
9607 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9608
9609 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
9610 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
9611
9612 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9613
9614 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
9615 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9616 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9617
9618 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
9619 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
9620 u64VmcsField &= UINT64_C(0xffffffff);
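/* Outside long mode only the low 32 bits of the register operand are used as the VMCS field encoding. */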
9621
9622 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
9623 {
9624 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9625 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9626
9627 VMXVEXITINFO ExitInfo;
9628 RT_ZERO(ExitInfo);
9629 ExitInfo.uReason = pVmxTransient->uExitReason;
9630 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9631 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9632 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
9633 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9634 }
9635
9636 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
9637 return vmxHCExitVmread(pVCpu, pVmxTransient);
9638 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
9639}
9640
9641
9642/**
9643 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
9644 */
9645HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9646{
9647 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9648
9649 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
9650 {
9651 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9652 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9653 }
9654
9655 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
9656}
9657
9658
9659/**
9660 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
9661 * Conditional VM-exit.
9662 */
9663HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9664{
9665 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9666
9667 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9668 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9669
9670 VBOXSTRICTRC rcStrict;
9671 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
9672 switch (uAccessType)
9673 {
9674 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
9675 {
9676 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9677 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9678 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9679 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9680
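/* Which control governs the intercept depends on the CR being written: CR0/CR4 use the guest/host
   masks and read shadows, CR3 uses the CR3-load exiting control together with the CR3-target list,
   and CR8 uses the CR8-load exiting control (see the CPUM helpers below). */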
9681 bool fIntercept;
9682 switch (iCrReg)
9683 {
9684 case 0:
9685 case 4:
9686 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
9687 break;
9688
9689 case 3:
9690 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
9691 break;
9692
9693 case 8:
9694 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
9695 break;
9696
9697 default:
9698 fIntercept = false;
9699 break;
9700 }
9701 if (fIntercept)
9702 {
9703 VMXVEXITINFO ExitInfo;
9704 RT_ZERO(ExitInfo);
9705 ExitInfo.uReason = pVmxTransient->uExitReason;
9706 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9707 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9708 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9709 }
9710 else
9711 {
9712 int const rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
9713 AssertRCReturn(rc, rc);
9714 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9715 }
9716 break;
9717 }
9718
9719 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
9720 {
9721 /*
9722 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
9723 * CR2 reads do not cause a VM-exit.
9724 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
9725 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
9726 */
9727 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9728 if ( iCrReg == 3
9729 || iCrReg == 8)
9730 {
9731 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
9732 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
9733 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
9734 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
9735 {
9736 VMXVEXITINFO ExitInfo;
9737 RT_ZERO(ExitInfo);
9738 ExitInfo.uReason = pVmxTransient->uExitReason;
9739 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9740 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9741 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9742 }
9743 else
9744 {
9745 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9746 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9747 }
9748 }
9749 else
9750 {
9751 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
9752 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
9753 }
9754 break;
9755 }
9756
9757 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
9758 {
9759 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
9760 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
9761 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
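/* CLTS causes a nested VM-exit only when CR0.TS is owned by the guest hypervisor (set in the CR0
   guest/host mask) and the read shadow has TS set, i.e. when clearing TS would be architecturally
   visible to the guest hypervisor. */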
9762 if ( (uGstHostMask & X86_CR0_TS)
9763 && (uReadShadow & X86_CR0_TS))
9764 {
9765 VMXVEXITINFO ExitInfo;
9766 RT_ZERO(ExitInfo);
9767 ExitInfo.uReason = pVmxTransient->uExitReason;
9768 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9769 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9770 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9771 }
9772 else
9773 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
9774 break;
9775 }
9776
9777 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
9778 {
9779 RTGCPTR GCPtrEffDst;
9780 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
9781 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
9782 if (fMemOperand)
9783 {
9784 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
9785 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
9786 }
9787 else
9788 GCPtrEffDst = NIL_RTGCPTR;
9789
9790 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
9791 {
9792 VMXVEXITINFO ExitInfo;
9793 RT_ZERO(ExitInfo);
9794 ExitInfo.uReason = pVmxTransient->uExitReason;
9795 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9796 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
9797 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9798 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9799 }
9800 else
9801 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
9802 break;
9803 }
9804
9805 default:
9806 {
9807 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
9808 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
9809 }
9810 }
9811
9812 if (rcStrict == VINF_IEM_RAISED_XCPT)
9813 {
9814 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9815 rcStrict = VINF_SUCCESS;
9816 }
9817 return rcStrict;
9818}
9819
9820
9821/**
9822 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
9823 * Conditional VM-exit.
9824 */
9825HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9826{
9827 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9828
9829 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
9830 {
9831 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9832 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9833
9834 VMXVEXITINFO ExitInfo;
9835 RT_ZERO(ExitInfo);
9836 ExitInfo.uReason = pVmxTransient->uExitReason;
9837 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9838 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9839 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9840 }
9841 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
9842}
9843
9844
9845/**
9846 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
9847 * Conditional VM-exit.
9848 */
9849HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9850{
9851 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9852
9853 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9854
9855 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
9856 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
9857 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
9858
9859 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
9860 uint8_t const cbAccess = s_aIOSizes[uIOSize];
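/* The size-of-access field encodes 0 = 1 byte, 1 = 2 bytes and 3 = 4 bytes; 2 is not used, hence
   the lookup table and the assertion above. */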
9861 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
9862 {
9863 /*
9864 * IN/OUT instruction:
9865 * - Provides VM-exit instruction length.
9866 *
9867 * INS/OUTS instruction:
9868 * - Provides VM-exit instruction length.
9869 * - Provides Guest-linear address.
9870 * - Optionally provides VM-exit instruction info (depends on CPU feature).
9871 */
9872 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9873 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9874
9875 /* Make sure we don't use stale/uninitialized VMX-transient info below. */
9876 pVmxTransient->ExitInstrInfo.u = 0;
9877 pVmxTransient->uGuestLinearAddr = 0;
9878
9879 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
9880 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
9881 if (fIOString)
9882 {
9883 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
9884 if (fVmxInsOutsInfo)
9885 {
9886 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
9887 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9888 }
9889 }
9890
9891 VMXVEXITINFO ExitInfo;
9892 RT_ZERO(ExitInfo);
9893 ExitInfo.uReason = pVmxTransient->uExitReason;
9894 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9895 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9896 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
9897 ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr;
9898 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9899 }
9900 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
9901}
9902
9903
9904/**
9905 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
9906 */
9907HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9908{
9909 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9910
9911 uint32_t fMsrpm;
9912 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9913 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9914 else
9915 fMsrpm = VMXMSRPM_EXIT_RD;
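/* Without MSR bitmaps every RDMSR executed by the nested-guest causes a VM-exit, so treat it as an
   unconditional read intercept. */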
9916
9917 if (fMsrpm & VMXMSRPM_EXIT_RD)
9918 {
9919 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9920 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9921 }
9922 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
9923}
9924
9925
9926/**
9927 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
9928 */
9929HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9930{
9931 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9932
9933 uint32_t fMsrpm;
9934 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9935 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9936 else
9937 fMsrpm = VMXMSRPM_EXIT_WR;
9938
9939 if (fMsrpm & VMXMSRPM_EXIT_WR)
9940 {
9941 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9942 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9943 }
9944 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
9945}
9946
9947
9948/**
9949 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
9950 */
9951HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9952{
9953 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9954
9955 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
9956 {
9957 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9958 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9959 }
9960 return vmxHCExitMwait(pVCpu, pVmxTransient);
9961}
9962
9963
9964/**
9965 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
9966 * VM-exit.
9967 */
9968HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9969{
9970 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9971
9972 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
9973 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
9974 VMXVEXITINFO ExitInfo;
9975 RT_ZERO(ExitInfo);
9976 ExitInfo.uReason = pVmxTransient->uExitReason;
9977 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
9978 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
9979}
9980
9981
9982/**
9983 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
9984 */
9985HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9986{
9987 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9988
9989 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
9990 {
9991 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9992 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9993 }
9994 return vmxHCExitMonitor(pVCpu, pVmxTransient);
9995}
9996
9997
9998/**
9999 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10000 */
10001HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10002{
10003 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10004
10005 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
10006 * PAUSE when executing a nested-guest? If it does not, we would not need
10007 * to check for the intercepts here. Just call VM-exit... */
10008
10009 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
10010 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
10011 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10012 {
10013 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10014 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10015 }
10016 return vmxHCExitPause(pVCpu, pVmxTransient);
10017}
10018
10019
10020/**
10021 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10022 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10023 */
10024HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10025{
10026 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10027
10028 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10029 {
10030 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
10031 VMXVEXITINFO ExitInfo;
10032 RT_ZERO(ExitInfo);
10033 ExitInfo.uReason = pVmxTransient->uExitReason;
10034 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
10035 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10036 }
10037 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10038}
10039
10040
10041/**
10042 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10043 * VM-exit.
10044 */
10045HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10046{
10047 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10048
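    /* The APIC access may have occurred while delivering an event through the nested-guest IDT,
       so also fetch the IDT-vectoring info and error code to hand to the nested hypervisor
       together with the VM-exit below. */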
10049 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10050 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10051 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10052 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10053
10054 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10055
10056 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10057 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10058
10059 VMXVEXITINFO ExitInfo;
10060 RT_ZERO(ExitInfo);
10061 ExitInfo.uReason = pVmxTransient->uExitReason;
10062 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10063 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10064
10065 VMXVEXITEVENTINFO ExitEventInfo;
10066 RT_ZERO(ExitEventInfo);
10067 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10068 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10069 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10070}
10071
10072
10073/**
10074 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10075 * Conditional VM-exit.
10076 */
10077HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10078{
10079 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10080
10081 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10082 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10083 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10084}
10085
10086
10087/**
10088 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10089 * Conditional VM-exit.
10090 */
10091HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10092{
10093 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10094
10095 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10096 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10097 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10098}
10099
10100
10101/**
10102 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10103 */
10104HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10105{
10106 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10107
10108 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10109 {
10110 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10111 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10112 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10113 }
10114 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10115}
10116
10117
10118/**
10119 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10120 */
10121HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10122{
10123 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10124
10125 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10126 {
10127 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10128 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10129 }
10130 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10131}
10132
10133
10134/**
10135 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10136 */
10137HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10138{
10139 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10140
10141 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10142 {
10143 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10144 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10145 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10146 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10147
10148 VMXVEXITINFO ExitInfo;
10149 RT_ZERO(ExitInfo);
10150 ExitInfo.uReason = pVmxTransient->uExitReason;
10151 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10152 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10153 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
10154 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10155 }
10156 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10157}
10158
10159
10160/**
10161 * Nested-guest VM-exit handler for invalid guest state
10162 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10163 */
10164HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10165{
10166 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10167
10168 /*
10169 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10170 * So if it does happen, it indicates a bug, possibly in the hardware-assisted VMX code.
10171 * Handle it as if the outer guest were in an invalid guest state.
10172 *
10173 * When the fast path is implemented, this should be changed to cause the corresponding
10174 * nested-guest VM-exit.
10175 */
10176 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10177}
10178
10179
10180/**
10181 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10182 * and only provide the instruction length.
10183 *
10184 * Unconditional VM-exit.
10185 */
10186HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10187{
10188 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10189
10190#ifdef VBOX_STRICT
10191 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10192 switch (pVmxTransient->uExitReason)
10193 {
10194 case VMX_EXIT_ENCLS:
10195 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10196 break;
10197
10198 case VMX_EXIT_VMFUNC:
10199 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10200 break;
10201 }
10202#endif
10203
10204 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10205 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10206}
10207
10208
10209/**
10210 * Nested-guest VM-exit handler for instructions that provide instruction length as
10211 * well as more information.
10212 *
10213 * Unconditional VM-exit.
10214 */
10215HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10216{
10217 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10218
10219#ifdef VBOX_STRICT
10220 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10221 switch (pVmxTransient->uExitReason)
10222 {
10223 case VMX_EXIT_GDTR_IDTR_ACCESS:
10224 case VMX_EXIT_LDTR_TR_ACCESS:
10225 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10226 break;
10227
10228 case VMX_EXIT_RDRAND:
10229 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10230 break;
10231
10232 case VMX_EXIT_RDSEED:
10233 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10234 break;
10235
10236 case VMX_EXIT_XSAVES:
10237 case VMX_EXIT_XRSTORS:
10238 /** @todo NSTVMX: Verify XSS-bitmap. */
10239 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10240 break;
10241
10242 case VMX_EXIT_UMWAIT:
10243 case VMX_EXIT_TPAUSE:
10244 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10245 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10246 break;
10247
10248 case VMX_EXIT_LOADIWKEY:
10249 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10250 break;
10251 }
10252#endif
10253
10254 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10255 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10256 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10257
10258 VMXVEXITINFO ExitInfo;
10259 RT_ZERO(ExitInfo);
10260 ExitInfo.uReason = pVmxTransient->uExitReason;
10261 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10262 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10263 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
10264 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10265}
10266
10267
10268# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10269/**
10270 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10271 * Conditional VM-exit.
10272 */
10273HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10274{
10275 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10276 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10277
10278 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10279 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10280 {
10281 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
10282 AssertRCReturn(rc, rc);
10283
10284 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10285 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
10286
10287 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10288 uint64_t const uExitQual = pVmxTransient->uExitQual;
10289
10290 RTGCPTR GCPtrNestedFault;
10291 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10292 if (fIsLinearAddrValid)
10293 {
10294 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
10295 GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
10296 }
10297 else
10298 GCPtrNestedFault = 0;
10299
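        /* Fold the EPT-violation qualification into x86 #PF-style error-code bits (instruction
           fetch, write access, and whether the EPT entry granted any access at all, i.e. the
           'present' bit) for the PGM nested-paging handler invoked below. */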
10300 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10301 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10302 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10303 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10304 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10305
10306 PGMPTWALK Walk;
10307 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10308 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, CPUMCTX2CORE(pCtx),
10309 GCPhysNestedFault, fIsLinearAddrValid, GCPtrNestedFault,
10310 &Walk);
10311 if (RT_SUCCESS(rcStrict))
10312 return rcStrict;
10313
10314 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10315 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10316 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10317
10318 VMXVEXITEVENTINFO ExitEventInfo;
10319 RT_ZERO(ExitEventInfo);
10320 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10321 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10322
10323 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10324 {
10325 VMXVEXITINFO ExitInfo;
10326 RT_ZERO(ExitInfo);
10327 ExitInfo.uReason = VMX_EXIT_EPT_VIOLATION;
10328 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10329 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10330 ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr;
10331 ExitInfo.u64GuestPhysAddr = pVmxTransient->uGuestPhysicalAddr;
10332 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10333 }
10334
10335 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10336 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10337 }
10338
10339 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10340}
10341
10342
10343/**
10344 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10345 * Conditional VM-exit.
10346 */
10347HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10348{
10349 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10350 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10351
10352 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10353 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10354 {
10355 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_ALL);
10356 AssertRCReturn(rc, rc);
10357
10358 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
10359
10360 PGMPTWALK Walk;
10361 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10362 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10363 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, CPUMCTX2CORE(pCtx),
10364 GCPhysNestedFault, false /* fIsLinearAddrValid */,
10365 0 /* GCPtrNestedFault */, &Walk);
10366 if (RT_SUCCESS(rcStrict))
10367 {
10368 AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
10369 return rcStrict;
10370 }
10371
10372 AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
10373 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10374 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10375
10376 VMXVEXITEVENTINFO ExitEventInfo;
10377 RT_ZERO(ExitEventInfo);
10378 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10379 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10380
10381 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10382 }
10383
10384 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
10385}
10386# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10387
10388/** @} */
10389#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10390
10391
10392/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10393 * probes.
10394 *
10395 * The following few functions and the associated structure contain the bloat
10396 * necessary for providing detailed debug events and dtrace probes as well as
10397 * reliable host side single stepping. This works on the principle of
10398 * "subclassing" the normal execution loop and workers. We replace the loop
10399 * method completely and override selected helpers to add necessary adjustments
10400 * to their core operation.
10401 *
10402 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10403 * any performance for debug and analysis features.
10404 *
10405 * @{
10406 */
10407
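/*
 * Illustrative sketch only (not compiled): the debug run loop elsewhere in this file is
 * expected to drive the helpers defined below roughly as follows; the local names
 * (DbgState, rcStrict) and the loop-exit checks are placeholders, not the actual code:
 *
 *     VMXRUNDBGSTATE DbgState;
 *     vmxHCRunDebugStateInit(pVCpu, pVmxTransient, &DbgState);
 *     vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState);
 *     for (;;)
 *     {
 *         vmxHCPreRunGuestDebugStateApply(pVCpu, pVmxTransient, &DbgState);  // commit to the VMCS
 *         // ... execute the guest and handle the VM-exit; exits flagged in
 *         //     DbgState.bmExitsToCheck are routed through vmxHCHandleExitDtraceEvents() ...
 *         vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState); // re-sync if DBGF/DTrace settings changed
 *         // ... break out on single-step completion, pending DBGF events, errors, etc. ...
 *     }
 *     rcStrict = vmxHCRunDebugStateRevert(pVCpu, pVmxTransient, &DbgState, rcStrict);
 */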
10408/**
10409 * Transient per-VCPU debug state of the VMCS and related info we save/restore
10410 * in the debug run loop.
10411 */
10412typedef struct VMXRUNDBGSTATE
10413{
10414 /** The RIP we started executing at. This is for detecting that we stepped. */
10415 uint64_t uRipStart;
10416 /** The CS we started executing with. */
10417 uint16_t uCsStart;
10418
10419 /** Whether we've actually modified the 1st execution control field. */
10420 bool fModifiedProcCtls : 1;
10421 /** Whether we've actually modified the 2nd execution control field. */
10422 bool fModifiedProcCtls2 : 1;
10423 /** Whether we've actually modified the exception bitmap. */
10424 bool fModifiedXcptBitmap : 1;
10425
10426    /** We desire the CR0 mask to be cleared. */
10427 bool fClearCr0Mask : 1;
10428    /** We desire the CR4 mask to be cleared. */
10429 bool fClearCr4Mask : 1;
10430 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10431 uint32_t fCpe1Extra;
10432 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10433 uint32_t fCpe1Unwanted;
10434 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10435 uint32_t fCpe2Extra;
10436 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
10437 uint32_t bmXcptExtra;
10438 /** The sequence number of the Dtrace provider settings the state was
10439 * configured against. */
10440 uint32_t uDtraceSettingsSeqNo;
10441 /** VM-exits to check (one bit per VM-exit). */
10442 uint32_t bmExitsToCheck[3];
10443
10444 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
10445 uint32_t fProcCtlsInitial;
10446 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
10447 uint32_t fProcCtls2Initial;
10448 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
10449 uint32_t bmXcptInitial;
10450} VMXRUNDBGSTATE;
10451AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
10452typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
10453
10454
10455/**
10456 * Initializes the VMXRUNDBGSTATE structure.
10457 *
10458 * @param pVCpu The cross context virtual CPU structure of the
10459 * calling EMT.
10460 * @param pVmxTransient The VMX-transient structure.
10461 * @param pDbgState The debug state to initialize.
10462 */
10463static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10464{
10465 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
10466 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
10467
10468 pDbgState->fModifiedProcCtls = false;
10469 pDbgState->fModifiedProcCtls2 = false;
10470 pDbgState->fModifiedXcptBitmap = false;
10471 pDbgState->fClearCr0Mask = false;
10472 pDbgState->fClearCr4Mask = false;
10473 pDbgState->fCpe1Extra = 0;
10474 pDbgState->fCpe1Unwanted = 0;
10475 pDbgState->fCpe2Extra = 0;
10476 pDbgState->bmXcptExtra = 0;
10477 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
10478 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
10479 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
10480}
10481
10482
10483/**
10484 * Updates the VMCS fields with changes requested by @a pDbgState.
10485 *
10486 * This is performed after hmR0VmxPreRunGuestDebugStateUpdate as well as
10487 * immediately before executing guest code, i.e. when interrupts are disabled.
10488 * We don't check status codes here as we cannot easily assert or return in the
10489 * latter case.
10490 *
10491 * @param pVCpu The cross context virtual CPU structure.
10492 * @param pVmxTransient The VMX-transient structure.
10493 * @param pDbgState The debug state.
10494 */
10495static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10496{
10497 /*
10498 * Ensure desired flags in VMCS control fields are set.
10499 * (Ignoring write failure here, as we're committed and it's just debug extras.)
10500 *
10501 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
10502 * there should be no stale data in pCtx at this point.
10503 */
10504 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10505 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
10506 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
10507 {
10508 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
10509 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
10510 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
10511 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
10512 pDbgState->fModifiedProcCtls = true;
10513 }
10514
10515 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
10516 {
10517 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
10518 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
10519 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
10520 pDbgState->fModifiedProcCtls2 = true;
10521 }
10522
10523 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
10524 {
10525 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
10526 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
10527 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
10528 pDbgState->fModifiedXcptBitmap = true;
10529 }
10530
10531 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
10532 {
10533 pVmcsInfo->u64Cr0Mask = 0;
10534 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
10535 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
10536 }
10537
10538 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
10539 {
10540 pVmcsInfo->u64Cr4Mask = 0;
10541 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
10542 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
10543 }
10544
10545 NOREF(pVCpu);
10546}
10547
10548
10549/**
10550 * Restores VMCS fields that were changed by hmR0VmxPreRunGuestDebugStateApply for
10551 * re-entry next time around.
10552 *
10553 * @returns Strict VBox status code (i.e. informational status codes too).
10554 * @param pVCpu The cross context virtual CPU structure.
10555 * @param pVmxTransient The VMX-transient structure.
10556 * @param pDbgState The debug state.
10557 * @param rcStrict The return code from executing the guest using single
10558 * stepping.
10559 */
10560static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
10561 VBOXSTRICTRC rcStrict)
10562{
10563 /*
10564 * Restore VM-exit control settings as we may not reenter this function the
10565 * next time around.
10566 */
10567 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10568
10569    /* We reload the initial value and trigger what we can of recalculations the
10570       next time around. From the looks of things, that's all that's required atm. */
10571 if (pDbgState->fModifiedProcCtls)
10572 {
10573 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
10574 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
10575 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
10576 AssertRC(rc2);
10577 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
10578 }
10579
10580 /* We're currently the only ones messing with this one, so just restore the
10581 cached value and reload the field. */
10582 if ( pDbgState->fModifiedProcCtls2
10583 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
10584 {
10585 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
10586 AssertRC(rc2);
10587 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
10588 }
10589
10590 /* If we've modified the exception bitmap, we restore it and trigger
10591 reloading and partial recalculation the next time around. */
10592 if (pDbgState->fModifiedXcptBitmap)
10593 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
10594
10595 return rcStrict;
10596}
10597
10598
10599/**
10600 * Configures VM-exit controls for current DBGF and DTrace settings.
10601 *
10602 * This updates @a pDbgState and the VMCS execution control fields to reflect
10603 * the necessary VM-exits demanded by DBGF and DTrace.
10604 *
10605 * @param pVCpu The cross context virtual CPU structure.
10606 * @param pVmxTransient The VMX-transient structure. May update
10607 * fUpdatedTscOffsettingAndPreemptTimer.
10608 * @param pDbgState The debug state.
10609 */
10610static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10611{
10612#ifndef IN_NEM_DARWIN
10613 /*
10614 * Take down the dtrace serial number so we can spot changes.
10615 */
10616 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
10617 ASMCompilerBarrier();
10618#endif
10619
10620 /*
10621 * We'll rebuild most of the middle block of data members (holding the
10622 * current settings) as we go along here, so start by clearing it all.
10623 */
10624 pDbgState->bmXcptExtra = 0;
10625 pDbgState->fCpe1Extra = 0;
10626 pDbgState->fCpe1Unwanted = 0;
10627 pDbgState->fCpe2Extra = 0;
10628 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
10629 pDbgState->bmExitsToCheck[i] = 0;
10630
10631 /*
10632 * Software interrupts (INT XXh) - no idea how to trigger these...
10633 */
10634 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10635 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
10636 || VBOXVMM_INT_SOFTWARE_ENABLED())
10637 {
10638 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
10639 }
10640
10641 /*
10642 * INT3 breakpoints - triggered by #BP exceptions.
10643 */
10644 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
10645 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
10646
10647 /*
10648 * Exception bitmap and XCPT events+probes.
10649 */
10650 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
10651 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
10652 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
10653
10654 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
10655 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
10656 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
10657 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
10658 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
10659 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
10660 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
10661 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
10662 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
10663 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
10664 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
10665 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
10666 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
10667 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
10668 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
10669 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
10670 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
10671 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
10672
10673 if (pDbgState->bmXcptExtra)
10674 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
10675
10676 /*
10677 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
10678 *
10679 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
10680 * So, when adding/changing/removing please don't forget to update it.
10681 *
10682     * Some of the macros are picking up local variables to save horizontal space
10683     * (being able to see it in a table is the lesser evil here).
10684 */
10685#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
10686 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
10687 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
10688#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
10689 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10690 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10691 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10692 } else do { } while (0)
10693#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
10694 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10695 { \
10696 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
10697 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10698 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10699 } else do { } while (0)
10700#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
10701 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10702 { \
10703 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
10704 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10705 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10706 } else do { } while (0)
10707#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
10708 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10709 { \
10710 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
10711 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10712 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10713 } else do { } while (0)
10714
10715 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
10716 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
10717 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
10718 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
10719 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
10720
10721 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
10722 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
10723 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
10724 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
10725 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
10726 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
10727 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
10728 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
10729 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
10730 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
10731 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
10732 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
10733 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
10734 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
10735 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
10736 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
10737 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
10738 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
10739 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
10740 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
10741 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
10742 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
10743 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
10744 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
10745 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
10746 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
10747 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
10748 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
10749 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
10750 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
10751 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
10752 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
10753 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
10754 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
10755 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
10756 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
10757
10758 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
10759 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
10760 {
10761 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4
10762 | CPUMCTX_EXTRN_APIC_TPR);
10763 AssertRC(rc);
10764
10765#if 0 /** @todo fix me */
10766 pDbgState->fClearCr0Mask = true;
10767 pDbgState->fClearCr4Mask = true;
10768#endif
10769 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
10770 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
10771 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
10772 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
10773 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
10774 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
10775 require clearing here and in the loop if we start using it. */
10776 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
10777 }
10778 else
10779 {
10780 if (pDbgState->fClearCr0Mask)
10781 {
10782 pDbgState->fClearCr0Mask = false;
10783 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
10784 }
10785 if (pDbgState->fClearCr4Mask)
10786 {
10787 pDbgState->fClearCr4Mask = false;
10788 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
10789 }
10790 }
10791 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
10792 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
10793
10794 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
10795 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
10796 {
10797 /** @todo later, need to fix handler as it assumes this won't usually happen. */
10798 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
10799 }
10800 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
10801 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
10802
10803 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
10804 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
10805 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
10806 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
10807 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
10808 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
10809 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
10810 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
10811#if 0 /** @todo too slow, fix handler. */
10812 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
10813#endif
10814 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
10815
10816 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
10817 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
10818 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
10819 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
10820 {
10821 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
10822 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
10823 }
10824 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10825 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10826 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10827 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10828
10829 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
10830 || IS_EITHER_ENABLED(pVM, INSTR_STR)
10831 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
10832 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
10833 {
10834 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
10835 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
10836 }
10837 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
10838 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
10839 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
10840 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
10841
10842 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
10843 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
10844 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
10845 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
10846 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
10847 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
10848 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
10849 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
10850 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
10851 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
10852 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
10853 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
10854 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
10855 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
10856 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
10857 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
10858 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
10859 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
10860 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
10861 SET_ONLY_XBM_IF_EITHER_EN(EXIT_XSAVES, VMX_EXIT_XSAVES);
10862 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
10863 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
10864
10865#undef IS_EITHER_ENABLED
10866#undef SET_ONLY_XBM_IF_EITHER_EN
10867#undef SET_CPE1_XBM_IF_EITHER_EN
10868#undef SET_CPEU_XBM_IF_EITHER_EN
10869#undef SET_CPE2_XBM_IF_EITHER_EN
10870
10871    /*
10872     * Sanitize the control stuff: drop unsupported bits, don't clear must-be-one bits.
10873     */
10874 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
10875 if (pDbgState->fCpe2Extra)
10876 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
10877 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
10878 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
10879#ifndef IN_NEM_DARWIN
10880 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
10881 {
10882 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
10883 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
10884 }
10885#else
10886 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
10887 {
10888 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
10889 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
10890 }
10891#endif
10892
10893 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
10894 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
10895 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
10896 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
10897}
10898
10899
10900/**
10901 * Fires off DBGF events and dtrace probes for a VM-exit when it's
10902 * appropriate.
10903 *
10904 * The caller has checked the VM-exit against the
10905 * VMXRUNDBGSTATE::bmExitsToCheck bitmap. The caller has checked for NMIs
10906 * already, so we don't have to do that either.
10907 *
10908 * @returns Strict VBox status code (i.e. informational status codes too).
10909 * @param pVCpu The cross context virtual CPU structure.
10910 * @param pVmxTransient The VMX-transient structure.
10911 * @param uExitReason The VM-exit reason.
10912 *
10913 * @remarks The name of this function is displayed by dtrace, so keep it short
10914 * and to the point. No longer than 33 chars long, please.
10915 */
10916static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
10917{
10918 /*
10919 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
10920 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
10921 *
10922 * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate
10923 * does. Must add/change/remove both places. Same ordering, please.
10924 *
10925 * Added/removed events must also be reflected in the next section
10926 * where we dispatch dtrace events.
10927 */
10928 bool fDtrace1 = false;
10929 bool fDtrace2 = false;
10930 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
10931 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
10932 uint32_t uEventArg = 0;
10933#define SET_EXIT(a_EventSubName) \
10934 do { \
10935 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
10936 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
10937 } while (0)
10938#define SET_BOTH(a_EventSubName) \
10939 do { \
10940 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
10941 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
10942 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
10943 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
10944 } while (0)
10945 switch (uExitReason)
10946 {
10947 case VMX_EXIT_MTF:
10948 return vmxHCExitMtf(pVCpu, pVmxTransient);
10949
10950 case VMX_EXIT_XCPT_OR_NMI:
10951 {
10952 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
10953 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
10954 {
10955 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
10956 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
10957 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
10958 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
10959 {
10960 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
10961 {
10962 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
10963 uEventArg = pVmxTransient->uExitIntErrorCode;
10964 }
10965 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
10966 switch (enmEvent1)
10967 {
10968 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
10969 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
10970 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
10971 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
10972 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
10973 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
10974 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
10975 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
10976 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
10977 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
10978 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
10979 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
10980 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
10981 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
10982 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
10983 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
10984 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
10985 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
10986 default: break;
10987 }
10988 }
10989 else
10990 AssertFailed();
10991 break;
10992
10993 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10994 uEventArg = idxVector;
10995 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
10996 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
10997 break;
10998 }
10999 break;
11000 }
11001
11002 case VMX_EXIT_TRIPLE_FAULT:
11003 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11004 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11005 break;
11006 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11007 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11008 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11009 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11010 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11011
11012 /* Instruction specific VM-exits: */
11013 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11014 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11015 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11016 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11017 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11018 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11019 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11020 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11021 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11022 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11023 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11024 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11025 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11026 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11027 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11028 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11029 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11030 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11031 case VMX_EXIT_MOV_CRX:
11032 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11033 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11034 SET_BOTH(CRX_READ);
11035 else
11036 SET_BOTH(CRX_WRITE);
11037 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11038 break;
11039 case VMX_EXIT_MOV_DRX:
11040 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11041 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11042 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11043 SET_BOTH(DRX_READ);
11044 else
11045 SET_BOTH(DRX_WRITE);
11046 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11047 break;
11048 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11049 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11050 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11051 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11052 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11053 case VMX_EXIT_GDTR_IDTR_ACCESS:
11054 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
11055 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11056 {
11057 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11058 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11059 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11060 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11061 }
11062 break;
11063
11064 case VMX_EXIT_LDTR_TR_ACCESS:
11065 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
11066 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11067 {
11068 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11069 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11070 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11071 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11072 }
11073 break;
11074
11075 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11076 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11077 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11078 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11079 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11080 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11081 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11082 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11083 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11084 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11085 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11086
11087 /* Events that aren't relevant at this point. */
11088 case VMX_EXIT_EXT_INT:
11089 case VMX_EXIT_INT_WINDOW:
11090 case VMX_EXIT_NMI_WINDOW:
11091 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11092 case VMX_EXIT_PREEMPT_TIMER:
11093 case VMX_EXIT_IO_INSTR:
11094 break;
11095
11096 /* Errors and unexpected events. */
11097 case VMX_EXIT_INIT_SIGNAL:
11098 case VMX_EXIT_SIPI:
11099 case VMX_EXIT_IO_SMI:
11100 case VMX_EXIT_SMI:
11101 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11102 case VMX_EXIT_ERR_MSR_LOAD:
11103 case VMX_EXIT_ERR_MACHINE_CHECK:
11104 case VMX_EXIT_PML_FULL:
11105 case VMX_EXIT_VIRTUALIZED_EOI:
11106 break;
11107
11108 default:
11109 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11110 break;
11111 }
11112#undef SET_BOTH
11113#undef SET_EXIT
11114
11115 /*
11116     * Dtrace tracepoints go first. We do them all here at once so we don't
11117     * have to repeat the guest-state saving boilerplate a few dozen times.
11118     * The downside is that we've got to repeat the switch, though this time
11119     * we use enmEvent since the probes are a subset of what DBGF does.
11120 */
11121 if (fDtrace1 || fDtrace2)
11122 {
11123 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11124 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11125 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11126 switch (enmEvent1)
11127 {
11128 /** @todo consider which extra parameters would be helpful for each probe. */
11129 case DBGFEVENT_END: break;
11130 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11131 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11132 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11133 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11134 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11135 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11136 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11137 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11138 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11139 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11140 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11141 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11142 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11143 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11144 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11145 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11146 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11147 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11148 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11149 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11150 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11151 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11152 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11153 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11154 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11155 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11156 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11157 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11158 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11159 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11160 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11161 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11162 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11163 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11164 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11165 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11166 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11167 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11168 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11169 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11170 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11171 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11172 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11173 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11174 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11175 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11176 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11177 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11178 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11179 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11180 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11181 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11182 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11183 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11184 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11185 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11186 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11187 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11188 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11189 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11190 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11191 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11192 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11193 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11194 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11195 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11196 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11197 }
11198 switch (enmEvent2)
11199 {
11200 /** @todo consider which extra parameters would be helpful for each probe. */
11201 case DBGFEVENT_END: break;
11202 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11203 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11204 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11205 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11206 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11207 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11208 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11209 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11210 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11211 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11212 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11213 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11214 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11215 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11216 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11217 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11218 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11219 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11220 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11221 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11222 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11223 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11224 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11225 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11226 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11227 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11228 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11229 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11230 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11231 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11232 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11233 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11234 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11235 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11236 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11237 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11238 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11239 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11240 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11241 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11242 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11243 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11244 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11245 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11246 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11247 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11248 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11249 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11250 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11251 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11252 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11253 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11254 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11255 }
11256 }
11257
11258 /*
 11259 * Fire off the DBGF event, if enabled (our check here is just a quick one,
11260 * the DBGF call will do a full check).
11261 *
11262 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
 11263 * Note! If we have two events, we prioritize the first, i.e. the instruction
11264 * one, in order to avoid event nesting.
11265 */
11266 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11267 if ( enmEvent1 != DBGFEVENT_END
11268 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11269 {
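        /* Import CS and RIP before raising the event; DBGF presumably needs the guest location when reporting it. */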
11270 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11271 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11272 if (rcStrict != VINF_SUCCESS)
11273 return rcStrict;
11274 }
11275 else if ( enmEvent2 != DBGFEVENT_END
11276 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11277 {
11278 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11279 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11280 if (rcStrict != VINF_SUCCESS)
11281 return rcStrict;
11282 }
11283
11284 return VINF_SUCCESS;
11285}
11286
11287
11288/**
11289 * Single-stepping VM-exit filtering.
11290 *
 11291 * This preprocesses VM-exits and decides whether we've gotten far enough to
 11292 * return VINF_EM_DBG_STEPPED already. If not, normal VM-exit handling is
 11293 * performed.
11294 *
11295 * @returns Strict VBox status code (i.e. informational status codes too).
11296 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11297 * @param pVmxTransient The VMX-transient structure.
11298 * @param pDbgState The debug state.
11299 */
11300DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11301{
11302 /*
11303 * Expensive (saves context) generic dtrace VM-exit probe.
11304 */
11305 uint32_t const uExitReason = pVmxTransient->uExitReason;
11306 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11307 { /* more likely */ }
11308 else
11309 {
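        /* Reading the exit qualification and importing the full guest state is the expensive part, so only do it when the probe is actually enabled. */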
11310 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11311 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11312 AssertRC(rc);
11313 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11314 }
11315
11316#ifndef IN_NEM_DARWIN
11317 /*
11318 * Check for host NMI, just to get that out of the way.
11319 */
11320 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11321 { /* normally likely */ }
11322 else
11323 {
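    /* Use the exit interruption info to tell a host NMI apart from guest exceptions; host NMIs are forwarded to the host handler right away. */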
11324 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
11325 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11326 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11327 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11328 }
11329#endif
11330
11331 /*
11332 * Check for single stepping event if we're stepping.
11333 */
11334 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11335 {
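        /* MTF exits go straight to their dedicated handler; the event and instruction exits below are instead checked for forward progress. */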
11336 switch (uExitReason)
11337 {
11338 case VMX_EXIT_MTF:
11339 return vmxHCExitMtf(pVCpu, pVmxTransient);
11340
11341 /* Various events: */
11342 case VMX_EXIT_XCPT_OR_NMI:
11343 case VMX_EXIT_EXT_INT:
11344 case VMX_EXIT_TRIPLE_FAULT:
11345 case VMX_EXIT_INT_WINDOW:
11346 case VMX_EXIT_NMI_WINDOW:
11347 case VMX_EXIT_TASK_SWITCH:
11348 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11349 case VMX_EXIT_APIC_ACCESS:
11350 case VMX_EXIT_EPT_VIOLATION:
11351 case VMX_EXIT_EPT_MISCONFIG:
11352 case VMX_EXIT_PREEMPT_TIMER:
11353
11354 /* Instruction specific VM-exits: */
11355 case VMX_EXIT_CPUID:
11356 case VMX_EXIT_GETSEC:
11357 case VMX_EXIT_HLT:
11358 case VMX_EXIT_INVD:
11359 case VMX_EXIT_INVLPG:
11360 case VMX_EXIT_RDPMC:
11361 case VMX_EXIT_RDTSC:
11362 case VMX_EXIT_RSM:
11363 case VMX_EXIT_VMCALL:
11364 case VMX_EXIT_VMCLEAR:
11365 case VMX_EXIT_VMLAUNCH:
11366 case VMX_EXIT_VMPTRLD:
11367 case VMX_EXIT_VMPTRST:
11368 case VMX_EXIT_VMREAD:
11369 case VMX_EXIT_VMRESUME:
11370 case VMX_EXIT_VMWRITE:
11371 case VMX_EXIT_VMXOFF:
11372 case VMX_EXIT_VMXON:
11373 case VMX_EXIT_MOV_CRX:
11374 case VMX_EXIT_MOV_DRX:
11375 case VMX_EXIT_IO_INSTR:
11376 case VMX_EXIT_RDMSR:
11377 case VMX_EXIT_WRMSR:
11378 case VMX_EXIT_MWAIT:
11379 case VMX_EXIT_MONITOR:
11380 case VMX_EXIT_PAUSE:
11381 case VMX_EXIT_GDTR_IDTR_ACCESS:
11382 case VMX_EXIT_LDTR_TR_ACCESS:
11383 case VMX_EXIT_INVEPT:
11384 case VMX_EXIT_RDTSCP:
11385 case VMX_EXIT_INVVPID:
11386 case VMX_EXIT_WBINVD:
11387 case VMX_EXIT_XSETBV:
11388 case VMX_EXIT_RDRAND:
11389 case VMX_EXIT_INVPCID:
11390 case VMX_EXIT_VMFUNC:
11391 case VMX_EXIT_RDSEED:
11392 case VMX_EXIT_XSAVES:
11393 case VMX_EXIT_XRSTORS:
11394 {
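                /* The step is considered complete once RIP or CS differs from where single-stepping started. */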
11395 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11396 AssertRCReturn(rc, rc);
11397 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11398 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11399 return VINF_EM_DBG_STEPPED;
11400 break;
11401 }
11402
11403 /* Errors and unexpected events: */
11404 case VMX_EXIT_INIT_SIGNAL:
11405 case VMX_EXIT_SIPI:
11406 case VMX_EXIT_IO_SMI:
11407 case VMX_EXIT_SMI:
11408 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11409 case VMX_EXIT_ERR_MSR_LOAD:
11410 case VMX_EXIT_ERR_MACHINE_CHECK:
11411 case VMX_EXIT_PML_FULL:
11412 case VMX_EXIT_VIRTUALIZED_EOI:
 11413 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault-like, so we presumably must process it. */
11414 break;
11415
11416 default:
11417 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11418 break;
11419 }
11420 }
11421
11422 /*
11423 * Check for debugger event breakpoints and dtrace probes.
11424 */
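    /* bmExitsToCheck is a bitmap indexed by VM-exit reason; DBGF/DTrace processing is only invoked for exits whose bit is set. */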
11425 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11426 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11427 {
11428 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11429 if (rcStrict != VINF_SUCCESS)
11430 return rcStrict;
11431 }
11432
11433 /*
11434 * Normal processing.
11435 */
11436#ifdef HMVMX_USE_FUNCTION_TABLE
11437 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
11438#else
11439 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
11440#endif
11441}
11442
11443/** @} */