VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h

Last change on this file was 104259, checked in by vboxsync, 6 weeks ago

VMM/IEM: Fixed missing import-all-state in the history-exec/probe code path of vmxHCExitEptMisconfig.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 529.5 KB
Line 
1/* $Id: VMXAllTemplate.cpp.h 104259 2024-04-09 23:34:18Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_64)
33# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_64 is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_64)
38# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_64 is missing"
39#endif
40
/** Enables condensing of VMREAD instructions, see vmxHCReadToTransient(). */
#define HMVMX_WITH_CONDENSED_VMREADS

/** Use the function table (g_aVMExitHandlers) for dispatching VM-exits
 *  instead of an inline switch, see vmxHCHandleExit(). */
#define HMVMX_USE_FUNCTION_TABLE

/** @name Tagged-TLB flush handler selectors.
 * Determine which tagged-TLB flush handler to use; chosen once according to
 * which of EPT and/or VPID the CPU/configuration provides (per the names).
 * @{ */
#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
/** @} */
52
/** Assert that all the given fields have been read from the VMCS.
 *
 * Strict-builds only; compiles to nothing otherwise.
 *
 * @param a_pVmxTransient   Pointer to the VMX-transient structure whose
 *                          fVmcsFieldsRead bitmap is checked.
 * @param a_fReadFields     Mask of HMVMX_READ_XXX fields that must all have
 *                          been read already.
 *
 * @note The original expansion dereferenced the caller-scope identifier
 *       @c pVmxTransient instead of the macro parameter, silently ignoring
 *       the argument; it is now properly parenthesized and uses the
 *       parameter.
 */
#ifdef VBOX_STRICT
# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
    do { \
        uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
        Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
    } while (0)
#else
# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
#endif
63
/**
 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
 * guest using hardware-assisted VMX.
 *
 * This excludes state like GPRs (other than RSP) which are always swapped
 * and restored across the world-switch, and also registers like EFER,
 * MSR which cannot be modified by the guest without causing a VM-exit.
 */
#define HMVMX_CPUMCTX_EXTRN_ALL      (  CPUMCTX_EXTRN_RIP             \
                                      | CPUMCTX_EXTRN_RFLAGS          \
                                      | CPUMCTX_EXTRN_RSP             \
                                      | CPUMCTX_EXTRN_SREG_MASK       \
                                      | CPUMCTX_EXTRN_TABLE_MASK      \
                                      | CPUMCTX_EXTRN_KERNEL_GS_BASE  \
                                      | CPUMCTX_EXTRN_SYSCALL_MSRS    \
                                      | CPUMCTX_EXTRN_SYSENTER_MSRS   \
                                      | CPUMCTX_EXTRN_TSC_AUX         \
                                      | CPUMCTX_EXTRN_OTHER_MSRS      \
                                      | CPUMCTX_EXTRN_CR0             \
                                      | CPUMCTX_EXTRN_CR3             \
                                      | CPUMCTX_EXTRN_CR4             \
                                      | CPUMCTX_EXTRN_DR7             \
                                      | CPUMCTX_EXTRN_HWVIRT         \
                                      | CPUMCTX_EXTRN_INHIBIT_INT     \
                                      | CPUMCTX_EXTRN_INHIBIT_NMI)
89
/**
 * Guest-CPU state required for split-lock \#AC handling VM-exits.
 *
 * CR0 (AM bit), RFLAGS (AC bit) and CS/SS are what vmxHCIsSplitLockAcXcpt()
 * inspects to distinguish a split-lock \#AC from a 486-style alignment-check
 * \#AC.
 */
#define HMVMX_CPUMCTX_XPCT_AC        (  CPUMCTX_EXTRN_CR0     \
                                      | CPUMCTX_EXTRN_RFLAGS  \
                                      | CPUMCTX_EXTRN_SS      \
                                      | CPUMCTX_EXTRN_CS)
97
/**
 * Exception bitmap mask for real-mode guests (real-on-v86).
 *
 * We need to intercept all exceptions manually except:
 * - \#AC and \#DB are always intercepted to prevent the CPU from deadlocking
 *   due to bugs in Intel CPUs.
 * - \#PF need not be intercepted even in real-mode if we have nested paging
 *   support.
 */
#define HMVMX_REAL_MODE_XCPT_MASK    (  RT_BIT(X86_XCPT_DE)  /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI)   \
                                      | RT_BIT(X86_XCPT_BP)             | RT_BIT(X86_XCPT_OF)    | RT_BIT(X86_XCPT_BR)    \
                                      | RT_BIT(X86_XCPT_UD)             | RT_BIT(X86_XCPT_NM)    | RT_BIT(X86_XCPT_DF)    \
                                      | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS)    | RT_BIT(X86_XCPT_NP)    \
                                      | RT_BIT(X86_XCPT_SS)             | RT_BIT(X86_XCPT_GP)  /* RT_BIT(X86_XCPT_PF) */  \
                                      | RT_BIT(X86_XCPT_MF)  /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC)    \
                                      | RT_BIT(X86_XCPT_XF))

/** Maximum VM-instruction error number (bounds g_apszVmxInstrErrors below). */
#define HMVMX_INSTR_ERROR_MAX        28
117
/** @name Profiling macros for the VM-exit dispatch path.
 * Wrap STAM advanced-profiling start/stop around exit dispatching; no-ops
 * unless HM_PROFILE_EXIT_DISPATCH is defined.
 * @{ */
#ifdef HM_PROFILE_EXIT_DISPATCH
# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
# define HMVMX_STOP_EXIT_DISPATCH_PROF()  STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
#else
# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
# define HMVMX_STOP_EXIT_DISPATCH_PROF()  do { } while (0)
#endif
/** @} */
126
#ifndef IN_NEM_DARWIN
/** Assert that preemption is disabled or covered by thread-context hooks. */
# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu)  Assert(   VMMR0ThreadCtxHookIsEnabled((a_pVCpu))   \
                                                    || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))

/** Assert that we haven't migrated CPUs when thread-context hooks are not
 *  used. */
# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu)      AssertMsg(   VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
                                                       || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
                                                       ("Illegal migration! Entered on CPU %u Current %u\n", \
                                                        (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
#else
/* These checks are meaningless for the NEM/darwin backend (no R0 world switch). */
# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu)  do { } while (0)
# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu)      do { } while (0)
#endif

/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
 *  context, i.e. the corresponding state has already been imported. */
#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz)  AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
                                                              ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
                                                               (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
148
/** Log the VM-exit reason with an easily visible marker to identify it in a
 * potential sea of logging data.
 *
 * @param a_pVCpu       The cross context virtual CPU structure.
 * @param a_uExitReason The VMX_EXIT_XXX reason to log.
 *
 * @note The original definition ended with a stray line-continuation
 *       backslash after the while(0), gluing the following source line into
 *       the macro; that backslash has been dropped.
 */
#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
    do { \
        Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
              HMGetVmxExitName(a_uExitReason))); \
    } while (0)
156
157
158/*********************************************************************************************************************************
159* Structures and Typedefs *
160*********************************************************************************************************************************/
/**
 * Memory operand read or write access.
 */
typedef enum VMXMEMACCESS
{
    VMXMEMACCESS_READ  = 0,   /**< The operand is read from memory. */
    VMXMEMACCESS_WRITE = 1    /**< The operand is written to memory. */
} VMXMEMACCESS;
169
170
/**
 * VMX VM-exit handler.
 *
 * When HMVMX_USE_FUNCTION_TABLE is defined the handlers are invoked through
 * g_aVMExitHandlers and therefore need the DECLCALLBACK calling convention;
 * otherwise they are plain functions inlined into the dispatch switch.
 *
 * @returns Strict VBox status code (i.e. informational status codes too).
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
#ifndef HMVMX_USE_FUNCTION_TABLE
typedef VBOXSTRICTRC               FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
#else
typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
/** Pointer to VM-exit handler. */
typedef FNVMXEXITHANDLER          *PFNVMXEXITHANDLER;
#endif

/**
 * VMX VM-exit handler, non-strict status code.
 *
 * This is generally the same as FNVMXEXITHANDLER, the NSRC bit is just FYI.
 *
 * @returns VBox status code, no informational status code returned.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 *
 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
 *          use of that status code will be replaced with VINF_EM_SOMETHING
 *          later when switching over to IEM.
 */
#ifndef HMVMX_USE_FUNCTION_TABLE
typedef int                        FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
#else
typedef FNVMXEXITHANDLER           FNVMXEXITHANDLERNSRC;
#endif
204
205
206/*********************************************************************************************************************************
207* Internal Functions *
208*********************************************************************************************************************************/
#ifndef HMVMX_USE_FUNCTION_TABLE
/* Switch-based dispatch: the handlers are inlined into vmxHCHandleExit(). */
DECLINLINE(VBOXSTRICTRC)           vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
# define HMVMX_EXIT_DECL           DECLINLINE(VBOXSTRICTRC)
# define HMVMX_EXIT_NSRC_DECL      DECLINLINE(int)
#else
/* Table-based dispatch: handlers must be real callbacks (see g_aVMExitHandlers). */
# define HMVMX_EXIT_DECL           static DECLCALLBACK(VBOXSTRICTRC)
# define HMVMX_EXIT_NSRC_DECL      HMVMX_EXIT_DECL
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
DECLINLINE(VBOXSTRICTRC)           vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
#endif

/** Imports the given CPUMCTX_EXTRN_XXX state subset from the VMCS into the
 *  guest-CPU context; forward declaration for use by the exit handlers. */
static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
222
/** @name VM-exit handler prototypes.
 * One handler per VMX_EXIT_XXX reason we expect; wired up in
 * g_aVMExitHandlers (or the dispatch switch) below.
 * @{
 */
static FNVMXEXITHANDLER            vmxHCExitXcptOrNmi;
static FNVMXEXITHANDLER            vmxHCExitExtInt;
static FNVMXEXITHANDLER            vmxHCExitTripleFault;
static FNVMXEXITHANDLERNSRC        vmxHCExitIntWindow;
static FNVMXEXITHANDLERNSRC        vmxHCExitNmiWindow;
static FNVMXEXITHANDLER            vmxHCExitTaskSwitch;
static FNVMXEXITHANDLER            vmxHCExitCpuid;
static FNVMXEXITHANDLER            vmxHCExitGetsec;
static FNVMXEXITHANDLER            vmxHCExitHlt;
static FNVMXEXITHANDLERNSRC        vmxHCExitInvd;
static FNVMXEXITHANDLER            vmxHCExitInvlpg;
static FNVMXEXITHANDLER            vmxHCExitRdpmc;
static FNVMXEXITHANDLER            vmxHCExitVmcall;
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
/* VMX instruction exits are only handled (vs. raising #UD) with nested hw-virt. */
static FNVMXEXITHANDLER            vmxHCExitVmclear;
static FNVMXEXITHANDLER            vmxHCExitVmlaunch;
static FNVMXEXITHANDLER            vmxHCExitVmptrld;
static FNVMXEXITHANDLER            vmxHCExitVmptrst;
static FNVMXEXITHANDLER            vmxHCExitVmread;
static FNVMXEXITHANDLER            vmxHCExitVmresume;
static FNVMXEXITHANDLER            vmxHCExitVmwrite;
static FNVMXEXITHANDLER            vmxHCExitVmxoff;
static FNVMXEXITHANDLER            vmxHCExitVmxon;
static FNVMXEXITHANDLER            vmxHCExitInvvpid;
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
static FNVMXEXITHANDLER            vmxHCExitInvept;
# endif
#endif
static FNVMXEXITHANDLER            vmxHCExitRdtsc;
static FNVMXEXITHANDLER            vmxHCExitMovCRx;
static FNVMXEXITHANDLER            vmxHCExitMovDRx;
static FNVMXEXITHANDLER            vmxHCExitIoInstr;
static FNVMXEXITHANDLER            vmxHCExitRdmsr;
static FNVMXEXITHANDLER            vmxHCExitWrmsr;
static FNVMXEXITHANDLER            vmxHCExitMwait;
static FNVMXEXITHANDLER            vmxHCExitMtf;
static FNVMXEXITHANDLER            vmxHCExitMonitor;
static FNVMXEXITHANDLER            vmxHCExitPause;
static FNVMXEXITHANDLERNSRC        vmxHCExitTprBelowThreshold;
static FNVMXEXITHANDLER            vmxHCExitApicAccess;
static FNVMXEXITHANDLER            vmxHCExitEptViolation;
static FNVMXEXITHANDLER            vmxHCExitEptMisconfig;
static FNVMXEXITHANDLER            vmxHCExitRdtscp;
static FNVMXEXITHANDLER            vmxHCExitPreemptTimer;
static FNVMXEXITHANDLERNSRC        vmxHCExitWbinvd;
static FNVMXEXITHANDLER            vmxHCExitXsetbv;
static FNVMXEXITHANDLER            vmxHCExitInvpcid;
#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
static FNVMXEXITHANDLERNSRC        vmxHCExitSetPendingXcptUD;
#endif
static FNVMXEXITHANDLERNSRC        vmxHCExitErrInvalidGuestState;
static FNVMXEXITHANDLERNSRC        vmxHCExitErrUnexpected;
/** @} */
279
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
/** @name Nested-guest VM-exit handler prototypes.
 * Counterparts of the handlers above, used while executing a nested-guest;
 * dispatched from vmxHCHandleExitNested().
 * @{
 */
static FNVMXEXITHANDLER            vmxHCExitXcptOrNmiNested;
static FNVMXEXITHANDLER            vmxHCExitTripleFaultNested;
static FNVMXEXITHANDLERNSRC        vmxHCExitIntWindowNested;
static FNVMXEXITHANDLERNSRC        vmxHCExitNmiWindowNested;
static FNVMXEXITHANDLER            vmxHCExitTaskSwitchNested;
static FNVMXEXITHANDLER            vmxHCExitHltNested;
static FNVMXEXITHANDLER            vmxHCExitInvlpgNested;
static FNVMXEXITHANDLER            vmxHCExitRdpmcNested;
static FNVMXEXITHANDLER            vmxHCExitVmreadVmwriteNested;
static FNVMXEXITHANDLER            vmxHCExitRdtscNested;
static FNVMXEXITHANDLER            vmxHCExitMovCRxNested;
static FNVMXEXITHANDLER            vmxHCExitMovDRxNested;
static FNVMXEXITHANDLER            vmxHCExitIoInstrNested;
static FNVMXEXITHANDLER            vmxHCExitRdmsrNested;
static FNVMXEXITHANDLER            vmxHCExitWrmsrNested;
static FNVMXEXITHANDLER            vmxHCExitMwaitNested;
static FNVMXEXITHANDLER            vmxHCExitMtfNested;
static FNVMXEXITHANDLER            vmxHCExitMonitorNested;
static FNVMXEXITHANDLER            vmxHCExitPauseNested;
static FNVMXEXITHANDLERNSRC        vmxHCExitTprBelowThresholdNested;
static FNVMXEXITHANDLER            vmxHCExitApicAccessNested;
static FNVMXEXITHANDLER            vmxHCExitApicWriteNested;
static FNVMXEXITHANDLER            vmxHCExitVirtEoiNested;
static FNVMXEXITHANDLER            vmxHCExitRdtscpNested;
static FNVMXEXITHANDLERNSRC        vmxHCExitWbinvdNested;
static FNVMXEXITHANDLER            vmxHCExitInvpcidNested;
static FNVMXEXITHANDLERNSRC        vmxHCExitErrInvalidGuestStateNested;
static FNVMXEXITHANDLER            vmxHCExitInstrNested;
static FNVMXEXITHANDLER            vmxHCExitInstrWithInfoNested;
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
static FNVMXEXITHANDLER            vmxHCExitEptViolationNested;
static FNVMXEXITHANDLER            vmxHCExitEptMisconfigNested;
# endif
/** @} */
#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
319
320
321/*********************************************************************************************************************************
322* Global Variables *
323*********************************************************************************************************************************/
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
/**
 * Array of all VMCS fields.
 * Any fields added to the VT-x spec. should be added here.
 *
 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
 * of nested-guests.
 *
 * Grouped by field width (16/64/32/natural) and then by the usual VMCS
 * categories: control, read-only data, guest-state, host-state.  64-bit
 * fields list both their FULL and HIGH halves.
 */
static const uint32_t g_aVmcsFields[] =
{
    /* 16-bit control fields. */
    VMX_VMCS16_VPID,
    VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
    VMX_VMCS16_EPTP_INDEX,
    VMX_VMCS16_HLAT_PREFIX_SIZE,
    VMX_VMCS16_LAST_PID_PTR_INDEX,

    /* 16-bit guest-state fields. */
    VMX_VMCS16_GUEST_ES_SEL,
    VMX_VMCS16_GUEST_CS_SEL,
    VMX_VMCS16_GUEST_SS_SEL,
    VMX_VMCS16_GUEST_DS_SEL,
    VMX_VMCS16_GUEST_FS_SEL,
    VMX_VMCS16_GUEST_GS_SEL,
    VMX_VMCS16_GUEST_LDTR_SEL,
    VMX_VMCS16_GUEST_TR_SEL,
    VMX_VMCS16_GUEST_INTR_STATUS,
    VMX_VMCS16_GUEST_PML_INDEX,
    VMX_VMCS16_GUEST_UINV,

    /* 16-bits host-state fields. */
    VMX_VMCS16_HOST_ES_SEL,
    VMX_VMCS16_HOST_CS_SEL,
    VMX_VMCS16_HOST_SS_SEL,
    VMX_VMCS16_HOST_DS_SEL,
    VMX_VMCS16_HOST_FS_SEL,
    VMX_VMCS16_HOST_GS_SEL,
    VMX_VMCS16_HOST_TR_SEL,

    /* 64-bit control fields. */
    VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
    VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
    VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
    VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
    VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
    VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
    VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
    VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
    VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
    VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
    VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
    VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
    VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
    VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
    VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
    VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
    VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
    VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
    VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
    VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
    VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
    VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
    VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
    VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
    VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
    VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
    VMX_VMCS64_CTRL_EPTP_FULL,
    VMX_VMCS64_CTRL_EPTP_HIGH,
    VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
    VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
    VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
    VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
    VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
    VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
    VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
    VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
    VMX_VMCS64_CTRL_EPTP_LIST_FULL,
    VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
    VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
    VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
    VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
    VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
    VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
    VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
    VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
    VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
    VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
    VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
    VMX_VMCS64_CTRL_SPPTP_FULL,
    VMX_VMCS64_CTRL_SPPTP_HIGH,
    VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
    VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
    VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
    VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
    VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
    VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
    VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_FULL,
    VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_HIGH,
    VMX_VMCS64_CTRL_HLAT_PTR_FULL,
    VMX_VMCS64_CTRL_HLAT_PTR_HIGH,
    VMX_VMCS64_CTRL_EXIT2_FULL,
    VMX_VMCS64_CTRL_EXIT2_HIGH,
    VMX_VMCS64_CTRL_SPEC_CTRL_MASK_FULL,
    VMX_VMCS64_CTRL_SPEC_CTRL_MASK_HIGH,
    VMX_VMCS64_CTRL_SPEC_CTRL_SHADOW_FULL,
    VMX_VMCS64_CTRL_SPEC_CTRL_SHADOW_HIGH,

    /* 64-bit read-only data fields. */
    VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
    VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,

    /* 64-bit guest-state fields. */
    VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
    VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
    VMX_VMCS64_GUEST_DEBUGCTL_FULL,
    VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
    VMX_VMCS64_GUEST_PAT_FULL,
    VMX_VMCS64_GUEST_PAT_HIGH,
    VMX_VMCS64_GUEST_EFER_FULL,
    VMX_VMCS64_GUEST_EFER_HIGH,
    VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
    VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
    VMX_VMCS64_GUEST_PDPTE0_FULL,
    VMX_VMCS64_GUEST_PDPTE0_HIGH,
    VMX_VMCS64_GUEST_PDPTE1_FULL,
    VMX_VMCS64_GUEST_PDPTE1_HIGH,
    VMX_VMCS64_GUEST_PDPTE2_FULL,
    VMX_VMCS64_GUEST_PDPTE2_HIGH,
    VMX_VMCS64_GUEST_PDPTE3_FULL,
    VMX_VMCS64_GUEST_PDPTE3_HIGH,
    VMX_VMCS64_GUEST_BNDCFGS_FULL,
    VMX_VMCS64_GUEST_BNDCFGS_HIGH,
    VMX_VMCS64_GUEST_RTIT_CTL_FULL,
    VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
    VMX_VMCS64_GUEST_PKRS_FULL,
    VMX_VMCS64_GUEST_PKRS_HIGH,

    /* 64-bit host-state fields. */
    VMX_VMCS64_HOST_PAT_FULL,
    VMX_VMCS64_HOST_PAT_HIGH,
    VMX_VMCS64_HOST_EFER_FULL,
    VMX_VMCS64_HOST_EFER_HIGH,
    VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
    VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
    VMX_VMCS64_HOST_PKRS_FULL,
    VMX_VMCS64_HOST_PKRS_HIGH,

    /* 32-bit control fields. */
    VMX_VMCS32_CTRL_PIN_EXEC,
    VMX_VMCS32_CTRL_PROC_EXEC,
    VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
    VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
    VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
    VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
    VMX_VMCS32_CTRL_EXIT,
    VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
    VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
    VMX_VMCS32_CTRL_ENTRY,
    VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
    VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
    VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
    VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
    VMX_VMCS32_CTRL_TPR_THRESHOLD,
    VMX_VMCS32_CTRL_PROC_EXEC2,
    VMX_VMCS32_CTRL_PLE_GAP,
    VMX_VMCS32_CTRL_PLE_WINDOW,
    VMX_VMCS32_CTRL_INSTR_TIMEOUT,

    /* 32-bits read-only fields. */
    VMX_VMCS32_RO_VM_INSTR_ERROR,
    VMX_VMCS32_RO_EXIT_REASON,
    VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
    VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
    VMX_VMCS32_RO_IDT_VECTORING_INFO,
    VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
    VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
    VMX_VMCS32_RO_EXIT_INSTR_INFO,

    /* 32-bit guest-state fields. */
    VMX_VMCS32_GUEST_ES_LIMIT,
    VMX_VMCS32_GUEST_CS_LIMIT,
    VMX_VMCS32_GUEST_SS_LIMIT,
    VMX_VMCS32_GUEST_DS_LIMIT,
    VMX_VMCS32_GUEST_FS_LIMIT,
    VMX_VMCS32_GUEST_GS_LIMIT,
    VMX_VMCS32_GUEST_LDTR_LIMIT,
    VMX_VMCS32_GUEST_TR_LIMIT,
    VMX_VMCS32_GUEST_GDTR_LIMIT,
    VMX_VMCS32_GUEST_IDTR_LIMIT,
    VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
    VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
    VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
    VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
    VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
    VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
    VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
    VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
    VMX_VMCS32_GUEST_INT_STATE,
    VMX_VMCS32_GUEST_ACTIVITY_STATE,
    VMX_VMCS32_GUEST_SMBASE,
    VMX_VMCS32_GUEST_SYSENTER_CS,
    VMX_VMCS32_PREEMPT_TIMER_VALUE,

    /* 32-bit host-state fields. */
    VMX_VMCS32_HOST_SYSENTER_CS,

    /* Natural-width control fields. */
    VMX_VMCS_CTRL_CR0_MASK,
    VMX_VMCS_CTRL_CR4_MASK,
    VMX_VMCS_CTRL_CR0_READ_SHADOW,
    VMX_VMCS_CTRL_CR4_READ_SHADOW,
    VMX_VMCS_CTRL_CR3_TARGET_VAL0,
    VMX_VMCS_CTRL_CR3_TARGET_VAL1,
    VMX_VMCS_CTRL_CR3_TARGET_VAL2,
    VMX_VMCS_CTRL_CR3_TARGET_VAL3,

    /* Natural-width read-only data fields. */
    VMX_VMCS_RO_EXIT_QUALIFICATION,
    VMX_VMCS_RO_IO_RCX,
    VMX_VMCS_RO_IO_RSI,
    VMX_VMCS_RO_IO_RDI,
    VMX_VMCS_RO_IO_RIP,
    VMX_VMCS_RO_GUEST_LINEAR_ADDR,

    /* Natural-width guest-state field */
    VMX_VMCS_GUEST_CR0,
    VMX_VMCS_GUEST_CR3,
    VMX_VMCS_GUEST_CR4,
    VMX_VMCS_GUEST_ES_BASE,
    VMX_VMCS_GUEST_CS_BASE,
    VMX_VMCS_GUEST_SS_BASE,
    VMX_VMCS_GUEST_DS_BASE,
    VMX_VMCS_GUEST_FS_BASE,
    VMX_VMCS_GUEST_GS_BASE,
    VMX_VMCS_GUEST_LDTR_BASE,
    VMX_VMCS_GUEST_TR_BASE,
    VMX_VMCS_GUEST_GDTR_BASE,
    VMX_VMCS_GUEST_IDTR_BASE,
    VMX_VMCS_GUEST_DR7,
    VMX_VMCS_GUEST_RSP,
    VMX_VMCS_GUEST_RIP,
    VMX_VMCS_GUEST_RFLAGS,
    VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
    VMX_VMCS_GUEST_SYSENTER_ESP,
    VMX_VMCS_GUEST_SYSENTER_EIP,
    VMX_VMCS_GUEST_S_CET,
    VMX_VMCS_GUEST_SSP,
    VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,

    /* Natural-width host-state fields */
    VMX_VMCS_HOST_CR0,
    VMX_VMCS_HOST_CR3,
    VMX_VMCS_HOST_CR4,
    VMX_VMCS_HOST_FS_BASE,
    VMX_VMCS_HOST_GS_BASE,
    VMX_VMCS_HOST_TR_BASE,
    VMX_VMCS_HOST_GDTR_BASE,
    VMX_VMCS_HOST_IDTR_BASE,
    VMX_VMCS_HOST_SYSENTER_ESP,
    VMX_VMCS_HOST_SYSENTER_EIP,
    VMX_VMCS_HOST_RSP,
    VMX_VMCS_HOST_RIP,
    VMX_VMCS_HOST_S_CET,
    VMX_VMCS_HOST_SSP,
    VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
};
#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
591
#ifdef HMVMX_USE_FUNCTION_TABLE
/**
 * VMX_EXIT dispatch table.
 *
 * Indexed by the VMX_EXIT_XXX basic exit reason (0..VMX_EXIT_MAX).  Reasons
 * that must never occur with our configuration map to vmxHCExitErrUnexpected;
 * VMX instructions map to the \#UD injector when nested hw-virt is disabled.
 */
static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
{
    /*  0  VMX_EXIT_XCPT_OR_NMI             */  { vmxHCExitXcptOrNmi },
    /*  1  VMX_EXIT_EXT_INT                 */  { vmxHCExitExtInt },
    /*  2  VMX_EXIT_TRIPLE_FAULT            */  { vmxHCExitTripleFault },
    /*  3  VMX_EXIT_INIT_SIGNAL             */  { vmxHCExitErrUnexpected },
    /*  4  VMX_EXIT_SIPI                    */  { vmxHCExitErrUnexpected },
    /*  5  VMX_EXIT_IO_SMI                  */  { vmxHCExitErrUnexpected },
    /*  6  VMX_EXIT_SMI                     */  { vmxHCExitErrUnexpected },
    /*  7  VMX_EXIT_INT_WINDOW              */  { vmxHCExitIntWindow },
    /*  8  VMX_EXIT_NMI_WINDOW              */  { vmxHCExitNmiWindow },
    /*  9  VMX_EXIT_TASK_SWITCH             */  { vmxHCExitTaskSwitch },
    /* 10  VMX_EXIT_CPUID                   */  { vmxHCExitCpuid },
    /* 11  VMX_EXIT_GETSEC                  */  { vmxHCExitGetsec },
    /* 12  VMX_EXIT_HLT                     */  { vmxHCExitHlt },
    /* 13  VMX_EXIT_INVD                    */  { vmxHCExitInvd },
    /* 14  VMX_EXIT_INVLPG                  */  { vmxHCExitInvlpg },
    /* 15  VMX_EXIT_RDPMC                   */  { vmxHCExitRdpmc },
    /* 16  VMX_EXIT_RDTSC                   */  { vmxHCExitRdtsc },
    /* 17  VMX_EXIT_RSM                     */  { vmxHCExitErrUnexpected },
    /* 18  VMX_EXIT_VMCALL                  */  { vmxHCExitVmcall },
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    /* 19  VMX_EXIT_VMCLEAR                 */  { vmxHCExitVmclear },
    /* 20  VMX_EXIT_VMLAUNCH                */  { vmxHCExitVmlaunch },
    /* 21  VMX_EXIT_VMPTRLD                 */  { vmxHCExitVmptrld },
    /* 22  VMX_EXIT_VMPTRST                 */  { vmxHCExitVmptrst },
    /* 23  VMX_EXIT_VMREAD                  */  { vmxHCExitVmread },
    /* 24  VMX_EXIT_VMRESUME                */  { vmxHCExitVmresume },
    /* 25  VMX_EXIT_VMWRITE                 */  { vmxHCExitVmwrite },
    /* 26  VMX_EXIT_VMXOFF                  */  { vmxHCExitVmxoff },
    /* 27  VMX_EXIT_VMXON                   */  { vmxHCExitVmxon },
#else
    /* 19  VMX_EXIT_VMCLEAR                 */  { vmxHCExitSetPendingXcptUD },
    /* 20  VMX_EXIT_VMLAUNCH                */  { vmxHCExitSetPendingXcptUD },
    /* 21  VMX_EXIT_VMPTRLD                 */  { vmxHCExitSetPendingXcptUD },
    /* 22  VMX_EXIT_VMPTRST                 */  { vmxHCExitSetPendingXcptUD },
    /* 23  VMX_EXIT_VMREAD                  */  { vmxHCExitSetPendingXcptUD },
    /* 24  VMX_EXIT_VMRESUME                */  { vmxHCExitSetPendingXcptUD },
    /* 25  VMX_EXIT_VMWRITE                 */  { vmxHCExitSetPendingXcptUD },
    /* 26  VMX_EXIT_VMXOFF                  */  { vmxHCExitSetPendingXcptUD },
    /* 27  VMX_EXIT_VMXON                   */  { vmxHCExitSetPendingXcptUD },
#endif
    /* 28  VMX_EXIT_MOV_CRX                 */  { vmxHCExitMovCRx },
    /* 29  VMX_EXIT_MOV_DRX                 */  { vmxHCExitMovDRx },
    /* 30  VMX_EXIT_IO_INSTR                */  { vmxHCExitIoInstr },
    /* 31  VMX_EXIT_RDMSR                   */  { vmxHCExitRdmsr },
    /* 32  VMX_EXIT_WRMSR                   */  { vmxHCExitWrmsr },
    /* 33  VMX_EXIT_ERR_INVALID_GUEST_STATE */  { vmxHCExitErrInvalidGuestState },
    /* 34  VMX_EXIT_ERR_MSR_LOAD            */  { vmxHCExitErrUnexpected },
    /* 35  UNDEFINED                        */  { vmxHCExitErrUnexpected },
    /* 36  VMX_EXIT_MWAIT                   */  { vmxHCExitMwait },
    /* 37  VMX_EXIT_MTF                     */  { vmxHCExitMtf },
    /* 38  UNDEFINED                        */  { vmxHCExitErrUnexpected },
    /* 39  VMX_EXIT_MONITOR                 */  { vmxHCExitMonitor },
    /* 40  VMX_EXIT_PAUSE                   */  { vmxHCExitPause },
    /* 41  VMX_EXIT_ERR_MACHINE_CHECK       */  { vmxHCExitErrUnexpected },
    /* 42  UNDEFINED                        */  { vmxHCExitErrUnexpected },
    /* 43  VMX_EXIT_TPR_BELOW_THRESHOLD     */  { vmxHCExitTprBelowThreshold },
    /* 44  VMX_EXIT_APIC_ACCESS             */  { vmxHCExitApicAccess },
    /* 45  VMX_EXIT_VIRTUALIZED_EOI         */  { vmxHCExitErrUnexpected },
    /* 46  VMX_EXIT_GDTR_IDTR_ACCESS        */  { vmxHCExitErrUnexpected },
    /* 47  VMX_EXIT_LDTR_TR_ACCESS          */  { vmxHCExitErrUnexpected },
    /* 48  VMX_EXIT_EPT_VIOLATION           */  { vmxHCExitEptViolation },
    /* 49  VMX_EXIT_EPT_MISCONFIG           */  { vmxHCExitEptMisconfig },
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    /* 50  VMX_EXIT_INVEPT                  */  { vmxHCExitInvept },
#else
    /* 50  VMX_EXIT_INVEPT                  */  { vmxHCExitSetPendingXcptUD },
#endif
    /* 51  VMX_EXIT_RDTSCP                  */  { vmxHCExitRdtscp },
    /* 52  VMX_EXIT_PREEMPT_TIMER           */  { vmxHCExitPreemptTimer },
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    /* 53  VMX_EXIT_INVVPID                 */  { vmxHCExitInvvpid },
#else
    /* 53  VMX_EXIT_INVVPID                 */  { vmxHCExitSetPendingXcptUD },
#endif
    /* 54  VMX_EXIT_WBINVD                  */  { vmxHCExitWbinvd },
    /* 55  VMX_EXIT_XSETBV                  */  { vmxHCExitXsetbv },
    /* 56  VMX_EXIT_APIC_WRITE              */  { vmxHCExitErrUnexpected },
    /* 57  VMX_EXIT_RDRAND                  */  { vmxHCExitErrUnexpected },
    /* 58  VMX_EXIT_INVPCID                 */  { vmxHCExitInvpcid },
    /* 59  VMX_EXIT_VMFUNC                  */  { vmxHCExitErrUnexpected },
    /* 60  VMX_EXIT_ENCLS                   */  { vmxHCExitErrUnexpected },
    /* 61  VMX_EXIT_RDSEED                  */  { vmxHCExitErrUnexpected },
    /* 62  VMX_EXIT_PML_FULL                */  { vmxHCExitErrUnexpected },
    /* 63  VMX_EXIT_XSAVES                  */  { vmxHCExitErrUnexpected },
    /* 64  VMX_EXIT_XRSTORS                 */  { vmxHCExitErrUnexpected },
    /* 65  UNDEFINED                        */  { vmxHCExitErrUnexpected },
    /* 66  VMX_EXIT_SPP_EVENT               */  { vmxHCExitErrUnexpected },
    /* 67  VMX_EXIT_UMWAIT                  */  { vmxHCExitErrUnexpected },
    /* 68  VMX_EXIT_TPAUSE                  */  { vmxHCExitErrUnexpected },
    /* 69  VMX_EXIT_LOADIWKEY               */  { vmxHCExitErrUnexpected },
};
#endif /* HMVMX_USE_FUNCTION_TABLE */
690
#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
/** Human-readable VM-instruction error strings, indexed by the
 *  VM-instruction error number (0..HMVMX_INSTR_ERROR_MAX); used for strict
 *  logging only. */
static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
{
    /*  0 */ "(Not Used)",
    /*  1 */ "VMCALL executed in VMX root operation.",
    /*  2 */ "VMCLEAR with invalid physical address.",
    /*  3 */ "VMCLEAR with VMXON pointer.",
    /*  4 */ "VMLAUNCH with non-clear VMCS.",
    /*  5 */ "VMRESUME with non-launched VMCS.",
    /*  6 */ "VMRESUME after VMXOFF",
    /*  7 */ "VM-entry with invalid control fields.",
    /*  8 */ "VM-entry with invalid host state fields.",
    /*  9 */ "VMPTRLD with invalid physical address.",
    /* 10 */ "VMPTRLD with VMXON pointer.",
    /* 11 */ "VMPTRLD with incorrect revision identifier.",
    /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
    /* 13 */ "VMWRITE to read-only VMCS component.",
    /* 14 */ "(Not Used)",
    /* 15 */ "VMXON executed in VMX root operation.",
    /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
    /* 17 */ "VM-entry with non-launched executing VMCS.",
    /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
    /* 19 */ "VMCALL with non-clear VMCS.",
    /* 20 */ "VMCALL with invalid VM-exit control fields.",
    /* 21 */ "(Not Used)",
    /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
    /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
    /* 24 */ "VMCALL with invalid SMM-monitor features.",
    /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
    /* 26 */ "VM-entry with events blocked by MOV SS.",
    /* 27 */ "(Not Used)",
    /* 28 */ "Invalid operand to INVEPT/INVVPID."
};
#endif /* VBOX_STRICT && LOG_ENABLED */
725
726
727/**
728 * Gets the CR0 guest/host mask.
729 *
730 * These bits typically does not change through the lifetime of a VM. Any bit set in
731 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
732 * by the guest.
733 *
734 * @returns The CR0 guest/host mask.
735 * @param pVCpu The cross context virtual CPU structure.
736 */
737static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
738{
739 /*
740 * Modifications to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW) and
741 * to CR0 bits that we require for shadow paging (PG) by the guest must cause VM-exits.
742 *
743 * Furthermore, modifications to any bits that are reserved/unspecified currently
744 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
745 * when future CPUs specify and use currently reserved/unspecified bits.
746 */
747 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
748 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
749 * and @bugref{6944}. */
750 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
751 AssertCompile(RT_HI_U32(VMX_EXIT_HOST_CR0_IGNORE_MASK) == UINT32_C(0xffffffff)); /* Paranoia. */
752 return ( X86_CR0_PE
753 | X86_CR0_NE
754 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
755 | X86_CR0_PG
756 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
757}
758
759
760/**
761 * Gets the CR4 guest/host mask.
762 *
763 * These bits typically does not change through the lifetime of a VM. Any bit set in
764 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
765 * by the guest.
766 *
767 * @returns The CR4 guest/host mask.
768 * @param pVCpu The cross context virtual CPU structure.
769 */
770static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
771{
772 /*
773 * We construct a mask of all CR4 bits that the guest can modify without causing
774 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
775 * a VM-exit when the guest attempts to modify them when executing using
776 * hardware-assisted VMX.
777 *
778 * When a feature is not exposed to the guest (and may be present on the host),
779 * we want to intercept guest modifications to the bit so we can emulate proper
780 * behavior (e.g., #GP).
781 *
782 * Furthermore, only modifications to those bits that don't require immediate
783 * emulation is allowed. For e.g., PCIDE is excluded because the behavior
784 * depends on CR3 which might not always be the guest value while executing
785 * using hardware-assisted VMX.
786 */
787 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
788 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
789#ifdef IN_NEM_DARWIN
790 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
791#endif
792 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
793
794 /*
795 * Paranoia.
796 * Ensure features exposed to the guest are present on the host.
797 */
798 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
799#ifdef IN_NEM_DARWIN
800 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
801#endif
802 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
803
804 uint64_t const fGstMask = X86_CR4_PVI
805 | X86_CR4_TSD
806 | X86_CR4_DE
807 | X86_CR4_MCE
808 | X86_CR4_PCE
809 | X86_CR4_OSXMMEEXCPT
810 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
811#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
812 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
813 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
814#endif
815 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
816 return ~fGstMask;
817}
818
819
820/**
821 * Checks whether an \#AC exception generated while executing a guest (or
822 * nested-guest) was due to a split-lock memory access.
823 *
824 * @returns @c true if split-lock triggered the \#AC, @c false otherwise.
825 * @param pVCpu The cross context virtual CPU structure.
826 */
827DECL_FORCE_INLINE(bool) vmxHCIsSplitLockAcXcpt(PVMCPU pVCpu)
828{
829 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
830 if ( !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM) /* 1. If 486-style alignment checks aren't enabled, this must be a split-lock #AC. */
831 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) /* 2. When the EFLAGS.AC != 0 this can only be a split-lock case. */
832 || CPUMGetGuestCPL(pVCpu) != 3) /* 3. #AC cannot happen in rings 0-2 except for split-lock detection. */
833 return true;
834 return false;
835}
836
837
838/**
839 * Adds one or more exceptions to the exception bitmap and commits it to the current
840 * VMCS.
841 *
842 * @param pVCpu The cross context virtual CPU structure.
843 * @param pVmxTransient The VMX-transient structure.
844 * @param uXcptMask The exception(s) to add.
845 */
846static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
847{
848 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
849 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
850 if ((uXcptBitmap & uXcptMask) != uXcptMask)
851 {
852 uXcptBitmap |= uXcptMask;
853 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
854 AssertRC(rc);
855 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
856 }
857}
858
859
860/**
861 * Adds an exception to the exception bitmap and commits it to the current VMCS.
862 *
863 * @param pVCpu The cross context virtual CPU structure.
864 * @param pVmxTransient The VMX-transient structure.
865 * @param uXcpt The exception to add.
866 */
867static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
868{
869 Assert(uXcpt <= X86_XCPT_LAST);
870 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
871}
872
873
/**
 * Remove one or more exceptions from the exception bitmap and commits it to the
 * current VMCS.
 *
 * This takes care of not removing the exception intercept if a nested-guest
 * requires the exception to be intercepted.
 *
 * @returns VBox status code (always VINF_SUCCESS).
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 * @param   uXcptMask       The exception(s) to remove.
 */
static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
{
    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
    if (uXcptBitmap & uXcptMask)
    {
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
        /* Keep intercepting any exception the nested-guest's own VMCS wants intercepted. */
        if (!pVmxTransient->fIsNestedGuest)
        { /* likely */ }
        else
            uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
#endif
#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
        /* In trap-all debug builds these intercepts must never be removed. */
        uXcptMask &= ~(  RT_BIT(X86_XCPT_BP)
                       | RT_BIT(X86_XCPT_DE)
                       | RT_BIT(X86_XCPT_NM)
                       | RT_BIT(X86_XCPT_TS)
                       | RT_BIT(X86_XCPT_UD)
                       | RT_BIT(X86_XCPT_NP)
                       | RT_BIT(X86_XCPT_SS)
                       | RT_BIT(X86_XCPT_GP)
                       | RT_BIT(X86_XCPT_PF)
                       | RT_BIT(X86_XCPT_MF));
#elif defined(HMVMX_ALWAYS_TRAP_PF)
        uXcptMask &= ~RT_BIT(X86_XCPT_PF);
#endif
        if (uXcptMask)
        {
            /* Validate we are not removing any essential exception intercepts. */
#ifndef IN_NEM_DARWIN
            /* Without nested paging the #PF intercept is required for shadow paging. */
            Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
#else
            Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
#endif
            NOREF(pVCpu);
            Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
            Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));

            /* Remove it from the exception bitmap. */
            uXcptBitmap &= ~uXcptMask;

            /* Commit and update the cache if necessary. */
            if (pVmcsInfo->u32XcptBitmap != uXcptBitmap)
            {
                int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
                AssertRC(rc);
                pVmcsInfo->u32XcptBitmap = uXcptBitmap;
            }
        }
    }
    return VINF_SUCCESS;
}
938
939
940/**
941 * Remove an exceptions from the exception bitmap and commits it to the current
942 * VMCS.
943 *
944 * @returns VBox status code.
945 * @param pVCpu The cross context virtual CPU structure.
946 * @param pVmxTransient The VMX-transient structure.
947 * @param uXcpt The exception to remove.
948 */
949static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
950{
951 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
952}
953
954#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
955
956/**
957 * Loads the shadow VMCS specified by the VMCS info. object.
958 *
959 * @returns VBox status code.
960 * @param pVmcsInfo The VMCS info. object.
961 *
962 * @remarks Can be called with interrupts disabled.
963 */
964static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
965{
966 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
967 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
968
969 return VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
970}
971
972
973/**
974 * Clears the shadow VMCS specified by the VMCS info. object.
975 *
976 * @returns VBox status code.
977 * @param pVmcsInfo The VMCS info. object.
978 *
979 * @remarks Can be called with interrupts disabled.
980 */
981static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
982{
983 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
984 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
985
986 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
987 if (RT_SUCCESS(rc))
988 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
989 return rc;
990}
991
992
993/**
994 * Switches from and to the specified VMCSes.
995 *
996 * @returns VBox status code.
997 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
998 * @param pVmcsInfoTo The VMCS info. object we are switching to.
999 *
1000 * @remarks Called with interrupts disabled.
1001 */
1002static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
1003{
1004 /*
1005 * Clear the VMCS we are switching out if it has not already been cleared.
1006 * This will sync any CPU internal data back to the VMCS.
1007 */
1008 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1009 {
1010 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
1011 if (RT_SUCCESS(rc))
1012 {
1013 /*
1014 * The shadow VMCS, if any, would not be active at this point since we
1015 * would have cleared it while importing the virtual hardware-virtualization
1016 * state as part the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
1017 * clear the shadow VMCS here, just assert for safety.
1018 */
1019 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
1020 }
1021 else
1022 return rc;
1023 }
1024
1025 /*
1026 * Clear the VMCS we are switching to if it has not already been cleared.
1027 * This will initialize the VMCS launch state to "clear" required for loading it.
1028 *
1029 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1030 */
1031 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1032 {
1033 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1034 if (RT_SUCCESS(rc))
1035 { /* likely */ }
1036 else
1037 return rc;
1038 }
1039
1040 /*
1041 * Finally, load the VMCS we are switching to.
1042 */
1043 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1044}
1045
1046
/**
 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
 * caller.
 *
 * @returns VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
 *                              true) or guest VMCS (pass false).
 */
static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
{
    /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
    HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);

    /* Pick source and destination VMCS info. objects based on the requested direction. */
    PVMXVMCSINFO pVmcsInfoFrom;
    PVMXVMCSINFO pVmcsInfoTo;
    if (fSwitchToNstGstVmcs)
    {
        pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
        pVmcsInfoTo   = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
        /* The merged controls are (re)computed after switching; they must not be stale here. */
        Assert(!pVCpu->hm.s.vmx.fMergedNstGstCtls);
    }
    else
    {
        pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
        pVmcsInfoTo   = &pVCpu->hmr0.s.vmx.VmcsInfo;
    }

    /*
     * Disable interrupts to prevent being preempted while we switch the current VMCS as the
     * preemption hook code path acquires the current VMCS.
     */
    RTCCUINTREG const fEFlags = ASMIntDisableFlags();

    int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
    if (RT_SUCCESS(rc))
    {
        pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs           = fSwitchToNstGstVmcs;
        pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;

        /*
         * If we are switching to a VMCS that was executed on a different host CPU or was
         * never executed before, flag that we need to export the host state before executing
         * guest/nested-guest code using hardware-assisted VMX.
         *
         * This could probably be done in a preemptible context since the preemption hook
         * will flag the necessary change in host context. However, since preemption is
         * already disabled and to avoid making assumptions about host specific code in
         * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
         * disabled.
         */
        if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
        { /* likely */ }
        else
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);

        ASMSetFlags(fEFlags);

        /*
         * We use a different VM-exit MSR-store areas for the guest and nested-guest. Hence,
         * flag that we need to update the host MSR values there. Even if we decide in the
         * future to share the VM-exit MSR-store area page between the guest and nested-guest,
         * if its content differs, we would have to update the host MSRs anyway.
         */
        pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
    }
    else
        ASMSetFlags(fEFlags);   /* Restore interrupts even on failure. */
    return rc;
}
1117
1118#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1119#ifdef VBOX_STRICT
1120
1121/**
1122 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1123 * transient structure.
1124 *
1125 * @param pVCpu The cross context virtual CPU structure.
1126 * @param pVmxTransient The VMX-transient structure.
1127 */
1128DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1129{
1130 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1131 AssertRC(rc);
1132}
1133
1134
1135/**
1136 * Reads the VM-entry exception error code field from the VMCS into
1137 * the VMX transient structure.
1138 *
1139 * @param pVCpu The cross context virtual CPU structure.
1140 * @param pVmxTransient The VMX-transient structure.
1141 */
1142DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1143{
1144 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1145 AssertRC(rc);
1146}
1147
1148
/**
 * Reads the VM-entry instruction length field from the VMCS into the VMX
 * transient structure.
 *
 * (Note: the previous description was a copy/paste of the error-code reader;
 * this function reads VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH.)
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
    AssertRC(rc);
}
1161
1162#endif /* VBOX_STRICT */
1163
1164
/**
 * Reads VMCS fields into the VMXTRANSIENT structure, slow path version.
 *
 * Don't call directly unless it's likely that some or all of the fields
 * given in @a a_fReadMask have already been read.
 *
 * Each field is read only if it was requested in @a a_fReadMask and is not
 * already marked as read in pVmxTransient->fVmcsFieldsRead.
 *
 * @tparam  a_fReadMask     The fields to read.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
template<uint32_t const a_fReadMask>
static void vmxHCReadToTransientSlow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    /* Only the HMVMX_READ_XXX flags below are valid in a_fReadMask. */
    AssertCompile((a_fReadMask & ~(  HMVMX_READ_EXIT_QUALIFICATION
                                   | HMVMX_READ_EXIT_INSTR_LEN
                                   | HMVMX_READ_EXIT_INSTR_INFO
                                   | HMVMX_READ_IDT_VECTORING_INFO
                                   | HMVMX_READ_IDT_VECTORING_ERROR_CODE
                                   | HMVMX_READ_EXIT_INTERRUPTION_INFO
                                   | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
                                   | HMVMX_READ_GUEST_LINEAR_ADDR
                                   | HMVMX_READ_GUEST_PHYSICAL_ADDR
                                   | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
                                  )) == 0);

    if ((pVmxTransient->fVmcsFieldsRead & a_fReadMask) != a_fReadMask)
    {
        uint32_t const fVmcsFieldsRead = pVmxTransient->fVmcsFieldsRead;

        if (   (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
            && !(fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
        {
            int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
            AssertRC(rc);
        }
        if (   (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
            && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
        {
            int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
            AssertRC(rc);
        }
        if (   (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
            && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
        {
            int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
            AssertRC(rc);
        }
        if (   (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
            && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
        {
            int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
            AssertRC(rc);
        }
        if (   (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
            && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
        {
            int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
            AssertRC(rc);
        }
        if (   (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
            && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
        {
            int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
            AssertRC(rc);
        }
        if (   (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
            && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
        {
            int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
            AssertRC(rc);
        }
        if (   (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
            && !(fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
        {
            int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
            AssertRC(rc);
        }
        if (   (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
            && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
        {
            int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
            AssertRC(rc);
        }
        if (   (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
            && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
        {
            int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
            AssertRC(rc);
        }

        /* Mark everything requested as read, including fields already cached before the call. */
        pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
    }
}
1258
1259
/**
 * Reads VMCS fields into the VMXTRANSIENT structure.
 *
 * This optimizes for the case where none of @a a_fReadMask has been read yet,
 * generating an optimized read sequences w/o any conditionals between in
 * non-strict builds.
 *
 * If any requested field was already read, the slow path
 * (vmxHCReadToTransientSlow) is taken instead so no field is read twice.
 *
 * @tparam  a_fReadMask     The fields to read. One or more of the
 *                          HMVMX_READ_XXX fields ORed together.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
template<uint32_t const a_fReadMask>
DECLINLINE(void) vmxHCReadToTransient(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    /* Only the HMVMX_READ_XXX flags below are valid in a_fReadMask. */
    AssertCompile((a_fReadMask & ~(  HMVMX_READ_EXIT_QUALIFICATION
                                   | HMVMX_READ_EXIT_INSTR_LEN
                                   | HMVMX_READ_EXIT_INSTR_INFO
                                   | HMVMX_READ_IDT_VECTORING_INFO
                                   | HMVMX_READ_IDT_VECTORING_ERROR_CODE
                                   | HMVMX_READ_EXIT_INTERRUPTION_INFO
                                   | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
                                   | HMVMX_READ_GUEST_LINEAR_ADDR
                                   | HMVMX_READ_GUEST_PHYSICAL_ADDR
                                   | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
                                  )) == 0);

    if (RT_LIKELY(!(pVmxTransient->fVmcsFieldsRead & a_fReadMask)))
    {
        /* Fast path: a_fReadMask is a compile-time constant, so each 'if' below folds away. */
        if (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
        {
            int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
            AssertRC(rc);
        }
        if (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
        {
            int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
            AssertRC(rc);
        }
        if (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
        {
            int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
            AssertRC(rc);
        }
        if (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
        {
            int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
            AssertRC(rc);
        }
        if (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
        {
            int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
            AssertRC(rc);
        }
        if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
        {
            int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
            AssertRC(rc);
        }
        if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
        {
            int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
            AssertRC(rc);
        }
        if (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
        {
            int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
            AssertRC(rc);
        }
        if (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
        {
            int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
            AssertRC(rc);
        }
        if (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
        {
            int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
            AssertRC(rc);
        }

        pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
    }
    else
    {
        /* Slow path: at least one requested field was already read; avoid re-reading it. */
        STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatReadToTransientFallback);
        Log11Func(("a_fReadMask=%#x fVmcsFieldsRead=%#x => %#x - Taking inefficient code path!\n",
                   a_fReadMask, pVmxTransient->fVmcsFieldsRead, a_fReadMask & pVmxTransient->fVmcsFieldsRead));
        vmxHCReadToTransientSlow<a_fReadMask>(pVCpu, pVmxTransient);
    }
}
1350
1351
1352#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
/**
 * Reads all relevant read-only VMCS fields into the VMX transient structure.
 *
 * Note that the guest-pending debug exceptions field is deliberately not part
 * of this set (it is absent from both the reads and the read-mask below).
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
    rc    |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
    rc    |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
    rc    |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
    rc    |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
    rc    |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
    rc    |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
    rc    |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
    rc    |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
    AssertRC(rc);
    /* Mark every field read above as cached so subsequent reads are skipped. */
    pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
                                    | HMVMX_READ_EXIT_INSTR_LEN
                                    | HMVMX_READ_EXIT_INSTR_INFO
                                    | HMVMX_READ_IDT_VECTORING_INFO
                                    | HMVMX_READ_IDT_VECTORING_ERROR_CODE
                                    | HMVMX_READ_EXIT_INTERRUPTION_INFO
                                    | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
                                    | HMVMX_READ_GUEST_LINEAR_ADDR
                                    | HMVMX_READ_GUEST_PHYSICAL_ADDR;
}
1381#endif
1382
/**
 * Verifies that our cached values of the VMCS fields are all consistent with
 * what's actually present in the VMCS.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS if all our caches match their respective VMCS fields.
 * @retval  VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
 *                                            VMCS content. HMCPU error-field is
 *                                            updated, see VMX_VCI_XXX.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmcsInfo       The VMCS info. object.
 * @param   fIsNstGstVmcs   Whether this is a nested-guest VMCS.
 */
static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
{
    const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";

    /* VM-entry controls. */
    uint32_t u32Val;
    int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
    AssertRC(rc);
    AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
                        ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
                        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);

    /* VM-exit controls. */
    rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
    AssertRC(rc);
    AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
                        ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
                        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);

    /* Pin-based VM-execution controls. */
    rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
    AssertRC(rc);
    AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
                        ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
                        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);

    /** @todo Currently disabled for nested-guests because we run into bit differences
     *        with for INT_WINDOW, RDTSC/P, see @bugref{10318}. Later try figure out
     *        why and re-enable. */
    if (!fIsNstGstVmcs)
    {
        /* Primary processor-based VM-execution controls. */
        rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
        AssertRC(rc);
        AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
                            ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
                            VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
                            VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    }

    /* Secondary processor-based controls, only present when enabled in the primary controls. */
    if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
    {
        rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
        AssertRC(rc);
        AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
                            ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
                            VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
                            VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    }

    /* Tertiary processor-based controls, only present when enabled in the primary controls. */
    uint64_t u64Val;
    if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
    {
        rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
        AssertRC(rc);
        AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
                            ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
                            VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
                            VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    }

    /* Exception bitmap. */
    rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
    AssertRC(rc);
    AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
                        ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
                        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);

    /*
     * The TSC offset will only be used when RDTSC is not intercepted.
     * Since we don't actively clear it while switching between intercepting or not,
     * the value here could be stale.
     */
    if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_RDTSC_EXIT))
    {
        rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
        AssertRC(rc);
        AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
                            ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
                            VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
                            VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    }

    NOREF(pcszVmcs);
    return VINF_SUCCESS;
}
1481
1482
/**
 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
 * VMCS.
 *
 * This is typically required when the guest changes paging mode.
 *
 * @returns VBox status code.
 * @retval  VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO if the required control bits
 *          cannot all be satisfied by the CPU's allowed-0/allowed-1 settings.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 *
 * @remarks Requires EFER.
 * @remarks No-long-jump zone!!!
 */
static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
{
    /* Only do the work when the entry/exit controls are flagged as changed. */
    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
    {
        PVMCC pVM = pVCpu->CTX_SUFF(pVM);
        PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;

        /*
         * VM-entry controls.
         */
        {
            uint32_t       fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0;    /* Bits set here must be set in the VMCS. */
            uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1;    /* Bits cleared here must be cleared in the VMCS. */

            /*
             * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
             * The first VT-x capable CPUs only supported the 1-setting of this bit.
             *
             * For nested-guests, this is a mandatory VM-entry control. It's also
             * required because we do not want to leak host bits to the nested-guest.
             */
            fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;

            /*
             * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
             *
             * For nested-guests, the "IA-32e mode guest" control we initialize with what is
             * required to get the nested-guest working with hardware-assisted VMX execution.
             * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
             * can skip intercepting changes to the EFER MSR. This is why it needs to be done
             * here rather than while merging the guest VMCS controls.
             */
            if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
            {
                Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
                fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
            }
            else
                Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));

            /*
             * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
             *
             * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
             * regardless of whether the nested-guest VMCS specifies it because we are free to
             * load whatever MSRs we require and we do not need to modify the guest visible copy
             * of the VM-entry MSR load area.
             */
            if (   g_fHmVmxSupportsVmcsEfer
#ifndef IN_NEM_DARWIN
                && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
#endif
               )
                fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
            else
                Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));

            /*
             * The following should -not- be set (since we're not in SMM mode):
             * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
             * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
             */

            /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
             *        VMX_ENTRY_CTLS_LOAD_PAT_MSR. */

            /* All required bits must survive the allowed-1 zap mask, otherwise the CPU can't run this combo. */
            if ((fVal & fZap) == fVal)
            { /* likely */ }
            else
            {
                Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
                          g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
                VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
                return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
            }

            /* Commit it to the VMCS. */
            if (pVmcsInfo->u32EntryCtls != fVal)
            {
                int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
                AssertRC(rc);
                pVmcsInfo->u32EntryCtls = fVal;
            }
        }

        /*
         * VM-exit controls.
         */
        {
            uint32_t       fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0;    /* Bits set here must be set in the VMCS. */
            uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1;    /* Bits cleared here must be cleared in the VMCS. */

            /*
             * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
             * supported the 1-setting of this bit.
             *
             * For nested-guests, we set the "save debug controls" as the converse
             * "load debug controls" is mandatory for nested-guests anyway.
             */
            fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;

            /*
             * Set the host long mode active (EFER.LMA) bit (which Intel calls
             * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
             * host EFER.LMA and EFER.LME bit to this value. See assertion in
             * vmxHCExportHostMsrs().
             *
             * For nested-guests, we always set this bit as we do not support 32-bit
             * hosts.
             */
            fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;

#ifndef IN_NEM_DARWIN
            /*
             * If the VMCS EFER MSR fields are supported by the hardware, we use it.
             *
             * For nested-guests, we should use the "save IA32_EFER" control if we also
             * used the "load IA32_EFER" control while exporting VM-entry controls.
             */
            if (   g_fHmVmxSupportsVmcsEfer
                && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
            {
                fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
                      | VMX_EXIT_CTLS_LOAD_EFER_MSR;
            }
#endif

            /*
             * Enable saving of the VMX-preemption timer value on VM-exit.
             * For nested-guests, currently not exposed/used.
             */
            /** @todo r=bird: Measure performance hit because of this vs. always rewriting
             *        the timer value. */
            if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
            {
                Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
                fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
            }

            /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
            Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));

            /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
             *        VMX_EXIT_CTLS_SAVE_PAT_MSR,
             *        VMX_EXIT_CTLS_LOAD_PAT_MSR. */

            /* All required bits must survive the allowed-1 zap mask, otherwise the CPU can't run this combo. */
            if ((fVal & fZap) == fVal)
            { /* likely */ }
            else
            {
                Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
                          g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
                VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
                return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
            }

            /* Commit it to the VMCS. */
            if (pVmcsInfo->u32ExitCtls != fVal)
            {
                int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
                AssertRC(rc);
                pVmcsInfo->u32ExitCtls = fVal;
            }
        }

        /* Clear the changed flag now that both control sets have been exported. */
        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
    }
    return VINF_SUCCESS;
}
1665
1666
1667/**
1668 * Sets the TPR threshold in the VMCS.
1669 *
1670 * @param pVCpu The cross context virtual CPU structure.
1671 * @param pVmcsInfo The VMCS info. object.
1672 * @param u32TprThreshold The TPR threshold (task-priority class only).
1673 */
1674DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1675{
1676 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1677 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1678 RT_NOREF(pVmcsInfo);
1679 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1680 AssertRC(rc);
1681}
1682
1683
1684/**
1685 * Exports the guest APIC TPR state into the VMCS.
1686 *
1687 * @param pVCpu The cross context virtual CPU structure.
1688 * @param pVmxTransient The VMX-transient structure.
1689 *
1690 * @remarks No-long-jump zone!!!
1691 */
static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
{
    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
    {
        /* The TPR must have been imported into the guest context before exporting it. */
        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);

        PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
        if (!pVmxTransient->fIsNestedGuest)
        {
            /* Only relevant when this VM has an APIC and it is enabled for this vCPU. */
            if (   PDMHasApic(pVCpu->CTX_SUFF(pVM))
                && APICIsEnabled(pVCpu))
            {
                /*
                 * Setup TPR shadowing.
                 */
                if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
                {
                    bool fPendingIntr = false;
                    uint8_t u8Tpr = 0;
                    uint8_t u8PendingIntr = 0;
                    int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
                    AssertRC(rc);

                    /*
                     * If there are interrupts pending but masked by the TPR, instruct VT-x to
                     * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
                     * priority of the pending interrupt so we can deliver the interrupt. If there
                     * are no interrupts pending, set threshold to 0 to not cause any
                     * TPR-below-threshold VM-exits.
                     */
                    uint32_t u32TprThreshold = 0;
                    if (fPendingIntr)
                    {
                        /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
                           (which is the Task-Priority Class). */
                        const uint8_t u8PendingPriority = u8PendingIntr >> 4;
                        const uint8_t u8TprPriority = u8Tpr >> 4;
                        if (u8PendingPriority <= u8TprPriority)
                            u32TprThreshold = u8PendingPriority;
                    }

                    vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
                }
            }
        }
        /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
    }
}
1741
1742
1743/**
1744 * Gets the guest interruptibility-state and updates related internal eflags
1745 * inhibition state.
1746 *
1747 * @returns Guest's interruptibility-state.
1748 * @param pVCpu The cross context virtual CPU structure.
1749 *
1750 * @remarks No-long-jump zone!!!
1751 */
1752static uint32_t vmxHCGetGuestIntrStateWithUpdate(PVMCPUCC pVCpu)
1753{
1754 uint32_t fIntrState;
1755
1756 /*
1757 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1758 */
1759 if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
1760 fIntrState = 0;
1761 else
1762 {
1763 /* If inhibition is active, RIP should've been imported from the VMCS already. */
1764 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1765
1766 if (CPUMIsInInterruptShadowAfterSs(&pVCpu->cpum.GstCtx))
1767 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1768 else
1769 {
1770 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1771
1772 /* Block-by-STI must not be set when interrupts are disabled. */
1773 AssertStmt(pVCpu->cpum.GstCtx.eflags.Bits.u1IF, fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
1774 }
1775 }
1776
1777 /*
1778 * Check if we should inhibit NMI delivery.
1779 */
1780 if (!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
1781 { /* likely */ }
1782 else
1783 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1784
1785 /*
1786 * Validate.
1787 */
1788 /* We don't support block-by-SMI yet.*/
1789 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1790
1791 return fIntrState;
1792}
1793
1794
1795/**
1796 * Exports the exception intercepts required for guest execution in the VMCS.
1797 *
1798 * @param pVCpu The cross context virtual CPU structure.
1799 * @param pVmxTransient The VMX-transient structure.
1800 *
1801 * @remarks No-long-jump zone!!!
1802 */
1803static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1804{
1805 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1806 {
1807 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1808 if ( !pVmxTransient->fIsNestedGuest
1809 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1810 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1811 else
1812 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1813
1814 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1815 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1816 }
1817}
1818
1819
1820/**
1821 * Exports the guest's RIP into the guest-state area in the VMCS.
1822 *
1823 * @param pVCpu The cross context virtual CPU structure.
1824 *
1825 * @remarks No-long-jump zone!!!
1826 */
1827static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1828{
1829 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1830 {
1831 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1832
1833 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1834 AssertRC(rc);
1835
1836 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1837 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1838 }
1839}
1840
1841
1842/**
1843 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1844 *
1845 * @param pVCpu The cross context virtual CPU structure.
1846 * @param pVmxTransient The VMX-transient structure.
1847 *
1848 * @remarks No-long-jump zone!!!
1849 */
static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
{
    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
    {
        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);

        /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits
           of RFLAGS are reserved (MBZ). We use bits 63:24 for internal purposes, so no need
           to assert this, the CPUMX86EFLAGS/CPUMX86RFLAGS union masks these off for us.
           Use 32-bit VMWRITE. */
        uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
        Assert((fEFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);   /* Reserved-always-1 bit (bit 1) must be set. */
        AssertMsg(!(fEFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK)), ("%#x\n", fEFlags));

#ifndef IN_NEM_DARWIN
        /*
         * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
         * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
         * can run the real-mode guest code under Virtual 8086 mode.
         */
        PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
        if (pVmcsInfo->RealMode.fRealOnV86Active)
        {
            Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
            Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
            Assert(!pVmxTransient->fIsNestedGuest);
            pVmcsInfo->RealMode.Eflags.u32 = fEFlags; /* Save the original eflags of the real-mode guest. */
            fEFlags |= X86_EFL_VM; /* Set the Virtual 8086 mode bit. */
            fEFlags &= ~X86_EFL_IOPL; /* Change IOPL to 0, otherwise certain instructions won't fault. */
        }
#else
        /* On the NEM/darwin path pVmxTransient is unused in this function. */
        RT_NOREF(pVmxTransient);
#endif

        int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags);
        AssertRC(rc);

        /* RFLAGS is now in sync with the VMCS; clear the export-pending bit. */
        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
        Log4Func(("eflags=%#RX32\n", fEFlags));
    }
}
1891
1892
1893#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1894/**
1895 * Copies the nested-guest VMCS to the shadow VMCS.
1896 *
1897 * @returns VBox status code.
1898 * @param pVCpu The cross context virtual CPU structure.
1899 * @param pVmcsInfo The VMCS info. object.
1900 *
1901 * @remarks No-long-jump zone!!!
1902 */
static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
{
    PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
    PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;

    /*
     * Disable interrupts so we don't get preempted while the shadow VMCS is the
     * current VMCS, as we may try saving guest lazy MSRs.
     *
     * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
     * calling the import VMCS code which is currently performing the guest MSR reads
     * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts
     * and the rest of the VMX leave session machinery.
     */
    RTCCUINTREG const fEFlags = ASMIntDisableFlags();

    /* Make the shadow VMCS the current VMCS so the VMWRITEs below target it. */
    int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
    if (RT_SUCCESS(rc))
    {
        /*
         * Copy all guest read/write VMCS fields.
         *
         * We don't check for VMWRITE failures here for performance reasons and
         * because they are not expected to fail, barring irrecoverable conditions
         * like hardware errors.
         */
        uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
        for (uint32_t i = 0; i < cShadowVmcsFields; i++)
        {
            uint64_t u64Val;
            uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
            IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
            VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
        }

        /*
         * If the host CPU supports writing all VMCS fields, copy the guest read-only
         * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
         */
        if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
        {
            uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
            for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
            {
                uint64_t u64Val;
                uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
                IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
                VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
            }
        }

        /* Switch back: clear the shadow VMCS and restore the original current VMCS. */
        rc = vmxHCClearShadowVmcs(pVmcsInfo);
        rc |= hmR0VmxLoadVmcs(pVmcsInfo);
    }

    ASMSetFlags(fEFlags);   /* Restore the caller's interrupt-flag state. */
    return rc;
}
1961
1962
1963/**
1964 * Copies the shadow VMCS to the nested-guest VMCS.
1965 *
1966 * @returns VBox status code.
1967 * @param pVCpu The cross context virtual CPU structure.
1968 * @param pVmcsInfo The VMCS info. object.
1969 *
1970 * @remarks Called with interrupts disabled.
1971 */
static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
{
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
    PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;

    /* Make the shadow VMCS the current VMCS so the VMREADs below target it. */
    int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
    if (RT_SUCCESS(rc))
    {
        /*
         * Copy guest read/write fields from the shadow VMCS.
         * Guest read-only fields cannot be modified, so no need to copy them.
         *
         * We don't check for VMREAD failures here for performance reasons and
         * because they are not expected to fail, barring irrecoverable conditions
         * like hardware errors.
         */
        uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
        for (uint32_t i = 0; i < cShadowVmcsFields; i++)
        {
            uint64_t u64Val;
            uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
            VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
            IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
        }

        /* Switch back: clear the shadow VMCS and restore the original current VMCS. */
        rc = vmxHCClearShadowVmcs(pVmcsInfo);
        rc |= hmR0VmxLoadVmcs(pVmcsInfo);
    }
    return rc;
}
2003
2004
2005/**
2006 * Enables VMCS shadowing for the given VMCS info. object.
2007 *
2008 * @param pVCpu The cross context virtual CPU structure.
2009 * @param pVmcsInfo The VMCS info. object.
2010 *
2011 * @remarks No-long-jump zone!!!
2012 */
2013static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2014{
2015 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2016 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
2017 {
2018 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
2019 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
2020 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2021 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
2022 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2023 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
2024 Log4Func(("Enabled\n"));
2025 }
2026}
2027
2028
2029/**
2030 * Disables VMCS shadowing for the given VMCS info. object.
2031 *
2032 * @param pVCpu The cross context virtual CPU structure.
2033 * @param pVmcsInfo The VMCS info. object.
2034 *
2035 * @remarks No-long-jump zone!!!
2036 */
2037static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2038{
2039 /*
2040 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
2041 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
2042 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
2043 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
2044 *
2045 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2046 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2047 */
2048 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2049 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2050 {
2051 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2052 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2053 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2054 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2055 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2056 Log4Func(("Disabled\n"));
2057 }
2058}
2059#endif
2060
2061
2062/**
2063 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2064 *
2065 * The guest FPU state is always pre-loaded hence we don't need to bother about
2066 * sharing FPU related CR0 bits between the guest and host.
2067 *
2068 * @returns VBox status code.
2069 * @param pVCpu The cross context virtual CPU structure.
2070 * @param pVmxTransient The VMX-transient structure.
2071 *
2072 * @remarks No-long-jump zone!!!
2073 */
static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
{
    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
    {
        PVMCC pVM = pVCpu->CTX_SUFF(pVM);
        PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;

        /* Hardware-mandated CR0 bits: bits set in fSetCr0 must be 1, bits clear in fZapCr0 must be 0. */
        uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
        uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
        if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
            fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);   /* Unrestricted guests may run with PE/PG clear. */
        else
            Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));

        if (!pVmxTransient->fIsNestedGuest)
        {
            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
            uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
            uint64_t const u64ShadowCr0 = u64GuestCr0;   /* The guest sees its own CR0 unmodified via the read shadow. */
            Assert(!RT_HI_U32(u64GuestCr0));

            /*
             * Setup VT-x's view of the guest CR0.
             */
            uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
            if (VM_IS_VMX_NESTED_PAGING(pVM))
            {
#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
                if (CPUMIsGuestPagingEnabled(pVCpu))
                {
                    /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
                    uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
                                  | VMX_PROC_CTLS_CR3_STORE_EXIT);
                }
                else
                {
                    /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
                    uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
                               | VMX_PROC_CTLS_CR3_STORE_EXIT;
                }

                /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
                if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
                    uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
#endif
            }
            else
            {
                /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
                u64GuestCr0 |= X86_CR0_WP;
            }

            /*
             * Guest FPU bits.
             *
             * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state
             * using CR0.TS.
             *
             * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE bit must always be
             * set on the first CPUs to support VT-x and no mention of with regards to UX in VM-entry checks.
             */
            u64GuestCr0 |= X86_CR0_NE;

            /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
            bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);

            /*
             * Update exception intercepts.
             */
            uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
#ifndef IN_NEM_DARWIN
            if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
            {
                /* Real-on-v86 execution needs the full real-mode exception intercept set. */
                Assert(PDMVmmDevHeapIsEnabled(pVM));
                Assert(pVM->hm.s.vmx.pRealModeTSS);
                uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
            }
            else
#endif
            {
                /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
                uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
                if (fInterceptMF)
                    uXcptBitmap |= RT_BIT(X86_XCPT_MF);
            }

            /* Additional intercepts for debugging, define these yourself explicitly. */
#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
            uXcptBitmap |= 0
                         | RT_BIT(X86_XCPT_BP)
                         | RT_BIT(X86_XCPT_DE)
                         | RT_BIT(X86_XCPT_NM)
                         | RT_BIT(X86_XCPT_TS)
                         | RT_BIT(X86_XCPT_UD)
                         | RT_BIT(X86_XCPT_NP)
                         | RT_BIT(X86_XCPT_SS)
                         | RT_BIT(X86_XCPT_GP)
                         | RT_BIT(X86_XCPT_PF)
                         | RT_BIT(X86_XCPT_MF)
                         ;
#elif defined(HMVMX_ALWAYS_TRAP_PF)
            uXcptBitmap |= RT_BIT(X86_XCPT_PF);
#endif
            if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
                uXcptBitmap |= RT_BIT(X86_XCPT_GP);
            if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
                uXcptBitmap |= RT_BIT(X86_XCPT_DE);
            /* Without nested paging we must intercept #PF to maintain the shadow page tables. */
            Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));

            /* Apply the hardware specified CR0 fixed bits and enable caching. */
            u64GuestCr0 |= fSetCr0;
            u64GuestCr0 &= fZapCr0;
            u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);

            Assert(!RT_HI_U32(u64GuestCr0));
            Assert(u64GuestCr0 & X86_CR0_NE);

            /* Commit the CR0 and related fields to the guest VMCS. */
            int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
            rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
            if (uProcCtls != pVmcsInfo->u32ProcCtls)
            {
                rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
                AssertRC(rc);
            }
            if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
            {
                rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
                AssertRC(rc);
            }

            /* Update our caches. */
            pVmcsInfo->u32ProcCtls = uProcCtls;
            pVmcsInfo->u32XcptBitmap = uXcptBitmap;

            Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
        }
        else
        {
            /*
             * With nested-guests, we may have extended the guest/host mask here since we
             * merged in the outer guest's mask. Thus, the merged mask can include more bits
             * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
             * originally supplied. We must copy those bits from the nested-guest CR0 into
             * the nested-guest CR0 read-shadow.
             */
            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
            uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
            uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);

            /* Apply the hardware specified CR0 fixed bits and enable caching. */
            u64GuestCr0 |= fSetCr0;
            u64GuestCr0 &= fZapCr0;
            u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);

            Assert(!RT_HI_U32(u64GuestCr0));
            Assert(u64GuestCr0 & X86_CR0_NE);

            /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
            int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
            rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);

            Log4Func(("cr0=%#RX64 shadow=%#RX64 vmcs_read_shw=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0,
                      pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u, fSetCr0, fZapCr0));
        }

        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
    }

    return VINF_SUCCESS;
}
2245
2246
2247/**
2248 * Exports the guest control registers (CR3, CR4) into the guest-state area
2249 * in the VMCS.
2250 *
2251 * @returns VBox strict status code.
2252 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2253 * without unrestricted guest access and the VMMDev is not presently
2254 * mapped (e.g. EFI32).
2255 *
2256 * @param pVCpu The cross context virtual CPU structure.
2257 * @param pVmxTransient The VMX-transient structure.
2258 *
2259 * @remarks No-long-jump zone!!!
2260 */
static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
{
    int rc = VINF_SUCCESS;
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);

    /*
     * Guest CR2.
     * It's always loaded in the assembler code. Nothing to do here.
     */

    /*
     * Guest CR3.
     */
    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
    {
        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);

        if (VM_IS_VMX_NESTED_PAGING(pVM))
        {
#ifndef IN_NEM_DARWIN
            /* Construct and commit the EPT pointer (EPTP) from the current hyper CR3. */
            PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
            pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);

            /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
            Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
            Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
            Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));

            /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
            pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
                                   | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);

            /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
            AssertMsg(   ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3   /* Bits 3:5 (EPT page walk length - 1) must be 3. */
                      && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0,  /* Bits 7:11 MBZ. */
                      ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
            AssertMsg(   !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01)       /* Bit 6 (EPT accessed & dirty bit). */
                      || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
                      ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));

            rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
            AssertRC(rc);
#endif

            PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
            uint64_t u64GuestCr3 = pCtx->cr3;
            if (   VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
                || CPUMIsGuestPagingEnabledEx(pCtx))
            {
                /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
                if (CPUMIsGuestInPAEModeEx(pCtx))
                {
                    rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
                    rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
                    rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
                    rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
                }

                /*
                 * The guest's view of its CR3 is unblemished with nested paging when the
                 * guest is using paging or we have unrestricted guest execution to handle
                 * the guest when it's not using paging.
                 */
            }
#ifndef IN_NEM_DARWIN
            else
            {
                /*
                 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
                 * thinks it accesses physical memory directly, we use our identity-mapped
                 * page table to map guest-linear to guest-physical addresses. EPT takes care
                 * of translating it to host-physical addresses.
                 */
                RTGCPHYS GCPhys;
                Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);

                /* We obtain it here every time as the guest could have relocated this PCI region. */
                rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
                if (RT_SUCCESS(rc))
                { /* likely */ }
                else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
                {
                    Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
                    return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
                }
                else
                    AssertMsgFailedReturn(("%Rrc\n", rc), rc);

                u64GuestCr3 = GCPhys;
            }
#endif

            Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
            rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
            AssertRC(rc);
        }
        else
        {
            Assert(!pVmxTransient->fIsNestedGuest);
            /* Non-nested paging case, just use the hypervisor's CR3. */
            RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);

            Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
            rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
            AssertRC(rc);
        }

        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
    }

    /*
     * Guest CR4.
     * ASSUMES this is done everytime we get in from ring-3! (XCR0)
     */
    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
    {
        PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
        PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;

        /* Hardware-mandated CR4 bits: bits set in fSetCr4 must be 1, bits clear in fZapCr4 must be 0. */
        uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
        uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;

        /*
         * With nested-guests, we may have extended the guest/host mask here (since we
         * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
         * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
         * the nested hypervisor originally supplied. Thus, we should, in essence, copy
         * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
         */
        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
        uint64_t u64GuestCr4 = pCtx->cr4;
        uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
                                    ? pCtx->cr4
                                    : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
        Assert(!RT_HI_U32(u64GuestCr4));

#ifndef IN_NEM_DARWIN
        /*
         * Setup VT-x's view of the guest CR4.
         *
         * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
         * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
         * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
         *
         * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
         */
        if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
        {
            Assert(pVM->hm.s.vmx.pRealModeTSS);
            Assert(PDMVmmDevHeapIsEnabled(pVM));
            u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
        }
#endif

        if (VM_IS_VMX_NESTED_PAGING(pVM))
        {
            if (   !CPUMIsGuestPagingEnabledEx(pCtx)
                && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
            {
                /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
                u64GuestCr4 |= X86_CR4_PSE;
                /* Our identity mapping is a 32-bit page directory. */
                u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
            }
            /* else use guest CR4.*/
        }
        else
        {
            Assert(!pVmxTransient->fIsNestedGuest);

            /*
             * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
             * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
             */
            switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
            {
                case PGMMODE_REAL:              /* Real-mode. */
                case PGMMODE_PROTECTED:         /* Protected mode without paging. */
                case PGMMODE_32_BIT:            /* 32-bit paging. */
                {
                    u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
                    break;
                }

                case PGMMODE_PAE:               /* PAE paging. */
                case PGMMODE_PAE_NX:            /* PAE paging with NX. */
                {
                    u64GuestCr4 |= X86_CR4_PAE;
                    break;
                }

                case PGMMODE_AMD64:             /* 64-bit AMD paging (long mode). */
                case PGMMODE_AMD64_NX:          /* 64-bit AMD paging (long mode) with NX enabled. */
                {
#ifdef VBOX_WITH_64_BITS_GUESTS
                    /* For our assumption in vmxHCShouldSwapEferMsr. */
                    Assert(u64GuestCr4 & X86_CR4_PAE);
                    break;
#endif
                }
                /* Note: without VBOX_WITH_64_BITS_GUESTS the AMD64 cases above intentionally
                   fall through to the default (unsupported shadow paging mode). */
                default:
                    AssertFailed();
                    return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
            }
        }

        /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
        u64GuestCr4 |= fSetCr4;
        u64GuestCr4 &= fZapCr4;

        Assert(!RT_HI_U32(u64GuestCr4));
        Assert(u64GuestCr4 & X86_CR4_VMXE);

        /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
        rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
        rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);

#ifndef IN_NEM_DARWIN
        /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
        bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
        if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
        {
            pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
            hmR0VmxUpdateStartVmFunction(pVCpu);   /* Pick the world-switch function variant matching the new XCR0 policy. */
        }
#endif

        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);

        Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
    }
    return rc;
}
2494
2495
#ifdef VBOX_STRICT
/**
 * Strict function to validate segment registers.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pVmcsInfo   The VMCS info. object.
 *
 * @remarks Will import guest CR0 on strict builds during validation of
 *          segments.
 */
static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
{
    /*
     * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
     *
     * The reason we check for attribute value 0 in this function and not just the unusable bit is
     * because vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
     * unusable bit and doesn't change the guest-context value.
     */
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
    if (   !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
        && (   !CPUMIsGuestInRealModeEx(pCtx)
            && !CPUMIsGuestInV86ModeEx(pCtx)))
    {
        /* Protected mode checks */
        /* CS */
        Assert(pCtx->cs.Attr.n.u1Present);
        Assert(!(pCtx->cs.Attr.u & 0xf00));
        Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
        Assert(   (pCtx->cs.u32Limit & 0xfff) == 0xfff
               || !(pCtx->cs.Attr.n.u1Granularity));
        Assert(   !(pCtx->cs.u32Limit & 0xfff00000)
               || (pCtx->cs.Attr.n.u1Granularity));
        /* CS cannot be loaded with NULL in protected mode. */
        Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
        if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
            Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
        else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
            Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
        else
            AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type)); /* Log the offending type field (was logging the DPL). */
        /* SS */
        Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
        Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
        if (   !(pCtx->cr0 & X86_CR0_PE)
            || pCtx->cs.Attr.n.u4Type == 3)
        {
            Assert(!pCtx->ss.Attr.n.u2Dpl);
        }
        if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
        {
            Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
            Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
            Assert(pCtx->ss.Attr.n.u1Present);
            Assert(!(pCtx->ss.Attr.u & 0xf00));
            Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
            Assert(   (pCtx->ss.u32Limit & 0xfff) == 0xfff
                   || !(pCtx->ss.Attr.n.u1Granularity));
            Assert(   !(pCtx->ss.u32Limit & 0xfff00000)
                   || (pCtx->ss.Attr.n.u1Granularity));
        }
        /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
        if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
        {
            Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
            Assert(pCtx->ds.Attr.n.u1Present);
            Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
            Assert(!(pCtx->ds.Attr.u & 0xf00));
            Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
            Assert(   (pCtx->ds.u32Limit & 0xfff) == 0xfff
                   || !(pCtx->ds.Attr.n.u1Granularity));
            Assert(   !(pCtx->ds.u32Limit & 0xfff00000)
                   || (pCtx->ds.Attr.n.u1Granularity));
            Assert(   !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
                   || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
        }
        if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
        {
            Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
            Assert(pCtx->es.Attr.n.u1Present);
            Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
            Assert(!(pCtx->es.Attr.u & 0xf00));
            Assert(!(pCtx->es.Attr.u & 0xfffe0000));
            Assert(   (pCtx->es.u32Limit & 0xfff) == 0xfff
                   || !(pCtx->es.Attr.n.u1Granularity));
            Assert(   !(pCtx->es.u32Limit & 0xfff00000)
                   || (pCtx->es.Attr.n.u1Granularity));
            Assert(   !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
                   || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
        }
        if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
        {
            Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
            Assert(pCtx->fs.Attr.n.u1Present);
            Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
            Assert(!(pCtx->fs.Attr.u & 0xf00));
            Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
            Assert(   (pCtx->fs.u32Limit & 0xfff) == 0xfff
                   || !(pCtx->fs.Attr.n.u1Granularity));
            Assert(   !(pCtx->fs.u32Limit & 0xfff00000)
                   || (pCtx->fs.Attr.n.u1Granularity));
            Assert(   !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
                   || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
        }
        if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
        {
            Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
            Assert(pCtx->gs.Attr.n.u1Present);
            Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
            Assert(!(pCtx->gs.Attr.u & 0xf00));
            Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
            Assert(   (pCtx->gs.u32Limit & 0xfff) == 0xfff
                   || !(pCtx->gs.Attr.n.u1Granularity));
            Assert(   !(pCtx->gs.u32Limit & 0xfff00000)
                   || (pCtx->gs.Attr.n.u1Granularity));
            Assert(   !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
                   || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
        }
        /* 64-bit capable CPUs. */
        Assert(!RT_HI_U32(pCtx->cs.u64Base));
        Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
        Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
        Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
    }
    else if (   CPUMIsGuestInV86ModeEx(pCtx)
             || (   CPUMIsGuestInRealModeEx(pCtx)
                 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
    {
        /* Real and v86 mode checks. */
        /* vmxHCExportGuestSegReg() writes the modified in VMCS. We want what we're feeding to VT-x. */
        uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
#ifndef IN_NEM_DARWIN
        if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
        {
            u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
            u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
        }
        else
#endif
        {
            u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
            u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
        }

        /* CS */
        AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
        Assert(pCtx->cs.u32Limit == 0xffff);
        AssertMsg(u32CSAttr == 0xf3, ("cs=%#x %#x ", pCtx->cs.Sel, u32CSAttr));
        /* SS */
        Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
        Assert(pCtx->ss.u32Limit == 0xffff);
        Assert(u32SSAttr == 0xf3);
        /* DS */
        Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
        Assert(pCtx->ds.u32Limit == 0xffff);
        Assert(u32DSAttr == 0xf3);
        /* ES */
        Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
        Assert(pCtx->es.u32Limit == 0xffff);
        Assert(u32ESAttr == 0xf3);
        /* FS */
        Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
        Assert(pCtx->fs.u32Limit == 0xffff);
        Assert(u32FSAttr == 0xf3);
        /* GS */
        Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
        Assert(pCtx->gs.u32Limit == 0xffff);
        Assert(u32GSAttr == 0xf3);
        /* 64-bit capable CPUs. */
        Assert(!RT_HI_U32(pCtx->cs.u64Base));
        Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
        Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
        Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
    }
}
#endif /* VBOX_STRICT */
2674
2675
2676/**
2677 * Exports a guest segment register into the guest-state area in the VMCS.
2678 *
2679 * @returns VBox status code.
2680 * @param pVCpu The cross context virtual CPU structure.
2681 * @param pVmcsInfo The VMCS info. object.
2682 * @param iSegReg The segment register number (X86_SREG_XXX).
2683 * @param pSelReg Pointer to the segment selector.
2684 *
2685 * @remarks No-long-jump zone!!!
2686 */
2687static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2688{
2689 Assert(iSegReg < X86_SREG_COUNT);
2690
2691 uint32_t u32Access = pSelReg->Attr.u;
2692#ifndef IN_NEM_DARWIN
2693 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2694#endif
2695 {
2696 /*
2697 * The way to differentiate between whether this is really a null selector or was just
2698 * a selector loaded with 0 in real-mode is using the segment attributes. A selector
2699 * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
2700 * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensures
2701 * NULL selectors loaded in protected-mode have their attribute as 0.
2702 */
2703 if (u32Access)
2704 { }
2705 else
2706 u32Access = X86DESCATTR_UNUSABLE;
2707 }
2708#ifndef IN_NEM_DARWIN
2709 else
2710 {
2711 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2712 u32Access = 0xf3;
2713 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2714 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2715 RT_NOREF_PV(pVCpu);
2716 }
2717#else
2718 RT_NOREF(pVmcsInfo);
2719#endif
2720
2721 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2722 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2723 ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg, pSelReg->Attr.u));
2724
2725 /*
2726 * Commit it to the VMCS.
2727 */
2728 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2729 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2730 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2731 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2732 return VINF_SUCCESS;
2733}
2734
2735
/**
 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
 * area in the VMCS.
 *
 * Each register is exported only when its corresponding HM_CHANGED_GUEST_XXX
 * bit is set in fCtxChanged; the bit is cleared once the VMCS is updated.
 *
 * @returns VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 *
 * @remarks Will import guest CR0 on strict builds during validation of
 *          segments.
 * @remarks No-long-jump zone!!!
 */
static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
{
    int rc = VERR_INTERNAL_ERROR_5;
#ifndef IN_NEM_DARWIN
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
#endif
    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
#ifndef IN_NEM_DARWIN
    PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
#endif

    /*
     * Guest Segment registers: CS, SS, DS, ES, FS, GS.
     */
    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
    {
        if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
        {
            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
#ifndef IN_NEM_DARWIN
            /* Stash the guest's CS attributes; vmxHCExportGuestSegReg() substitutes
               0xf3 access rights in the VMCS while real-on-v86 mode is active. */
            if (pVmcsInfoShared->RealMode.fRealOnV86Active)
                pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
#endif
            rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
            AssertRC(rc);
            ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
        }

        if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
        {
            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
#ifndef IN_NEM_DARWIN
            if (pVmcsInfoShared->RealMode.fRealOnV86Active)
                pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
#endif
            rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
            AssertRC(rc);
            ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
        }

        if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
        {
            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
#ifndef IN_NEM_DARWIN
            if (pVmcsInfoShared->RealMode.fRealOnV86Active)
                pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
#endif
            rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
            AssertRC(rc);
            ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
        }

        if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
        {
            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
#ifndef IN_NEM_DARWIN
            if (pVmcsInfoShared->RealMode.fRealOnV86Active)
                pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
#endif
            rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
            AssertRC(rc);
            ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
        }

        if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
        {
            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
#ifndef IN_NEM_DARWIN
            if (pVmcsInfoShared->RealMode.fRealOnV86Active)
                pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
#endif
            rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
            AssertRC(rc);
            ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
        }

        if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
        {
            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
#ifndef IN_NEM_DARWIN
            if (pVmcsInfoShared->RealMode.fRealOnV86Active)
                pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
#endif
            rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
            AssertRC(rc);
            ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
        }

#ifdef VBOX_STRICT
        vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
#endif
        Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
                  pCtx->cs.Attr.u));
    }

    /*
     * Guest TR.
     */
    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
    {
        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);

        /*
         * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
         * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
         * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
         */
        uint16_t u16Sel;
        uint32_t u32Limit;
        uint64_t u64Base;
        uint32_t u32AccessRights;
#ifndef IN_NEM_DARWIN
        if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
#endif
        {
            u16Sel          = pCtx->tr.Sel;
            u32Limit        = pCtx->tr.u32Limit;
            u64Base         = pCtx->tr.u64Base;
            u32AccessRights = pCtx->tr.Attr.u;
        }
#ifndef IN_NEM_DARWIN
        else
        {
            Assert(!pVmxTransient->fIsNestedGuest);
            Assert(pVM->hm.s.vmx.pRealModeTSS);
            Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */

            /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
            RTGCPHYS GCPhys;
            rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
            AssertRCReturn(rc, rc);

            /* Synthesize a present, busy 32-bit TSS descriptor for the fake TSS. */
            X86DESCATTR DescAttr;
            DescAttr.u = 0;
            DescAttr.n.u1Present = 1;
            DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;

            u16Sel          = 0;
            u32Limit        = HM_VTX_TSS_SIZE;
            u64Base         = GCPhys;
            u32AccessRights = DescAttr.u;
        }
#endif

        /* Validate. */
        Assert(!(u16Sel & RT_BIT(2)));
        AssertMsg(   (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
                  || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
        AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
        Assert(!(u32AccessRights & RT_BIT(4)));                 /* System MBZ.*/
        Assert(u32AccessRights & RT_BIT(7));                    /* Present MB1.*/
        Assert(!(u32AccessRights & 0xf00));                     /* 11:8 MBZ. */
        Assert(!(u32AccessRights & 0xfffe0000));                /* 31:17 MBZ. */
        Assert(   (u32Limit & 0xfff) == 0xfff
               || !(u32AccessRights & RT_BIT(15)));             /* Granularity MBZ. */
        Assert(   !(pCtx->tr.u32Limit & 0xfff00000)
               || (u32AccessRights & RT_BIT(15)));              /* Granularity MB1. */

        rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL,           u16Sel);          AssertRC(rc);
        rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT,         u32Limit);        AssertRC(rc);
        rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
        rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE,            u64Base);         AssertRC(rc);

        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
        Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
    }

    /*
     * Guest GDTR.
     */
    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
    {
        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);

        rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
        rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE,    pCtx->gdtr.pGdt);  AssertRC(rc);

        /* Validate. */
        Assert(!(pCtx->gdtr.cbGdt & 0xffff0000));               /* Bits 31:16 MBZ. */

        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
        Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
    }

    /*
     * Guest LDTR.
     */
    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
    {
        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);

        /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment.
           (For nested-guests the attributes are passed through unchanged, even when null.) */
        uint32_t u32Access;
        if (   !pVmxTransient->fIsNestedGuest
            && !pCtx->ldtr.Attr.u)
            u32Access = X86DESCATTR_UNUSABLE;
        else
            u32Access = pCtx->ldtr.Attr.u;

        rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL,           pCtx->ldtr.Sel);      AssertRC(rc);
        rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT,         pCtx->ldtr.u32Limit); AssertRC(rc);
        rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access);           AssertRC(rc);
        rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE,            pCtx->ldtr.u64Base);  AssertRC(rc);

        /* Validate. */
        if (!(u32Access & X86DESCATTR_UNUSABLE))
        {
            Assert(!(pCtx->ldtr.Sel & RT_BIT(2)));              /* TI MBZ. */
            Assert(pCtx->ldtr.Attr.n.u4Type == 2);              /* Type MB2 (LDT). */
            Assert(!pCtx->ldtr.Attr.n.u1DescType);              /* System MBZ. */
            Assert(pCtx->ldtr.Attr.n.u1Present == 1);           /* Present MB1. */
            Assert(!pCtx->ldtr.Attr.n.u4LimitHigh);             /* 11:8 MBZ. */
            Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000));          /* 31:17 MBZ. */
            Assert(   (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
                   || !pCtx->ldtr.Attr.n.u1Granularity);        /* Granularity MBZ. */
            Assert(   !(pCtx->ldtr.u32Limit & 0xfff00000)
                   || pCtx->ldtr.Attr.n.u1Granularity);         /* Granularity MB1. */
        }

        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
        Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
    }

    /*
     * Guest IDTR.
     */
    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
    {
        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);

        rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
        rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE,    pCtx->idtr.pIdt);  AssertRC(rc);

        /* Validate. */
        Assert(!(pCtx->idtr.cbIdt & 0xffff0000));               /* Bits 31:16 MBZ. */

        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
        Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
    }

    return VINF_SUCCESS;
}
2991
2992
2993/**
2994 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2995 * VM-exit interruption info type.
2996 *
2997 * @returns The IEM exception flags.
2998 * @param uVector The event vector.
2999 * @param uVmxEventType The VMX event type.
3000 *
3001 * @remarks This function currently only constructs flags required for
3002 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g, error-code
3003 * and CR2 aspects of an exception are not included).
3004 */
3005static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
3006{
3007 uint32_t fIemXcptFlags;
3008 switch (uVmxEventType)
3009 {
3010 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
3011 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
3012 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
3013 break;
3014
3015 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
3016 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
3017 break;
3018
3019 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
3020 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
3021 break;
3022
3023 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
3024 {
3025 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
3026 if (uVector == X86_XCPT_BP)
3027 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
3028 else if (uVector == X86_XCPT_OF)
3029 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
3030 else
3031 {
3032 fIemXcptFlags = 0;
3033 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
3034 }
3035 break;
3036 }
3037
3038 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
3039 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
3040 break;
3041
3042 default:
3043 fIemXcptFlags = 0;
3044 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
3045 break;
3046 }
3047 return fIemXcptFlags;
3048}
3049
3050
3051/**
3052 * Sets an event as a pending event to be injected into the guest.
3053 *
3054 * @param pVCpu The cross context virtual CPU structure.
3055 * @param u32IntInfo The VM-entry interruption-information field.
3056 * @param cbInstr The VM-entry instruction length in bytes (for
3057 * software interrupts, exceptions and privileged
3058 * software exceptions).
3059 * @param u32ErrCode The VM-entry exception error code.
3060 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3061 * page-fault.
3062 */
3063DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3064 RTGCUINTPTR GCPtrFaultAddress)
3065{
3066 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3067 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3068 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3069 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3070 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3071 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3072}
3073
3074
3075/**
3076 * Sets an external interrupt as pending-for-injection into the VM.
3077 *
3078 * @param pVCpu The cross context virtual CPU structure.
3079 * @param u8Interrupt The external interrupt vector.
3080 */
3081DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3082{
3083 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3084 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3085 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3086 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3087 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3088 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
3089}
3090
3091
3092/**
3093 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3094 *
3095 * @param pVCpu The cross context virtual CPU structure.
3096 */
3097DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3098{
3099 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3100 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3101 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3102 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3103 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3104 Log4Func(("NMI pending injection\n"));
3105}
3106
3107
3108/**
3109 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3110 *
3111 * @param pVCpu The cross context virtual CPU structure.
3112 */
3113DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3114{
3115 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3116 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3117 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3118 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3119 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3120}
3121
3122
3123/**
3124 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3125 *
3126 * @param pVCpu The cross context virtual CPU structure.
3127 */
3128DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3129{
3130 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3131 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3132 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3133 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3134 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3135}
3136
3137
3138/**
3139 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3140 *
3141 * @param pVCpu The cross context virtual CPU structure.
3142 */
3143DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3144{
3145 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3146 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3147 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3148 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3149 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3150}
3151
3152
3153#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3154/**
3155 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3156 *
3157 * @param pVCpu The cross context virtual CPU structure.
3158 * @param u32ErrCode The error code for the general-protection exception.
3159 */
3160DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3161{
3162 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3163 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3164 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3165 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3166 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3167}
3168
3169
3170/**
3171 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3172 *
3173 * @param pVCpu The cross context virtual CPU structure.
3174 * @param u32ErrCode The error code for the stack exception.
3175 */
3176DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3177{
3178 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3179 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3180 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3181 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3182 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3183}
3184#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3185
3186
/**
 * Fixes up attributes for the specified segment register.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pSelReg     The segment register that needs fixing.
 * @param   pszRegName  The register name (for logging and assertions).
 */
static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
{
    Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);

    /*
     * If VT-x marks the segment as unusable, most other bits remain undefined:
     *   - For CS the L, D and G bits have meaning.
     *   - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
     *   - For the remaining data segments no bits are defined.
     *
     * The present bit and the unusable bit has been observed to be set at the
     * same time (the selector was supposed to be invalid as we started executing
     * a V8086 interrupt in ring-0).
     *
     * What should be important for the rest of the VBox code, is that the P bit is
     * cleared.  Some of the other VBox code recognizes the unusable bit, but
     * AMD-V certainly don't, and REM doesn't really either.  So, to be on the
     * safe side here, we'll strip off P and other bits we don't care about.  If
     * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
     *
     * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
     */
#ifdef VBOX_STRICT
    /* Keep the pre-fixup value for the strict-build logging/assertion below. */
    uint32_t const uAttr = pSelReg->Attr.u;
#endif

    /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
    pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
                     | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;

#ifdef VBOX_STRICT
# ifndef IN_NEM_DARWIN
    /* Disable longjmp-capable ring-3 calls while logging from this context. */
    VMMRZCallRing3Disable(pVCpu);
# endif
    Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
# ifdef DEBUG_bird
    AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
              ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
               pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
# endif
# ifndef IN_NEM_DARWIN
    VMMRZCallRing3Enable(pVCpu);
# endif
    NOREF(uAttr);
#endif
    RT_NOREF2(pVCpu, pszRegName);
}
3241
3242
/**
 * Imports a guest segment register from the current VMCS into the guest-CPU
 * context.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @tparam  a_iSegReg   The segment register number (X86_SREG_XXX).
 *
 * @remarks Called with interrupts and/or preemption disabled.
 */
template<uint32_t const a_iSegReg>
DECLINLINE(void) vmxHCImportGuestSegReg(PVMCPUCC pVCpu)
{
    AssertCompile(a_iSegReg < X86_SREG_COUNT);
    /* Check that the macros we depend upon here and in the export parenter function works: */
#define MY_SEG_VMCS_FIELD(a_FieldPrefix, a_FieldSuff) \
        (  a_iSegReg == X86_SREG_ES ? a_FieldPrefix ## ES ## a_FieldSuff \
         : a_iSegReg == X86_SREG_CS ? a_FieldPrefix ## CS ## a_FieldSuff \
         : a_iSegReg == X86_SREG_SS ? a_FieldPrefix ## SS ## a_FieldSuff \
         : a_iSegReg == X86_SREG_DS ? a_FieldPrefix ## DS ## a_FieldSuff \
         : a_iSegReg == X86_SREG_FS ? a_FieldPrefix ## FS ## a_FieldSuff \
         : a_iSegReg == X86_SREG_GS ? a_FieldPrefix ## GS ## a_FieldSuff : 0)
    AssertCompile(VMX_VMCS_GUEST_SEG_BASE(a_iSegReg)             == MY_SEG_VMCS_FIELD(VMX_VMCS_GUEST_,_BASE));
    AssertCompile(VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg)            == MY_SEG_VMCS_FIELD(VMX_VMCS16_GUEST_,_SEL));
    AssertCompile(VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg)          == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_LIMIT));
    AssertCompile(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg)  == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_ACCESS_RIGHTS));

    PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[a_iSegReg];

    /* Selector: the hidden ValidSel mirrors the selector value read from the VMCS. */
    uint16_t u16Sel;
    int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg), &u16Sel);   AssertRC(rc);
    pSelReg->Sel      = u16Sel;
    pSelReg->ValidSel = u16Sel;

    rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg), &pSelReg->u32Limit);    AssertRC(rc);
    rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(a_iSegReg),    &pSelReg->u64Base);     AssertRC(rc);

    /* Attributes: strip the undefined bits when VT-x flags the segment unusable.
       The packed "ES\0CS\0..." string indexes the 2-char register name for logging. */
    uint32_t u32Attr;
    rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg), &u32Attr);      AssertRC(rc);
    pSelReg->Attr.u = u32Attr;
    if (u32Attr & X86DESCATTR_UNUSABLE)
        vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + a_iSegReg * 3);

    pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
}
3287
3288
3289/**
3290 * Imports the guest LDTR from the VMCS into the guest-CPU context.
3291 *
3292 * @param pVCpu The cross context virtual CPU structure.
3293 *
3294 * @remarks Called with interrupts and/or preemption disabled.
3295 */
3296DECL_FORCE_INLINE(void) vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3297{
3298 uint16_t u16Sel;
3299 uint64_t u64Base;
3300 uint32_t u32Limit, u32Attr;
3301 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3302 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3303 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3304 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3305
3306 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3307 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3308 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3309 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3310 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3311 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3312 if (u32Attr & X86DESCATTR_UNUSABLE)
3313 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3314}
3315
3316
3317/**
3318 * Imports the guest TR from the VMCS into the guest-CPU context.
3319 *
3320 * @param pVCpu The cross context virtual CPU structure.
3321 *
3322 * @remarks Called with interrupts and/or preemption disabled.
3323 */
3324DECL_FORCE_INLINE(void) vmxHCImportGuestTr(PVMCPUCC pVCpu)
3325{
3326 uint16_t u16Sel;
3327 uint64_t u64Base;
3328 uint32_t u32Limit, u32Attr;
3329 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3330 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3331 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3332 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3333
3334 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3335 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3336 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3337 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3338 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3339 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3340 /* TR is the only selector that can never be unusable. */
3341 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3342}
3343
3344
3345/**
3346 * Core: Imports the guest RIP from the VMCS into the guest-CPU context.
3347 *
3348 * @returns The RIP value.
3349 * @param pVCpu The cross context virtual CPU structure.
3350 *
3351 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3352 * @remarks Do -not- call this function directly!
3353 */
3354DECL_FORCE_INLINE(uint64_t) vmxHCImportGuestCoreRip(PVMCPUCC pVCpu)
3355{
3356 uint64_t u64Val;
3357 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3358 AssertRC(rc);
3359
3360 pVCpu->cpum.GstCtx.rip = u64Val;
3361
3362 return u64Val;
3363}
3364
3365
3366/**
3367 * Imports the guest RIP from the VMCS into the guest-CPU context.
3368 *
3369 * @param pVCpu The cross context virtual CPU structure.
3370 *
3371 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3372 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3373 * instead!!!
3374 */
3375DECL_FORCE_INLINE(void) vmxHCImportGuestRip(PVMCPUCC pVCpu)
3376{
3377 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
3378 {
3379 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3380 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3381 }
3382}
3383
3384
3385/**
3386 * Core: Imports the guest RFLAGS from the VMCS into the guest-CPU context.
3387 *
3388 * @param pVCpu The cross context virtual CPU structure.
3389 * @param pVmcsInfo The VMCS info. object.
3390 *
3391 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3392 * @remarks Do -not- call this function directly!
3393 */
3394DECL_FORCE_INLINE(void) vmxHCImportGuestCoreRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3395{
3396 uint64_t fRFlags;
3397 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &fRFlags);
3398 AssertRC(rc);
3399
3400 Assert((fRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
3401 Assert((fRFlags & ~(uint64_t)(X86_EFL_1 | X86_EFL_LIVE_MASK)) == 0);
3402
3403 pVCpu->cpum.GstCtx.rflags.u = fRFlags;
3404#ifndef IN_NEM_DARWIN
3405 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3406 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
3407 { /* mostly likely */ }
3408 else
3409 {
3410 pVCpu->cpum.GstCtx.eflags.Bits.u1VM = 0;
3411 pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3412 }
3413#else
3414 RT_NOREF(pVmcsInfo);
3415#endif
3416}
3417
3418
3419/**
3420 * Imports the guest RFLAGS from the VMCS into the guest-CPU context.
3421 *
3422 * @param pVCpu The cross context virtual CPU structure.
3423 * @param pVmcsInfo The VMCS info. object.
3424 *
3425 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3426 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3427 * instead!!!
3428 */
3429DECL_FORCE_INLINE(void) vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3430{
3431 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RFLAGS)
3432 {
3433 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3434 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3435 }
3436}
3437
3438
3439#ifndef IN_NEM_DARWIN
3440/**
3441 * Imports the guest TSX AUX and certain other MSRs from the VMCS into the guest-CPU
3442 * context.
3443 *
3444 * The other MSRs are in the VM-exit MSR-store.
3445 *
3446 * @returns VBox status code.
3447 * @param pVCpu The cross context virtual CPU structure.
3448 * @param pVmcsInfo The VMCS info. object.
3449 * @param fEFlags Saved EFLAGS for restoring the interrupt flag (in case of
3450 * unexpected errors). Ignored in NEM/darwin context.
3451 */
3452DECL_FORCE_INLINE(int) vmxHCImportGuestTscAuxAndOtherMsrs(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3453{
3454 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3455 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3456 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3457 Assert(pMsrs);
3458 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3459 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3460 for (uint32_t i = 0; i < cMsrs; i++)
3461 {
3462 uint32_t const idMsr = pMsrs[i].u32Msr;
3463 switch (idMsr)
3464 {
3465 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3466 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3467 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3468 default:
3469 {
3470 uint32_t idxLbrMsr;
3471 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3472 if (VM_IS_VMX_LBR(pVM))
3473 {
3474 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3475 {
3476 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3477 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3478 break;
3479 }
3480 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3481 {
3482 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3483 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3484 break;
3485 }
3486 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3487 {
3488 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3489 break;
3490 }
3491 /* Fallthru (no break) */
3492 }
3493 pVCpu->cpum.GstCtx.fExtrn = 0;
3494 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
3495 ASMSetFlags(fEFlags);
3496 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3497 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3498 }
3499 }
3500 }
3501 return VINF_SUCCESS;
3502}
3503#endif /* !IN_NEM_DARWIN */
3504
3505
3506/**
3507 * Imports the guest CR0 from the VMCS into the guest-CPU context.
3508 *
3509 * @param pVCpu The cross context virtual CPU structure.
3510 * @param pVmcsInfo The VMCS info. object.
3511 */
3512DECL_FORCE_INLINE(void) vmxHCImportGuestCr0(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3513{
3514 uint64_t u64Cr0;
3515 uint64_t u64Shadow;
3516 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3517 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3518#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3519 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3520 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3521#else
3522 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
3523 {
3524 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3525 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3526 }
3527 else
3528 {
3529 /*
3530 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3531 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3532 * re-construct CR0. See @bugref{9180#c95} for details.
3533 */
3534 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3535 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3536 u64Cr0 = (u64Cr0 & ~(pVmcsInfoGst->u64Cr0Mask & pVmcsNstGst->u64Cr0Mask.u))
3537 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3538 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3539 Assert(u64Cr0 & X86_CR0_NE);
3540 }
3541#endif
3542
3543#ifndef IN_NEM_DARWIN
3544 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3545#endif
3546 CPUMSetGuestCR0(pVCpu, u64Cr0);
3547#ifndef IN_NEM_DARWIN
3548 VMMRZCallRing3Enable(pVCpu);
3549#endif
3550}
3551
3552
3553/**
3554 * Imports the guest CR3 from the VMCS into the guest-CPU context.
3555 *
3556 * @param pVCpu The cross context virtual CPU structure.
3557 */
3558DECL_FORCE_INLINE(void) vmxHCImportGuestCr3(PVMCPUCC pVCpu)
3559{
3560 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3561 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3562
3563 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3564 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3565 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3566 && CPUMIsGuestPagingEnabledEx(pCtx)))
3567 {
3568 uint64_t u64Cr3;
3569 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3570 if (pCtx->cr3 != u64Cr3)
3571 {
3572 pCtx->cr3 = u64Cr3;
3573 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3574 }
3575
3576 /*
3577 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3578 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3579 */
3580 if (CPUMIsGuestInPAEModeEx(pCtx))
3581 {
3582 X86PDPE aPaePdpes[4];
3583 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3584 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3585 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3586 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3587 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3588 {
3589 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3590 /* PGM now updates PAE PDPTEs while updating CR3. */
3591 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3592 }
3593 }
3594 }
3595}
3596
3597
3598/**
3599 * Imports the guest CR4 from the VMCS into the guest-CPU context.
3600 *
3601 * @param pVCpu The cross context virtual CPU structure.
3602 * @param pVmcsInfo The VMCS info. object.
3603 */
3604DECL_FORCE_INLINE(void) vmxHCImportGuestCr4(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3605{
3606 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3607 uint64_t u64Cr4;
3608 uint64_t u64Shadow;
3609 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3610 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3611#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3612 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3613 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3614#else
3615 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3616 {
3617 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3618 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3619 }
3620 else
3621 {
3622 /*
3623 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3624 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3625 * re-construct CR4. See @bugref{9180#c95} for details.
3626 */
3627 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3628 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3629 u64Cr4 = (u64Cr4 & ~(pVmcsInfo->u64Cr4Mask & pVmcsNstGst->u64Cr4Mask.u))
3630 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3631 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3632 Assert(u64Cr4 & X86_CR4_VMXE);
3633 }
3634#endif
3635 pCtx->cr4 = u64Cr4;
3636}
3637
3638
3639/**
3640 * Worker for vmxHCImportGuestIntrState that handles the case where any of the
3641 * relevant VMX_VMCS32_GUEST_INT_STATE bits are set.
3642 */
3643DECL_NO_INLINE(static,void) vmxHCImportGuestIntrStateSlow(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fGstIntState)
3644{
3645 /*
3646 * We must import RIP here to set our EM interrupt-inhibited state.
3647 * We also import RFLAGS as our code that evaluates pending interrupts
3648 * before VM-entry requires it.
3649 */
3650 vmxHCImportGuestRip(pVCpu);
3651 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3652
3653 CPUMUpdateInterruptShadowSsStiEx(&pVCpu->cpum.GstCtx,
3654 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
3655 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
3656 pVCpu->cpum.GstCtx.rip);
3657 CPUMUpdateInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx, RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
3658}
3659
3660
3661/**
3662 * Imports the guest interruptibility-state from the VMCS into the guest-CPU
3663 * context.
3664 *
3665 * @note May import RIP and RFLAGS if interrupt or NMI are blocked.
3666 *
3667 * @param pVCpu The cross context virtual CPU structure.
3668 * @param pVmcsInfo The VMCS info. object.
3669 *
3670 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3671 * do not log!
3672 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3673 * instead!!!
3674 */
3675DECLINLINE(void) vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3676{
3677 uint32_t u32Val;
3678 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3679 Assert((u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
3680 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
3681 if (!u32Val)
3682 {
3683 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
3684 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
3685 }
3686 else
3687 vmxHCImportGuestIntrStateSlow(pVCpu, pVmcsInfo, u32Val);
3688}
3689
3690
3691/**
3692 * Worker for VMXR0ImportStateOnDemand.
3693 *
3694 * @returns VBox status code.
3695 * @param pVCpu The cross context virtual CPU structure.
3696 * @param pVmcsInfo The VMCS info. object.
3697 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3698 */
3699static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3700{
3701 int rc = VINF_SUCCESS;
3702 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3703 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3704 uint32_t u32Val;
3705
3706 /*
3707 * Note! This is hack to workaround a mysterious BSOD observed with release builds
3708 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3709 * neither are other host platforms.
3710 *
3711 * Committing this temporarily as it prevents BSOD.
3712 *
3713 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3714 */
3715#ifdef RT_OS_WINDOWS
3716 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3717 return VERR_HM_IPE_1;
3718#endif
3719
3720 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3721
3722#ifndef IN_NEM_DARWIN
3723 /*
3724 * We disable interrupts to make the updating of the state and in particular
3725 * the fExtrn modification atomic wrt to preemption hooks.
3726 */
3727 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3728#endif
3729
3730 fWhat &= pCtx->fExtrn;
3731 if (fWhat)
3732 {
3733 do
3734 {
3735 if (fWhat & CPUMCTX_EXTRN_RIP)
3736 vmxHCImportGuestRip(pVCpu);
3737
3738 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3739 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3740
3741 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3742 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3743 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3744
3745 if (fWhat & CPUMCTX_EXTRN_RSP)
3746 {
3747 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3748 AssertRC(rc);
3749 }
3750
3751 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3752 {
3753 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3754#ifndef IN_NEM_DARWIN
3755 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3756#else
3757 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3758#endif
3759 if (fWhat & CPUMCTX_EXTRN_CS)
3760 {
3761 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3762 vmxHCImportGuestRip(pVCpu); /** @todo WTF? */
3763 if (fRealOnV86Active)
3764 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3765 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3766 }
3767 if (fWhat & CPUMCTX_EXTRN_SS)
3768 {
3769 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3770 if (fRealOnV86Active)
3771 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3772 }
3773 if (fWhat & CPUMCTX_EXTRN_DS)
3774 {
3775 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3776 if (fRealOnV86Active)
3777 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3778 }
3779 if (fWhat & CPUMCTX_EXTRN_ES)
3780 {
3781 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3782 if (fRealOnV86Active)
3783 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3784 }
3785 if (fWhat & CPUMCTX_EXTRN_FS)
3786 {
3787 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3788 if (fRealOnV86Active)
3789 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3790 }
3791 if (fWhat & CPUMCTX_EXTRN_GS)
3792 {
3793 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3794 if (fRealOnV86Active)
3795 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3796 }
3797 }
3798
3799 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3800 {
3801 if (fWhat & CPUMCTX_EXTRN_LDTR)
3802 vmxHCImportGuestLdtr(pVCpu);
3803
3804 if (fWhat & CPUMCTX_EXTRN_GDTR)
3805 {
3806 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3807 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3808 pCtx->gdtr.cbGdt = u32Val;
3809 }
3810
3811 /* Guest IDTR. */
3812 if (fWhat & CPUMCTX_EXTRN_IDTR)
3813 {
3814 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3815 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3816 pCtx->idtr.cbIdt = u32Val;
3817 }
3818
3819 /* Guest TR. */
3820 if (fWhat & CPUMCTX_EXTRN_TR)
3821 {
3822#ifndef IN_NEM_DARWIN
3823 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3824 don't need to import that one. */
3825 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3826#endif
3827 vmxHCImportGuestTr(pVCpu);
3828 }
3829 }
3830
3831 if (fWhat & CPUMCTX_EXTRN_DR7)
3832 {
3833#ifndef IN_NEM_DARWIN
3834 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3835#endif
3836 {
3837 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3838 AssertRC(rc);
3839 }
3840 }
3841
3842 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3843 {
3844 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3845 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3846 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3847 pCtx->SysEnter.cs = u32Val;
3848 }
3849
3850#ifndef IN_NEM_DARWIN
3851 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3852 {
3853 if ( pVM->hmr0.s.fAllow64BitGuests
3854 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3855 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3856 }
3857
3858 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3859 {
3860 if ( pVM->hmr0.s.fAllow64BitGuests
3861 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3862 {
3863 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3864 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3865 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3866 }
3867 }
3868
3869 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3870 {
3871 rc = vmxHCImportGuestTscAuxAndOtherMsrs(pVCpu, pVmcsInfo, fEFlags);
3872 AssertRCReturn(rc, rc);
3873 }
3874#else
3875 NOREF(pVM);
3876#endif
3877
3878 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3879 {
3880 if (fWhat & CPUMCTX_EXTRN_CR0)
3881 vmxHCImportGuestCr0(pVCpu, pVmcsInfo);
3882
3883 if (fWhat & CPUMCTX_EXTRN_CR4)
3884 vmxHCImportGuestCr4(pVCpu, pVmcsInfo);
3885
3886 if (fWhat & CPUMCTX_EXTRN_CR3)
3887 vmxHCImportGuestCr3(pVCpu);
3888 }
3889
3890#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3891 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3892 {
3893 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3894 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3895 {
3896 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3897 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3898 if (RT_SUCCESS(rc))
3899 { /* likely */ }
3900 else
3901 break;
3902 }
3903 }
3904#endif
3905 } while (0);
3906
3907 if (RT_SUCCESS(rc))
3908 {
3909 /* Update fExtrn. */
3910 pCtx->fExtrn &= ~fWhat;
3911
3912 /* If everything has been imported, clear the HM keeper bit. */
3913 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3914 {
3915#ifndef IN_NEM_DARWIN
3916 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3917#else
3918 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3919#endif
3920 Assert(!pCtx->fExtrn);
3921 }
3922 }
3923 }
3924#ifndef IN_NEM_DARWIN
3925 else
3926 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3927
3928 /*
3929 * Restore interrupts.
3930 */
3931 ASMSetFlags(fEFlags);
3932#endif
3933
3934 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3935
3936 if (RT_SUCCESS(rc))
3937 { /* likely */ }
3938 else
3939 return rc;
3940
3941 /*
3942 * Honor any pending CR3 updates.
3943 *
3944 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3945 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3946 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3947 *
3948 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3949 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3950 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3951 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3952 *
3953 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3954 *
3955 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3956 */
3957 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3958#ifndef IN_NEM_DARWIN
3959 && VMMRZCallRing3IsEnabled(pVCpu)
3960#endif
3961 )
3962 {
3963 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3964 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3965 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3966 }
3967
3968 return VINF_SUCCESS;
3969}
3970
3971
3972/**
3973 * Internal state fetcher, inner version where we fetch all of a_fWhat.
3974 *
3975 * @returns VBox status code.
3976 * @param pVCpu The cross context virtual CPU structure.
3977 * @param pVmcsInfo The VMCS info. object.
3978 * @param fEFlags Saved EFLAGS for restoring the interrupt flag. Ignored
3979 * in NEM/darwin context.
3980 * @tparam a_fWhat What to import, zero or more bits from
3981 * HMVMX_CPUMCTX_EXTRN_ALL.
3982 */
3983template<uint64_t const a_fWhat>
3984static int vmxHCImportGuestStateInner(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3985{
3986 Assert(a_fWhat != 0); /* No AssertCompile as the assertion probably kicks in before the compiler (clang) discards it. */
3987 AssertCompile(!(a_fWhat & ~HMVMX_CPUMCTX_EXTRN_ALL));
3988 Assert( (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == a_fWhat
3989 || (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == (a_fWhat & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
3990
3991 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3992
3993 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3994
3995 /* RIP and RFLAGS may have been imported already by the post exit code
3996 together with the CPUMCTX_EXTRN_INHIBIT_INT/NMI state, so this part
3997 of the code is skipping this part of the code. */
3998 if ( (a_fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3999 && pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
4000 {
4001 if (a_fWhat & CPUMCTX_EXTRN_RFLAGS)
4002 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
4003
4004 if (a_fWhat & CPUMCTX_EXTRN_RIP)
4005 {
4006 if (!(a_fWhat & CPUMCTX_EXTRN_CS))
4007 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
4008 else
4009 vmxHCImportGuestCoreRip(pVCpu);
4010 }
4011 }
4012
4013 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
4014 if (a_fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
4015 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
4016
4017 if (a_fWhat & (CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TR))
4018 {
4019 if (a_fWhat & CPUMCTX_EXTRN_CS)
4020 {
4021 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
4022 /** @todo try get rid of this carp, it smells and is probably never ever
4023 * used: */
4024 if ( !(a_fWhat & CPUMCTX_EXTRN_RIP)
4025 && (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP))
4026 {
4027 vmxHCImportGuestCoreRip(pVCpu);
4028 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
4029 }
4030 EMHistoryUpdatePC(pVCpu, pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, true /* fFlattened */);
4031 }
4032 if (a_fWhat & CPUMCTX_EXTRN_SS)
4033 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
4034 if (a_fWhat & CPUMCTX_EXTRN_DS)
4035 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
4036 if (a_fWhat & CPUMCTX_EXTRN_ES)
4037 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
4038 if (a_fWhat & CPUMCTX_EXTRN_FS)
4039 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
4040 if (a_fWhat & CPUMCTX_EXTRN_GS)
4041 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
4042
4043 /* Guest TR.
4044 Real-mode emulation using virtual-8086 mode has the fake TSS
4045 (pRealModeTSS) in TR, don't need to import that one. */
4046#ifndef IN_NEM_DARWIN
4047 PVMXVMCSINFOSHARED const pVmcsInfoShared = pVmcsInfo->pShared;
4048 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
4049 if ((a_fWhat & CPUMCTX_EXTRN_TR) && !fRealOnV86Active)
4050#else
4051 if (a_fWhat & CPUMCTX_EXTRN_TR)
4052#endif
4053 vmxHCImportGuestTr(pVCpu);
4054
4055#ifndef IN_NEM_DARWIN /* NEM/Darwin: HV supports only unrestricted guest execution. */
4056 if (fRealOnV86Active)
4057 {
4058 if (a_fWhat & CPUMCTX_EXTRN_CS)
4059 pVCpu->cpum.GstCtx.cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
4060 if (a_fWhat & CPUMCTX_EXTRN_SS)
4061 pVCpu->cpum.GstCtx.ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
4062 if (a_fWhat & CPUMCTX_EXTRN_DS)
4063 pVCpu->cpum.GstCtx.ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
4064 if (a_fWhat & CPUMCTX_EXTRN_ES)
4065 pVCpu->cpum.GstCtx.es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
4066 if (a_fWhat & CPUMCTX_EXTRN_FS)
4067 pVCpu->cpum.GstCtx.fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
4068 if (a_fWhat & CPUMCTX_EXTRN_GS)
4069 pVCpu->cpum.GstCtx.gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
4070 }
4071#endif
4072 }
4073
4074 if (a_fWhat & CPUMCTX_EXTRN_RSP)
4075 {
4076 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pVCpu->cpum.GstCtx.rsp);
4077 AssertRC(rc);
4078 }
4079
4080 if (a_fWhat & CPUMCTX_EXTRN_LDTR)
4081 vmxHCImportGuestLdtr(pVCpu);
4082
4083 if (a_fWhat & CPUMCTX_EXTRN_GDTR)
4084 {
4085 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pVCpu->cpum.GstCtx.gdtr.pGdt); AssertRC(rc1);
4086 uint32_t u32Val;
4087 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc2);
4088 pVCpu->cpum.GstCtx.gdtr.cbGdt = (uint16_t)u32Val;
4089 }
4090
4091 /* Guest IDTR. */
4092 if (a_fWhat & CPUMCTX_EXTRN_IDTR)
4093 {
4094 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pVCpu->cpum.GstCtx.idtr.pIdt); AssertRC(rc1);
4095 uint32_t u32Val;
4096 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc2);
4097 pVCpu->cpum.GstCtx.idtr.cbIdt = (uint64_t)u32Val;
4098 }
4099
4100 if (a_fWhat & CPUMCTX_EXTRN_DR7)
4101 {
4102#ifndef IN_NEM_DARWIN
4103 if (!pVCpu->hmr0.s.fUsingHyperDR7)
4104#endif
4105 {
4106 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pVCpu->cpum.GstCtx.dr[7]);
4107 AssertRC(rc);
4108 }
4109 }
4110
4111 if (a_fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
4112 {
4113 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pVCpu->cpum.GstCtx.SysEnter.eip); AssertRC(rc1);
4114 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pVCpu->cpum.GstCtx.SysEnter.esp); AssertRC(rc2);
4115 uint32_t u32Val;
4116 int const rc3 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc3);
4117 pVCpu->cpum.GstCtx.SysEnter.cs = u32Val;
4118 }
4119
4120#ifndef IN_NEM_DARWIN
4121 if (a_fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
4122 {
4123 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4124 && pVM->hmr0.s.fAllow64BitGuests)
4125 pVCpu->cpum.GstCtx.msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
4126 }
4127
4128 if (a_fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
4129 {
4130 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4131 && pVM->hmr0.s.fAllow64BitGuests)
4132 {
4133 pVCpu->cpum.GstCtx.msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
4134 pVCpu->cpum.GstCtx.msrSTAR = ASMRdMsr(MSR_K6_STAR);
4135 pVCpu->cpum.GstCtx.msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
4136 }
4137 }
4138
4139 if (a_fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
4140 {
4141 int const rc1 = vmxHCImportGuestTscAuxAndOtherMsrs(pVCpu, pVmcsInfo, fEFlags);
4142 AssertRCReturn(rc1, rc1);
4143 }
4144#else
4145 NOREF(pVM);
4146#endif
4147
4148 if (a_fWhat & CPUMCTX_EXTRN_CR0)
4149 vmxHCImportGuestCr0(pVCpu, pVmcsInfo);
4150
4151 if (a_fWhat & CPUMCTX_EXTRN_CR4)
4152 vmxHCImportGuestCr4(pVCpu, pVmcsInfo);
4153
4154 if (a_fWhat & CPUMCTX_EXTRN_CR3)
4155 vmxHCImportGuestCr3(pVCpu);
4156
4157#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4158 if (a_fWhat & CPUMCTX_EXTRN_HWVIRT)
4159 {
4160 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4161 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4162 {
4163 Assert(CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx));
4164 int const rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
4165 AssertRCReturn(rc, rc);
4166 }
4167 }
4168#endif
4169
4170 /* Update fExtrn. */
4171 pVCpu->cpum.GstCtx.fExtrn &= ~a_fWhat;
4172
4173 /* If everything has been imported, clear the HM keeper bit. */
4174 if (!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
4175 {
4176#ifndef IN_NEM_DARWIN
4177 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
4178#else
4179 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
4180#endif
4181 Assert(!pVCpu->cpum.GstCtx.fExtrn);
4182 }
4183
4184 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
4185
4186 /*
4187 * Honor any pending CR3 updates.
4188 *
4189 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
4190 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
4191 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
4192 *
4193 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
4194 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
4195 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
4196 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
4197 *
4198 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
4199 *
4200 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
4201 */
4202#ifndef IN_NEM_DARWIN
4203 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4204 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu))
4205 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu) )
4206 return VINF_SUCCESS;
4207 ASMSetFlags(fEFlags);
4208#else
4209 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4210 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
4211 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) )
4212 return VINF_SUCCESS;
4213 RT_NOREF_PV(fEFlags);
4214#endif
4215
4216 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
4217 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
4218 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
4219 return VINF_SUCCESS;
4220}
4221
4222
/**
 * Internal state fetcher.
 *
 * Templated front-end for guest-state import: computes at compile time which
 * CPUMCTX_EXTRN_XXX bits still need fetching and dispatches to the inlined
 * fast path (vmxHCImportGuestStateInner) or the generic fallback.
 *
 * @returns VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmcsInfo       The VMCS info. object.
 * @param   pszCaller       For logging.
 * @tparam  a_fWhat         What needs to be imported, CPUMCTX_EXTRN_XXX.
 * @tparam  a_fDoneLocal    What's ASSUMED to have been retrieved locally
 *                          already.  This is ORed together with @a a_fWhat when
 *                          calculating what needs fetching (just for safety).
 * @tparam  a_fDonePostExit What's ASSUMED to have been retrieved by
 *                          hmR0VmxPostRunGuest()/nemR3DarwinHandleExitCommon()
 *                          already.  This is ORed together with @a a_fWhat when
 *                          calculating what needs fetching (just for safety).
 */
template<uint64_t const a_fWhat,
         uint64_t const a_fDoneLocal = 0,
         uint64_t const a_fDonePostExit = 0
#ifndef IN_NEM_DARWIN
                                        | CPUMCTX_EXTRN_INHIBIT_INT
                                        | CPUMCTX_EXTRN_INHIBIT_NMI
# if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
                                        | HMVMX_CPUMCTX_EXTRN_ALL
# elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
                                        | CPUMCTX_EXTRN_RFLAGS
# endif
#else  /* IN_NEM_DARWIN */
                                        | CPUMCTX_EXTRN_ALL /** @todo optimize */
#endif /* IN_NEM_DARWIN */
        >
DECLINLINE(int) vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, const char *pszCaller)
{
    RT_NOREF_PV(pszCaller);
    if ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL)
    {
#ifndef IN_NEM_DARWIN
        /*
         * We disable interrupts to make the updating of the state and in particular
         * the fExtrn modification atomic wrt to preemption hooks.
         */
        RTCCUINTREG const fEFlags = ASMIntDisableFlags();
#else
        RTCCUINTREG const fEFlags = 0;
#endif

        /*
         * We combine all three parameters and take the (probably) inlined optimized
         * code path for the new things specified in a_fWhat.
         *
         * As a tweak to deal with exits that have INHIBIT_INT/NMI active, causing
         * vmxHCImportGuestIntrState to automatically fetch both RIP & RFLAGS, we
         * also take the streamlined path when both of these are cleared in fExtrn
         * already.  vmxHCImportGuestStateInner checks fExtrn before fetching.  This
         * helps with MWAIT and HLT exits that always inhibit IRQs on many platforms.
         */
        uint64_t const fWhatToDo = pVCpu->cpum.GstCtx.fExtrn
                                 & ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL);
        if (RT_LIKELY(   (   fWhatToDo == (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit))
                          || fWhatToDo == (  a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)
                                           & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)) /* fetch with INHIBIT_INT/NMI */))
                      && (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)) != 0 /* just in case */)
        {
            /* Fast path: the template argument prunes the fetch set at compile time. */
            int const rc = vmxHCImportGuestStateInner<  a_fWhat
                                                      & HMVMX_CPUMCTX_EXTRN_ALL
                                                      & ~(a_fDoneLocal | a_fDonePostExit)>(pVCpu, pVmcsInfo, fEFlags);
#ifndef IN_NEM_DARWIN
            ASMSetFlags(fEFlags);
#endif
            return rc;
        }

#ifndef IN_NEM_DARWIN
        ASMSetFlags(fEFlags);
#endif

        /*
         * We shouldn't normally get here, but it may happen when executing
         * in the debug run-loops.  Typically, everything should already have
         * been fetched then.  Otherwise call the fallback state import function.
         */
        if (fWhatToDo == 0)
        { /* hope the cause was the debug loop or something similar */ }
        else
        {
            STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestStateFallback);
            Log11Func(("a_fWhat=%#RX64/%#RX64/%#RX64 fExtrn=%#RX64 => %#RX64 - Taking inefficient code path from %s!\n",
                       a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL, a_fDoneLocal & HMVMX_CPUMCTX_EXTRN_ALL,
                       a_fDonePostExit & HMVMX_CPUMCTX_EXTRN_ALL, pVCpu->cpum.GstCtx.fExtrn, fWhatToDo, pszCaller));
            return vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, a_fWhat | a_fDoneLocal | a_fDonePostExit);
        }
    }
    return VINF_SUCCESS;
}
4317
4318
/**
 * Check per-VM and per-VCPU force flag actions that require us to go back to
 * ring-3 for one reason or another.
 *
 * @returns Strict VBox status code (i.e. informational status codes too)
 * @retval VINF_SUCCESS if we don't have any actions that require going back to
 *         ring-3.
 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
 *         interrupts)
 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
 *         all EMTs to be in ring-3.
 * @retval VINF_EM_RAW_TO_R3 if there is pending DMA requests.
 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
 *         to the EM loop.
 *
 * @param pVCpu          The cross context virtual CPU structure.
 * @param fIsNestedGuest Flag whether this is for a pending nested guest event.
 * @param fStepping      Whether we are single-stepping the guest using the
 *                       hypervisor debugger.
 *
 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
 *          is no longer in VMX non-root mode.
 */
static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
{
#ifndef IN_NEM_DARWIN
    Assert(VMMRZCallRing3IsEnabled(pVCpu));
#endif

    /*
     * Update pending interrupts into the APIC's IRR.
     */
    if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
        APICUpdatePendingInterrupts(pVCpu);

    /*
     * Anything pending?  Should be more likely than not if we're doing a good job.
     * Note: the stepping masks are wider, so stepping bails out more eagerly.
     */
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    if (  !fStepping
        ?    !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
          && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
        :    !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
          && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
        return VINF_SUCCESS;

    /* Pending PGM CR3 sync. */
    if (VMCPU_FF_IS_ANY_SET(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
    {
        PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
        /* CR0/CR3/CR4 must already have been imported, PGMSyncCR3 consumes them directly. */
        Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
        VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
                                           VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
        if (rcStrict != VINF_SUCCESS)
        {
            AssertRC(VBOXSTRICTRC_VAL(rcStrict));
            Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
            return rcStrict;
        }
    }

    /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
    if (   VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
        || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
    {
        STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
        int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
        Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d (fVM=%#RX64 fCpu=%#RX64)\n",
                  rc, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
        return rc;
    }

    /* Pending VM request packets, such as hardware interrupts. */
    if (   VM_FF_IS_SET(pVM, VM_FF_REQUEST)
        || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
    {
        STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
        Log4Func(("Pending VM request forcing us back to ring-3\n"));
        return VINF_EM_PENDING_REQUEST;
    }

    /* Pending PGM pool flushes. */
    if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
    {
        STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
        Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
        return VINF_PGM_POOL_FLUSH_PENDING;
    }

    /* Pending DMA requests. */
    if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
    {
        STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
        Log4Func(("Pending DMA request forcing us back to ring-3\n"));
        return VINF_EM_RAW_TO_R3;
    }

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    /*
     * Pending nested-guest events.
     *
     * Please note the priority of these events are specified and important.
     * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
     * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
     *
     * Interrupt-window and NMI-window VM-exits for the nested-guest need not be
     * handled here. They'll be handled by the hardware while executing the nested-guest
     * or by us when we injecting events that are not part of VM-entry of the nested-guest.
     */
    if (fIsNestedGuest)
    {
        /* Pending nested-guest APIC-write (may or may not cause a VM-exit). */
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
        {
            Log4Func(("Pending nested-guest APIC-write\n"));
            VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
            Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
            /* The APIC-write emulation may have caused a VM-exit out of non-root mode;
               only continue execution here if the guest stayed in non-root mode. */
            if (    rcStrict == VINF_SUCCESS
                && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
                return rcStrict;
        }

        /* Pending nested-guest monitor-trap flag (MTF). */
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
        {
            Log4Func(("Pending nested-guest MTF\n"));
            VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
            Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
            return rcStrict;
        }

        /* Pending nested-guest VMX-preemption timer expired. */
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
        {
            Log4Func(("Pending nested-guest preempt timer\n"));
            VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
            Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
            return rcStrict;
        }
    }
#else
    NOREF(fIsNestedGuest);
#endif

    return VINF_SUCCESS;
}
4466
4467
4468/**
4469 * Converts any TRPM trap into a pending HM event. This is typically used when
4470 * entering from ring-3 (not longjmp returns).
4471 *
4472 * @param pVCpu The cross context virtual CPU structure.
4473 */
4474static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
4475{
4476 Assert(TRPMHasTrap(pVCpu));
4477 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4478
4479 uint8_t uVector;
4480 TRPMEVENT enmTrpmEvent;
4481 uint32_t uErrCode;
4482 RTGCUINTPTR GCPtrFaultAddress;
4483 uint8_t cbInstr;
4484 bool fIcebp;
4485
4486 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4487 AssertRC(rc);
4488
4489 uint32_t u32IntInfo;
4490 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4491 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
4492
4493 rc = TRPMResetTrap(pVCpu);
4494 AssertRC(rc);
4495 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4496 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4497
4498 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4499}
4500
4501
/**
 * Converts the pending HM event into a TRPM trap.
 *
 * Used on the way out to ring-3 so the event isn't lost; the reverse of
 * vmxHCTrpmTrapToPendingEvent().
 *
 * @param pVCpu The cross context virtual CPU structure.
 */
static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
{
    Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);

    /* If a trap was already pending, we did something wrong! */
    Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);

    uint32_t const  u32IntInfo  = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
    uint32_t const  uVector     = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
    TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);

    Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));

    int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
    AssertRC(rc);

    if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
        TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);

    if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
        TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
    else
    {
        /* Software-generated events (INT n, INT3, INTO, ICEBP) carry an instruction
           length that TRPM needs for re-injection; hardware events do not. */
        uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
        switch (uVectorType)
        {
            case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
                /* ICEBP (INT1) is a privileged software exception; mark it as such
                   and deliberately fall through to set the instruction length too. */
                TRPMSetTrapDueToIcebp(pVCpu);
                RT_FALL_THRU();
            case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
            case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
            {
                AssertMsg(   uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
                          || (   uVector == X86_XCPT_BP /* INT3 */
                              || uVector == X86_XCPT_OF /* INTO */
                              || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
                          ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
                TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
                break;
            }
        }
    }

    /* We're now done converting the pending event. */
    VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
}
4553
4554
4555/**
4556 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4557 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4558 *
4559 * @param pVCpu The cross context virtual CPU structure.
4560 * @param pVmcsInfo The VMCS info. object.
4561 */
4562static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4563{
4564 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4565 {
4566 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4567 {
4568 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4569 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4570 AssertRC(rc);
4571 }
4572 Log4Func(("Enabled interrupt-window exiting\n"));
4573 } /* else we will deliver interrupts whenever the guest Vm-exits next and is in a state to receive the interrupt. */
4574}
4575
4576
4577/**
4578 * Clears the interrupt-window exiting control in the VMCS.
4579 *
4580 * @param pVCpu The cross context virtual CPU structure.
4581 * @param pVmcsInfo The VMCS info. object.
4582 */
4583DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4584{
4585 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4586 {
4587 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4588 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4589 AssertRC(rc);
4590 Log4Func(("Disabled interrupt-window exiting\n"));
4591 }
4592}
4593
4594
4595/**
4596 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4597 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4598 *
4599 * @param pVCpu The cross context virtual CPU structure.
4600 * @param pVmcsInfo The VMCS info. object.
4601 */
4602static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4603{
4604 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4605 {
4606 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4607 {
4608 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4609 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4610 AssertRC(rc);
4611 Log4Func(("Enabled NMI-window exiting\n"));
4612 }
4613 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4614}
4615
4616
4617/**
4618 * Clears the NMI-window exiting control in the VMCS.
4619 *
4620 * @param pVCpu The cross context virtual CPU structure.
4621 * @param pVmcsInfo The VMCS info. object.
4622 */
4623DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4624{
4625 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4626 {
4627 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4628 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4629 AssertRC(rc);
4630 Log4Func(("Disabled NMI-window exiting\n"));
4631 }
4632}
4633
4634
/**
 * Injects an event into the guest upon VM-entry by updating the relevant fields
 * in the VM-entry area in the VMCS.
 *
 * @returns Strict VBox status code (i.e. informational status codes too).
 * @retval  VINF_SUCCESS if the event is successfully injected into the VMCS.
 * @retval  VINF_EM_RESET if event injection resulted in a triple-fault.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmcsInfo       The VMCS info object.
 * @param   fIsNestedGuest  Flag whether this is for a pending nested guest event.
 * @param   pEvent          The event being injected.
 * @param   pfIntrState     Pointer to the VT-x guest-interruptibility-state.  This
 *                          will be updated if necessary.  This cannot be NULL.
 * @param   fStepping       Whether we're single-stepping guest execution and should
 *                          return VINF_EM_DBG_STEPPED if the event is injected
 *                          directly (registers modified by us, not by hardware on
 *                          VM-entry).
 */
static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
                                         bool fStepping, uint32_t *pfIntrState)
{
    /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
    AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
    Assert(pfIntrState);

#ifdef IN_NEM_DARWIN
    RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
#endif

    /* Unpack the event descriptor into its components. */
    PCPUMCTX          pCtx       = &pVCpu->cpum.GstCtx;
    uint32_t          u32IntInfo = pEvent->u64IntInfo;
    uint32_t const    u32ErrCode = pEvent->u32ErrCode;
    uint32_t const    cbInstr    = pEvent->cbInstr;
    RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
    uint8_t const     uVector    = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
    uint32_t const    uIntType   = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);

#ifdef VBOX_STRICT
    /*
     * Validate the error-code-valid bit for hardware exceptions.
     * No error codes for exceptions in real-mode.
     *
     * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
     */
    if (   uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
        && !CPUMIsGuestInRealModeEx(pCtx))
    {
        switch (uVector)
        {
            case X86_XCPT_PF:
            case X86_XCPT_DF:
            case X86_XCPT_TS:
            case X86_XCPT_NP:
            case X86_XCPT_SS:
            case X86_XCPT_GP:
            case X86_XCPT_AC:
                AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
                          ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
                RT_FALL_THRU();
            default:
                break;
        }
    }

    /* Cannot inject an NMI when block-by-MOV SS is in effect. */
    Assert(   uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
           || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
#endif

    RT_NOREF(uVector);
    /* Keep per-vector injection statistics (exceptions vs. interrupts). */
    if (   uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
        || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
        || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
        || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
    {
        Assert(uVector <= X86_XCPT_LAST);
        Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI          || uVector == X86_XCPT_NMI);
        Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
        STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
    }
    else
        STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);

    /*
     * Hardware interrupts & exceptions cannot be delivered through the software interrupt
     * redirection bitmap to the real mode task in virtual-8086 mode.  We must jump to the
     * interrupt handler in the (real-mode) guest.
     *
     * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
     * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
     */
    if (CPUMIsGuestInRealModeEx(pCtx))     /* CR0.PE bit changes are always intercepted, so it's up to date. */
    {
#ifndef IN_NEM_DARWIN
        if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
#endif
        {
            /*
             * For CPUs with unrestricted guest execution enabled and with the guest
             * in real-mode, we must not set the deliver-error-code bit.
             *
             * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
             */
            u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
        }
#ifndef IN_NEM_DARWIN
        else
        {
            /* Without unrestricted guest execution we emulate the real-mode event
               dispatch ourselves: look up the IVT entry and build the stack frame. */
            PVMCC pVM = pVCpu->CTX_SUFF(pVM);
            Assert(PDMVmmDevHeapIsEnabled(pVM));
            Assert(pVM->hm.s.vmx.pRealModeTSS);
            Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));

            /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
            int rc2 = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
                                                              | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
            AssertRCReturn(rc2, rc2);

            /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
            size_t const cbIdtEntry = sizeof(X86IDTR16);
            if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
            {
                /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
                if (uVector == X86_XCPT_DF)
                    return VINF_EM_RESET;

                /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
                   No error codes for exceptions in real-mode.
                   Note: recursive call, terminates via the #DF/triple-fault case above. */
                if (uVector == X86_XCPT_GP)
                {
                    static HMEVENT const s_EventXcptDf
                        = HMEVENT_INIT_ONLY_INT_INFO(  RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_DF)
                                                     | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
                                                     | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
                                                     | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1));
                    return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptDf, fStepping, pfIntrState);
                }

                /*
                 * If we're injecting an event with no valid IDT entry, inject a #GP.
                 * No error codes for exceptions in real-mode.
                 *
                 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
                 */
                static HMEVENT const s_EventXcptGp
                    = HMEVENT_INIT_ONLY_INT_INFO(  RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_GP)
                                                 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
                                                 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
                                                 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1));
                return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptGp, fStepping, pfIntrState);
            }

            /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
            uint16_t uGuestIp = pCtx->ip;
            if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
            {
                Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
                /* #BP and #OF are both benign traps, we need to resume the next instruction. */
                uGuestIp = pCtx->ip + (uint16_t)cbInstr;
            }
            else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
                uGuestIp = pCtx->ip + (uint16_t)cbInstr;

            /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
            X86IDTR16 IdtEntry;
            RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
            rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
            AssertRCReturn(rc2, rc2);

            /* Construct the stack frame for the interrupt/exception handler: FLAGS, CS, IP. */
            VBOXSTRICTRC rcStrict;
            rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, (uint16_t)pCtx->eflags.u);
            if (rcStrict == VINF_SUCCESS)
            {
                rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
                if (rcStrict == VINF_SUCCESS)
                    rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
            }

            /* Clear the required eflag bits and jump to the interrupt/exception handler. */
            if (rcStrict == VINF_SUCCESS)
            {
                pCtx->eflags.u   &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
                pCtx->rip         = IdtEntry.offSel;
                pCtx->cs.Sel      = IdtEntry.uSel;
                pCtx->cs.ValidSel = IdtEntry.uSel;
                pCtx->cs.u64Base  = IdtEntry.uSel << cbIdtEntry;
                if (   uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
                    && uVector  == X86_XCPT_PF)
                    pCtx->cr2 = GCPtrFault;

                ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS  | HM_CHANGED_GUEST_CR2
                                                                    | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
                                                                    | HM_CHANGED_GUEST_RSP);

                /*
                 * If we delivered a hardware exception (other than an NMI) and if there was
                 * block-by-STI in effect, we should clear it.
                 */
                if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
                {
                    Assert(   uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
                           && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
                    Log4Func(("Clearing inhibition due to STI\n"));
                    *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
                }

                Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
                      u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));

                /*
                 * The event has been truly dispatched to the guest. Mark it as no longer pending so
                 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
                 */
                VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;

                /*
                 * If we eventually support nested-guest execution without unrestricted guest execution,
                 * we should set fInterceptEvents here.
                 */
                Assert(!fIsNestedGuest);

                /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
                if (fStepping)
                    rcStrict = VINF_EM_DBG_STEPPED;
            }
            AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
                      ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
            return rcStrict;
        }
#else
        RT_NOREF(pVmcsInfo);
#endif
    }

    /*
     * Validate.
     */
    Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo));                     /* Bit 31 (Valid bit) must be set by caller. */
    Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK));       /* Bits 30:12 MBZ. */

    /*
     * Inject the event into the VMCS.
     * Note: rc is accumulated with |= and checked once; the VMCS write macros
     *       return VBox status codes where success is 0.
     */
    int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
    if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
        rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
    rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
    AssertRC(rc);

    /*
     * Update guest CR2 if this is a page-fault.
     */
    if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
        pCtx->cr2 = GCPtrFault;

    Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
    return VINF_SUCCESS;
}
4895
4896
4897/**
4898 * Evaluates the event to be delivered to the guest and sets it as the pending
4899 * event.
4900 *
4901 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4902 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4903 * NOT restore these force-flags.
4904 *
4905 * @returns Strict VBox status code (i.e. informational status codes too).
4906 * @param pVCpu The cross context virtual CPU structure.
4907 * @param pVmcsInfo The VMCS information structure.
4908 * @param pfIntrState Where to store the updated VMX guest-interruptibility
4909 * state.
4910 */
4911static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t *pfIntrState)
4912{
4913 Assert(pfIntrState);
4914 Assert(!TRPMHasTrap(pVCpu));
4915
    /* Get the up-to-date guest-interruptibility state for the caller before evaluating anything. */
4916 *pfIntrState = vmxHCGetGuestIntrStateWithUpdate(pVCpu);
4917
4918 /*
4919 * Evaluate if a new event needs to be injected.
4920 * An event that's already pending has already performed all necessary checks.
4921 */
4922 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4923 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
4924 {
4925 /** @todo SMI. SMIs take priority over NMIs. */
4926
4927 /*
4928 * NMIs.
4929 * NMIs take priority over external interrupts.
4930 */
4931 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4932 {
4933 if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
4934 {
4935 /* Finally, inject the NMI and we're done. */
4936 vmxHCSetPendingXcptNmi(pVCpu);
4937 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4938 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4939 return VINF_SUCCESS;
4940 }
        /* NMI delivery is currently inhibited; request an NMI-window VM-exit so we're
           notified as soon as the NMI becomes deliverable. */
4941 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4942 }
4943 else
4944 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4945
4946 /*
4947 * External interrupts (PIC/APIC).
4948 */
4949 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4950 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4951 {
4952 Assert(!DBGFIsStepping(pVCpu));
            /* Make sure RFLAGS is imported so we can honor RFLAGS.IF below. */
4953 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4954 AssertRC(rc);
4955
4956 if (pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF)
4957 {
4958 /*
4959 * Once PDMGetInterrupt() returns an interrupt we -must- deliver it.
4960 * We cannot re-request the interrupt from the controller again.
4961 */
4962 uint8_t u8Interrupt;
4963 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4964 if (RT_SUCCESS(rc))
4965 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
4966 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4967 {
4968 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
4969 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
4970 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
4971 /*
4972 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
4973 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
4974 * need to re-set this force-flag here.
4975 */
4976 }
4977 else
4978 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
4979
4980 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
4981 return VINF_SUCCESS;
4982 }
            /* RFLAGS.IF is clear; request an interrupt-window VM-exit for when the guest
               re-enables interrupts. */
4983 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4984 }
4985 else
4986 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
4987 }
4988 else
4989 {
4990 /*
4991 * An event is being injected or we are in an interrupt shadow.
4992 * If another event is pending currently, instruct VT-x to cause a VM-exit as
4993 * soon as the guest is ready to accept it.
4994 */
4995 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4996 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4997 else
4998 {
4999 Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT));
5000 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
5001 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5002 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
5003 else
5004 {
5005 /* It's possible that interrupt-window exiting is still active, clear it as it's now unnecessary. */
5006 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
5007 }
5008 }
5009 }
5010
5011 return VINF_SUCCESS;
5012}
5013
5014
5015#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5016/**
5017 * Evaluates the event to be delivered to the nested-guest and sets it as the
5018 * pending event.
5019 *
5020 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
5021 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
5022 * NOT restore these force-flags.
5023 *
5024 * @returns Strict VBox status code (i.e. informational status codes too).
5025 * @param pVCpu The cross context virtual CPU structure.
5026 * @param pVmcsInfo The VMCS information structure.
5027 * @param pfIntrState Where to store the updated VMX guest-interruptibility
5028 * state.
5029 *
5030 * @remarks The guest must be in VMX non-root mode.
5031 */
5032static VBOXSTRICTRC vmxHCEvaluatePendingEventNested(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t *pfIntrState)
5033{
5034 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5035
5036 Assert(pfIntrState);
5037 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
5038 Assert(!TRPMHasTrap(pVCpu));
5039
    /* Get the up-to-date guest-interruptibility state for the caller before evaluating anything. */
5040 *pfIntrState = vmxHCGetGuestIntrStateWithUpdate(pVCpu);
5041
5042 /*
5043 * If we are injecting an event, all necessary checks have been performed.
5044 * Any interrupt-window or NMI-window exiting would have been setup by the
5045 * nested-guest while we merged controls.
5046 */
5047 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5048 return VINF_SUCCESS;
5049
5050 /*
5051 * An event injected by VMLAUNCH/VMRESUME instruction emulation should've been
5052 * made pending (TRPM to HM event) and would be handled above if we resumed
5053 * execution in HM. If somehow we fell back to emulation after the
5054 * VMLAUNCH/VMRESUME instruction, it would have been handled in iemRaiseXcptOrInt
5055 * (calling iemVmxVmexitEvent). Thus, if we get here the nested-hypervisor's VMX
5056 * intercepts should be active and any events pending here have been generated
5057 * while executing the guest in VMX non-root mode after virtual VM-entry completed.
5058 */
5059 Assert(CPUMIsGuestVmxInterceptEvents(pCtx));
5060
5061 /*
5062 * Interrupt shadows MAY block NMIs.
5063 * They also blocks external-interrupts and MAY block external-interrupt VM-exits.
5064 *
5065 * See Intel spec. 24.4.2 "Guest Non-Register State".
5066 * See Intel spec. 25.4.1 "Event Blocking".
5067 */
5068 if (!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
5069 { /* likely */ }
5070 else
5071 return VINF_SUCCESS;
5072
5073 /** @todo SMI. SMIs take priority over NMIs. */
5074
5075 /*
5076 * NMIs.
5077 * NMIs take priority over interrupts.
5078 */
5079 if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
5080 {
5081 /*
5082 * Nested-guest NMI-window exiting.
5083 * The NMI-window exit must happen regardless of whether an NMI is pending
5084 * provided virtual-NMI blocking is not in effect.
5085 *
5086 * See Intel spec. 25.2 "Other Causes Of VM Exits".
5087 */
5088 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
5089 && !CPUMIsGuestVmxVirtNmiBlocking(pCtx))
5090 {
5091 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
5092 return IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
5093 }
5094
5095 /*
5096 * For a nested-guest, the FF always indicates the outer guest's ability to
5097 * receive an NMI while the guest-interruptibility state bit depends on whether
5098 * the nested-hypervisor is using virtual-NMIs.
5099 *
5100 * It is very important that we also clear the force-flag if we are causing
5101 * an NMI VM-exit as it is the responsibility of the nested-hypervisor to deal
5102 * with re-injecting or discarding the NMI. This fixes the bug that showed up
5103 * with SMP Windows Server 2008 R2 with Hyper-V enabled, see @bugref{10318#c19}.
5104 */
5105 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI))
5106 {
5107 if (CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
5108 return IEMExecVmxVmexitXcptNmi(pVCpu);
5109 vmxHCSetPendingXcptNmi(pVCpu);
5110 return VINF_SUCCESS;
5111 }
5112 }
5113
5114 /*
5115 * Nested-guest interrupt-window exiting.
5116 *
5117 * We must cause the interrupt-window exit regardless of whether an interrupt is pending
5118 * provided virtual interrupts are enabled.
5119 *
5120 * See Intel spec. 25.2 "Other Causes Of VM Exits".
5121 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5122 */
5123 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
5124 && CPUMIsGuestVmxVirtIntrEnabled(pCtx))
5125 {
5126 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
5127 return IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
5128 }
5129
5130 /*
5131 * External interrupts (PIC/APIC).
5132 *
5133 * When "External interrupt exiting" is set the VM-exit happens regardless of RFLAGS.IF.
5134 * When it isn't set, RFLAGS.IF controls delivery of the interrupt as always.
5135 * This fixes a nasty SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued
5136 * by other VM-exits (like a preemption timer), see @bugref{9562#c18}.
5137 *
5138 * NMIs block external interrupts as they are dispatched through the interrupt gate (vector 2)
5139 * which automatically clears EFLAGS.IF. Also it's possible an NMI handler could enable interrupts
5140 * and thus we should not check for NMI inhibition here.
5141 *
5142 * See Intel spec. 25.4.1 "Event Blocking".
5143 * See Intel spec. 6.8.1 "Masking Maskable Hardware Interrupts".
5144 */
5145 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
5146 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5147 {
5148 Assert(!DBGFIsStepping(pVCpu));
        /* Make sure RFLAGS is imported so physical-interrupt delivery checks see the current IF. */
5149 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
5150 AssertRC(rc);
        /* Check whether the nested-guest can receive physical interrupts (depends on the
           nested-hypervisor's controls and RFLAGS.IF, see the block comment above). */
5151 if (CPUMIsGuestVmxPhysIntrEnabled(pCtx))
5152 {
5153 /* Nested-guest external interrupt VM-exit. */
5154 if ( CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT)
5155 && !CPUMIsGuestVmxExitCtlsSet(pCtx, VMX_EXIT_CTLS_ACK_EXT_INT))
5156 {
5157 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
5158 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
5159 return rcStrict;
5160 }
5161
5162 /*
5163 * Fetch the external interrupt from the interrupt controller.
5164 * Once PDMGetInterrupt() returns an interrupt we -must- deliver it or pass it to
5165 * the nested-hypervisor. We cannot re-request the interrupt from the controller again.
5166 */
5167 uint8_t u8Interrupt;
5168 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
5169 if (RT_SUCCESS(rc))
5170 {
5171 /* Nested-guest external interrupt VM-exit when the "acknowledge interrupt on exit" is enabled. */
5172 if (CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
5173 {
5174 Assert(CPUMIsGuestVmxExitCtlsSet(pCtx, VMX_EXIT_CTLS_ACK_EXT_INT));
5175 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
5176 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
5177 return rcStrict;
5178 }
5179 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
5180 return VINF_SUCCESS;
5181 }
5182 }
5183 }
    /* Nothing to inject or intercept at this point. */
5184 return VINF_SUCCESS;
5185}
5186#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5187
5188
5189/**
5190 * Injects any pending events into the guest if the guest is in a state to
5191 * receive them.
5192 *
5193 * @returns Strict VBox status code (i.e. informational status codes too).
5194 * @param pVCpu The cross context virtual CPU structure.
5195 * @param pVmcsInfo The VMCS information structure.
5196 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
5197 * @param fIntrState The VT-x guest-interruptibility state.
5198 * @param fStepping Whether we are single-stepping the guest using the
5199 * hypervisor debugger and should return
5200 * VINF_EM_DBG_STEPPED if the event was dispatched
5201 * directly.
5202 */
5203static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
5204 uint32_t fIntrState, bool fStepping)
5205{
5206 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
5207#ifndef IN_NEM_DARWIN
5208 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5209#endif
5210
5211#ifdef VBOX_STRICT
5212 /*
5213 * Verify guest-interruptibility state.
5214 *
5215 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
5216 * since injecting an event may modify the interruptibility state and we must thus always
5217 * use fIntrState.
5218 */
5219 {
5220 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
5221 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
5222 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
5223 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
5224 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
5225 Assert(!TRPMHasTrap(pVCpu));
5226 NOREF(fBlockMovSS); NOREF(fBlockSti);
5227 }
5228#endif
5229
5230 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5231 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5232 {
5233 /*
5234 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
5235 * pending even while injecting an event and in this case, we want a VM-exit as soon as
5236 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
5237 *
5238 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5239 */
5240 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
5241#ifdef VBOX_STRICT
5242 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5243 {
5244 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
5245 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5246 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5247 }
5248 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5249 {
5250 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
5251 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5252 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5253 }
5254#endif
5255 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5256 uIntType));
5257
5258 /*
5259 * Inject the event and get any changes to the guest-interruptibility state.
5260 *
5261 * The guest-interruptibility state may need to be updated if we inject the event
5262 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
5263 */
5264 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
5265 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
5266
        /* Keep the injection statistics up to date. */
5267 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5268 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
5269 else
5270 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
5271 }
5272
5273 /*
5274 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
5275 * is an interrupt shadow (block-by-STI or block-by-MOV SS).
5276 */
5277 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5278 && !fIsNestedGuest)
5279 {
5280 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
5281
5282 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5283 {
5284 /*
5285 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
5286 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
5287 */
5288 Assert(!DBGFIsStepping(pVCpu));
5289 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_TF);
5290 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
5291 fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
5292 AssertRC(rc);
5293 }
5294 else
5295 {
5296 /*
5297 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
5298 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
5299 * we take care of this case in vmxHCExportSharedDebugState and also the case if
5300 * we use MTF, so just make sure it's called before executing guest-code.
5301 */
5302 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
5303 }
5304 }
5305 /* else: for nested-guest currently handling while merging controls. */
5306
5307 /*
5308 * Finally, update the guest-interruptibility state.
5309 *
5310 * This is required for the real-on-v86 software interrupt injection, for
5311 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
5312 */
5313 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
5314 AssertRC(rc);
5315
5316 /*
5317 * There's no need to clear the VM-entry interruption-information field here if we're not
5318 * injecting anything. VT-x clears the valid bit on every VM-exit.
5319 *
5320 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
5321 */
5322
5323 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
5324 return rcStrict;
5325}
5326
5327
5328/**
5329 * Tries to determine what part of the guest-state VT-x has deemed as invalid
5330 * and update error record fields accordingly.
5331 *
5332 * @returns VMX_IGS_* error codes.
5333 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
5334 * wrong with the guest state.
5335 *
5336 * @param pVCpu The cross context virtual CPU structure.
5337 * @param pVmcsInfo The VMCS info. object.
5338 *
5339 * @remarks This function assumes our cache of the VMCS controls
5340 * are valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
5341 */
5342static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
5343{
5344#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
5345#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { uError = (err); break; } else do { } while (0)
5346
5347 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5348 uint32_t uError = VMX_IGS_ERROR;
5349 uint32_t u32IntrState = 0;
5350#ifndef IN_NEM_DARWIN
5351 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5352 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
5353#else
5354 bool const fUnrestrictedGuest = true;
5355#endif
5356 do
5357 {
5358 int rc;
5359
5360 /*
5361 * Guest-interruptibility state.
5362 *
5363 * Read this first so that any check that fails prior to those that actually
5364 * require the guest-interruptibility state would still reflect the correct
5365 * VMCS value and avoids causing further confusion.
5366 */
5367 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
5368 AssertRC(rc);
5369
5370 uint32_t u32Val;
5371 uint64_t u64Val;
5372
5373 /*
5374 * CR0.
5375 */
5376 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5377 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
5378 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
5379 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
5380 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
5381 if (fUnrestrictedGuest)
5382 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
5383
5384 uint64_t u64GuestCr0;
5385 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
5386 AssertRC(rc);
5387 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
5388 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
5389 if ( !fUnrestrictedGuest
5390 && (u64GuestCr0 & X86_CR0_PG)
5391 && !(u64GuestCr0 & X86_CR0_PE))
5392 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
5393
5394 /*
5395 * CR4.
5396 */
5397 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5398 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
5399 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
5400
5401 uint64_t u64GuestCr4;
5402 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
5403 AssertRC(rc);
5404 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
5405 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
5406
5407 /*
5408 * IA32_DEBUGCTL MSR.
5409 */
5410 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
5411 AssertRC(rc);
5412 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5413 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
5414 {
5415 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
5416 }
5417 uint64_t u64DebugCtlMsr = u64Val;
5418
5419#ifdef VBOX_STRICT
5420 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
5421 AssertRC(rc);
5422 Assert(u32Val == pVmcsInfo->u32EntryCtls);
5423#endif
5424 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5425
5426 /*
5427 * RIP and RFLAGS.
5428 */
5429 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
5430 AssertRC(rc);
5431 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
5432 if ( !fLongModeGuest
5433 || !pCtx->cs.Attr.n.u1Long)
5434 {
5435 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
5436 }
5437 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
5438 * must be identical if the "IA-32e mode guest" VM-entry
5439 * control is 1 and CS.L is 1. No check applies if the
5440 * CPU supports 64 linear-address bits. */
5441
5442 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
5443 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
5444 AssertRC(rc);
5445 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
5446 VMX_IGS_RFLAGS_RESERVED);
5447 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
5448 uint32_t const u32Eflags = u64Val;
5449
5450 if ( fLongModeGuest
5451 || ( fUnrestrictedGuest
5452 && !(u64GuestCr0 & X86_CR0_PE)))
5453 {
5454 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
5455 }
5456
5457 uint32_t u32EntryInfo;
5458 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
5459 AssertRC(rc);
5460 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5461 {
5462 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
5463 }
5464
5465 /*
5466 * 64-bit checks.
5467 */
5468 if (fLongModeGuest)
5469 {
5470 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
5471 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
5472 }
5473
5474 if ( !fLongModeGuest
5475 && (u64GuestCr4 & X86_CR4_PCIDE))
5476 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
5477
5478 /** @todo CR3 field must be such that bits 63:52 and bits in the range
5479 * 51:32 beyond the processor's physical-address width are 0. */
5480
5481 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5482 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
5483 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
5484
5485#ifndef IN_NEM_DARWIN
5486 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
5487 AssertRC(rc);
5488 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
5489
5490 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
5491 AssertRC(rc);
5492 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
5493#endif
5494
5495 /*
5496 * PERF_GLOBAL MSR.
5497 */
5498 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
5499 {
5500 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
5501 AssertRC(rc);
5502 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
5503 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
5504 }
5505
5506 /*
5507 * PAT MSR.
5508 */
5509 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5510 {
5511 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
5512 AssertRC(rc);
5513 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
5514 for (unsigned i = 0; i < 8; i++)
5515 {
5516 uint8_t u8Val = (u64Val & 0xff);
5517 if ( u8Val > MSR_IA32_PAT_MT_UCD
5518 || u8Val == MSR_IA32_PAT_MT_RSVD_2
5519 || u8Val == MSR_IA32_PAT_MT_RSVD_3)
5520 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
5521 u64Val >>= 8;
5522 }
5523 }
5524
5525 /*
5526 * EFER MSR.
5527 */
5528 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5529 {
5530 Assert(g_fHmVmxSupportsVmcsEfer);
5531 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
5532 AssertRC(rc);
5533 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
5534 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
5535 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
5536 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
5537 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
5538 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
5539 * iemVmxVmentryCheckGuestState(). */
5540 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5541 || !(u64GuestCr0 & X86_CR0_PG)
5542 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
5543 VMX_IGS_EFER_LMA_LME_MISMATCH);
5544 }
5545
5546 /*
5547 * Segment registers.
5548 */
5549 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5550 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
5551 if (!(u32Eflags & X86_EFL_VM))
5552 {
5553 /* CS */
5554 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
5555 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
5556 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
5557 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
5558 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5559 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
5560 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5561 /* CS cannot be loaded with NULL in protected mode. */
5562 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
5563 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
5564 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
5565 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
5566 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
5567 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
5568 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
5569 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
5570 else
5571 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
5572
5573 /* SS */
5574 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5575 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
5576 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
5577 if ( !(pCtx->cr0 & X86_CR0_PE)
5578 || pCtx->cs.Attr.n.u4Type == 3)
5579 {
5580 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
5581 }
5582
5583 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
5584 {
5585 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
5586 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
5587 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
5588 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
5589 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
5590 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5591 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
5592 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5593 }
5594
5595 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
5596 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
5597 {
5598 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
5599 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
5600 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5601 || pCtx->ds.Attr.n.u4Type > 11
5602 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5603 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
5604 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
5605 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
5606 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5607 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
5608 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5609 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5610 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
5611 }
5612 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
5613 {
5614 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
5615 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
5616 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5617 || pCtx->es.Attr.n.u4Type > 11
5618 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5619 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
5620 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5621 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5622 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5623 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5624 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5625 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5626 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5627 }
5628 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5629 {
5630 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5631 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5632 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5633 || pCtx->fs.Attr.n.u4Type > 11
5634 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5635 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5636 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5637 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5638 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5639 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5640 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5641 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5642 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5643 }
5644 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5645 {
5646 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5647 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5648 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5649 || pCtx->gs.Attr.n.u4Type > 11
5650 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5651 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5652 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5653 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5654 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5655 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5656 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5657 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5658 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5659 }
5660 /* 64-bit capable CPUs. */
5661 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5662 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5663 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5664 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5665 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5666 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5667 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5668 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5669 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5670 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5671 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5672 }
5673 else
5674 {
5675 /* V86 mode checks. */
5676 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5677 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5678 {
5679 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5680 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5681 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5682 }
5683 else
5684 {
5685 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5686 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5687 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5688 }
5689
5690 /* CS */
5691 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5692 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5693 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5694 /* SS */
5695 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5696 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5697 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5698 /* DS */
5699 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5700 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5701 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5702 /* ES */
5703 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5704 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5705 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5706 /* FS */
5707 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5708 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5709 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5710 /* GS */
5711 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5712 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5713 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5714 /* 64-bit capable CPUs. */
5715 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5716 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5717 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5718 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5719 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5720 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5721 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5722 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5723 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5724 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5725 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5726 }
5727
5728 /*
5729 * TR.
5730 */
5731 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5732 /* 64-bit capable CPUs. */
5733 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5734 if (fLongModeGuest)
5735 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5736 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5737 else
5738 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5739 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5740 VMX_IGS_TR_ATTR_TYPE_INVALID);
5741 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5742 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5743 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5744 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5745 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5746 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5747 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5748 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5749
5750 /*
5751 * GDTR and IDTR (64-bit capable checks).
5752 */
5753 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5754 AssertRC(rc);
5755 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5756
5757 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5758 AssertRC(rc);
5759 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5760
5761 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5762 AssertRC(rc);
5763 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5764
5765 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5766 AssertRC(rc);
5767 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5768
5769 /*
5770 * Guest Non-Register State.
5771 */
5772 /* Activity State. */
5773 uint32_t u32ActivityState;
5774 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5775 AssertRC(rc);
5776 HMVMX_CHECK_BREAK( !u32ActivityState
5777 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5778 VMX_IGS_ACTIVITY_STATE_INVALID);
5779 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5780 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5781
5782 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5783 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5784 {
5785 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5786 }
5787
5788 /** @todo Activity state and injecting interrupts. Left as a todo since we
5789 * currently don't use activity states but ACTIVE. */
5790
5791 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5792 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5793
5794 /* Guest interruptibility-state. */
5795 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5796 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5797 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5798 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5799 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5800 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5801 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5802 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5803 {
5804 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5805 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5806 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5807 }
5808 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5809 {
5810 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5811 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5812 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5813 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5814 }
5815 /** @todo Assumes the processor is not in SMM. */
5816 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5817 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5818 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5819 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5820 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5821 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5822 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5823 {
5824 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5825 }
5826
5827 /* Pending debug exceptions. */
5828 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5829 AssertRC(rc);
5830 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5831 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5832 u32Val = u64Val; /* For pending debug exceptions checks below. */
5833
5834 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5835 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5836 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5837 {
5838 if ( (u32Eflags & X86_EFL_TF)
5839 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5840 {
5841 /* Bit 14 is PendingDebug.BS. */
5842 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5843 }
5844 if ( !(u32Eflags & X86_EFL_TF)
5845 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5846 {
5847 /* Bit 14 is PendingDebug.BS. */
5848 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5849 }
5850 }
5851
5852#ifndef IN_NEM_DARWIN
5853 /* VMCS link pointer. */
5854 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5855 AssertRC(rc);
5856 if (u64Val != UINT64_C(0xffffffffffffffff))
5857 {
5858 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5859 /** @todo Bits beyond the processor's physical-address width MBZ. */
5860 /** @todo SMM checks. */
5861 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5862 Assert(pVmcsInfo->pvShadowVmcs);
5863 VMXVMCSREVID VmcsRevId;
5864 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5865 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5866 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5867 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5868 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5869 }
5870
5871 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5872 * not using nested paging? */
5873 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5874 && !fLongModeGuest
5875 && CPUMIsGuestInPAEModeEx(pCtx))
5876 {
5877 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5878 AssertRC(rc);
5879 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5880
5881 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5882 AssertRC(rc);
5883 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5884
5885 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5886 AssertRC(rc);
5887 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5888
5889 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5890 AssertRC(rc);
5891 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5892 }
5893#endif
5894
5895 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5896 if (uError == VMX_IGS_ERROR)
5897 uError = VMX_IGS_REASON_NOT_FOUND;
5898 } while (0);
5899
5900 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5901 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5902 return uError;
5903
5904#undef HMVMX_ERROR_BREAK
5905#undef HMVMX_CHECK_BREAK
5906}
5907
5908
5909#ifndef HMVMX_USE_FUNCTION_TABLE
5910/**
5911 * Handles a guest VM-exit from hardware-assisted VMX execution.
5912 *
5913 * @returns Strict VBox status code (i.e. informational status codes too).
5914 * @param pVCpu The cross context virtual CPU structure.
5915 * @param pVmxTransient The VMX-transient structure.
5916 */
5917DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5918{
5919#ifdef DEBUG_ramshankar
5920# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5921 do { \
5922 if (a_fSave != 0) \
5923 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5924 VBOXSTRICTRC rcStrict = a_CallExpr; \
5925 if (a_fSave != 0) \
5926 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5927 return rcStrict; \
5928 } while (0)
5929#else
5930# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5931#endif
5932 uint32_t const uExitReason = pVmxTransient->uExitReason;
5933 switch (uExitReason)
5934 {
5935 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5936 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5937 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5938 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5939 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5940 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5941 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5942 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5943 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5944 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5945 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5946 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5947 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5948 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5949 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5950 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5951 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5952 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5953 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5954 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5955 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5956 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5957 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5958 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5959 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5960 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5961 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5962 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5963 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5964 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5965#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5966 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5967 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5968 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5969 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5970 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5971 case VMX_EXIT_VMRESUME: VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5972 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5973 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5974 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5975 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5976#else
5977 case VMX_EXIT_VMCLEAR:
5978 case VMX_EXIT_VMLAUNCH:
5979 case VMX_EXIT_VMPTRLD:
5980 case VMX_EXIT_VMPTRST:
5981 case VMX_EXIT_VMREAD:
5982 case VMX_EXIT_VMRESUME:
5983 case VMX_EXIT_VMWRITE:
5984 case VMX_EXIT_VMXOFF:
5985 case VMX_EXIT_VMXON:
5986 case VMX_EXIT_INVVPID:
5987 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5988#endif
5989#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5990 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5991#else
5992 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5993#endif
5994
5995 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5996 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5997 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5998
5999 case VMX_EXIT_INIT_SIGNAL:
6000 case VMX_EXIT_SIPI:
6001 case VMX_EXIT_IO_SMI:
6002 case VMX_EXIT_SMI:
6003 case VMX_EXIT_ERR_MSR_LOAD:
6004 case VMX_EXIT_ERR_MACHINE_CHECK:
6005 case VMX_EXIT_PML_FULL:
6006 case VMX_EXIT_VIRTUALIZED_EOI:
6007 case VMX_EXIT_GDTR_IDTR_ACCESS:
6008 case VMX_EXIT_LDTR_TR_ACCESS:
6009 case VMX_EXIT_APIC_WRITE:
6010 case VMX_EXIT_RDRAND:
6011 case VMX_EXIT_RSM:
6012 case VMX_EXIT_VMFUNC:
6013 case VMX_EXIT_ENCLS:
6014 case VMX_EXIT_RDSEED:
6015 case VMX_EXIT_XSAVES:
6016 case VMX_EXIT_XRSTORS:
6017 case VMX_EXIT_UMWAIT:
6018 case VMX_EXIT_TPAUSE:
6019 case VMX_EXIT_LOADIWKEY:
6020 default:
6021 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
6022 }
6023#undef VMEXIT_CALL_RET
6024}
6025#endif /* !HMVMX_USE_FUNCTION_TABLE */
6026
6027
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
/**
 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
 *
 * Dispatches on the basic VM-exit reason to the nested-guest-aware exit
 * handlers, which decide whether the exit is handled here or reflected to the
 * nested hypervisor.
 *
 * @returns Strict VBox status code (i.e. informational status codes too).
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
#ifdef DEBUG_ramshankar
/* Debug aid: import the entire guest state before each handler so state
   staleness bugs in individual nested exit handlers surface early. */
# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
    do { \
        if (a_fSave != 0) \
            vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
        VBOXSTRICTRC rcStrict = a_CallExpr; \
        return rcStrict; \
    } while (0)
#else
# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
#endif

    uint32_t const uExitReason = pVmxTransient->uExitReason;
    switch (uExitReason)
    {
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        /* With nested EPT, EPT exits may belong to the nested hypervisor. */
        case VMX_EXIT_EPT_MISCONFIG:            VMEXIT_CALL_RET(0, vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient));
        case VMX_EXIT_EPT_VIOLATION:            VMEXIT_CALL_RET(0, vmxHCExitEptViolationNested(pVCpu, pVmxTransient));
# else
        case VMX_EXIT_EPT_MISCONFIG:            VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
        case VMX_EXIT_EPT_VIOLATION:            VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
# endif
        case VMX_EXIT_XCPT_OR_NMI:              VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient));
        case VMX_EXIT_IO_INSTR:                 VMEXIT_CALL_RET(0, vmxHCExitIoInstrNested(pVCpu, pVmxTransient));
        case VMX_EXIT_HLT:                      VMEXIT_CALL_RET(0, vmxHCExitHltNested(pVCpu, pVmxTransient));

        /*
         * We shouldn't direct host physical interrupts to the nested-guest.
         */
        case VMX_EXIT_EXT_INT:                  VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));

        /*
         * Instructions that cause VM-exits unconditionally or the condition is
         * always taken solely from the nested hypervisor (meaning if the VM-exit
         * happens, it's guaranteed to be a nested-guest VM-exit).
         *
         *   - Provides VM-exit instruction length ONLY.
         */
        case VMX_EXIT_CPUID:              /* Unconditional. */
        case VMX_EXIT_VMCALL:
        case VMX_EXIT_GETSEC:
        case VMX_EXIT_INVD:
        case VMX_EXIT_XSETBV:
        case VMX_EXIT_VMLAUNCH:
        case VMX_EXIT_VMRESUME:
        case VMX_EXIT_VMXOFF:
        case VMX_EXIT_ENCLS:              /* Condition specified solely by nested hypervisor. */
        case VMX_EXIT_VMFUNC:
            VMEXIT_CALL_RET(0, vmxHCExitInstrNested(pVCpu, pVmxTransient));

        /*
         * Instructions that cause VM-exits unconditionally or the condition is
         * always taken solely from the nested hypervisor (meaning if the VM-exit
         * happens, it's guaranteed to be a nested-guest VM-exit).
         *
         *   - Provides VM-exit instruction length.
         *   - Provides VM-exit information.
         *   - Optionally provides Exit qualification.
         *
         * Since Exit qualification is 0 for all VM-exits where it is not
         * applicable, reading and passing it to the guest should produce
         * defined behavior.
         *
         * See Intel spec. 27.2.1 "Basic VM-Exit Information".
         */
        case VMX_EXIT_INVEPT:             /* Unconditional. */
        case VMX_EXIT_INVVPID:
        case VMX_EXIT_VMCLEAR:
        case VMX_EXIT_VMPTRLD:
        case VMX_EXIT_VMPTRST:
        case VMX_EXIT_VMXON:
        case VMX_EXIT_GDTR_IDTR_ACCESS:   /* Condition specified solely by nested hypervisor. */
        case VMX_EXIT_LDTR_TR_ACCESS:
        case VMX_EXIT_RDRAND:
        case VMX_EXIT_RDSEED:
        case VMX_EXIT_XSAVES:
        case VMX_EXIT_XRSTORS:
        case VMX_EXIT_UMWAIT:
        case VMX_EXIT_TPAUSE:
            VMEXIT_CALL_RET(0, vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient));

        case VMX_EXIT_RDTSC:                    VMEXIT_CALL_RET(0, vmxHCExitRdtscNested(pVCpu, pVmxTransient));
        case VMX_EXIT_RDTSCP:                   VMEXIT_CALL_RET(0, vmxHCExitRdtscpNested(pVCpu, pVmxTransient));
        case VMX_EXIT_RDMSR:                    VMEXIT_CALL_RET(0, vmxHCExitRdmsrNested(pVCpu, pVmxTransient));
        case VMX_EXIT_WRMSR:                    VMEXIT_CALL_RET(0, vmxHCExitWrmsrNested(pVCpu, pVmxTransient));
        case VMX_EXIT_INVLPG:                   VMEXIT_CALL_RET(0, vmxHCExitInvlpgNested(pVCpu, pVmxTransient));
        case VMX_EXIT_INVPCID:                  VMEXIT_CALL_RET(0, vmxHCExitInvpcidNested(pVCpu, pVmxTransient));
        case VMX_EXIT_TASK_SWITCH:              VMEXIT_CALL_RET(0, vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient));
        case VMX_EXIT_WBINVD:                   VMEXIT_CALL_RET(0, vmxHCExitWbinvdNested(pVCpu, pVmxTransient));
        case VMX_EXIT_MTF:                      VMEXIT_CALL_RET(0, vmxHCExitMtfNested(pVCpu, pVmxTransient));
        case VMX_EXIT_APIC_ACCESS:              VMEXIT_CALL_RET(0, vmxHCExitApicAccessNested(pVCpu, pVmxTransient));
        case VMX_EXIT_APIC_WRITE:               VMEXIT_CALL_RET(0, vmxHCExitApicWriteNested(pVCpu, pVmxTransient));
        case VMX_EXIT_VIRTUALIZED_EOI:          VMEXIT_CALL_RET(0, vmxHCExitVirtEoiNested(pVCpu, pVmxTransient));
        case VMX_EXIT_MOV_CRX:                  VMEXIT_CALL_RET(0, vmxHCExitMovCRxNested(pVCpu, pVmxTransient));
        case VMX_EXIT_INT_WINDOW:               VMEXIT_CALL_RET(0, vmxHCExitIntWindowNested(pVCpu, pVmxTransient));
        case VMX_EXIT_NMI_WINDOW:               VMEXIT_CALL_RET(0, vmxHCExitNmiWindowNested(pVCpu, pVmxTransient));
        case VMX_EXIT_TPR_BELOW_THRESHOLD:      VMEXIT_CALL_RET(0, vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient));
        case VMX_EXIT_MWAIT:                    VMEXIT_CALL_RET(0, vmxHCExitMwaitNested(pVCpu, pVmxTransient));
        case VMX_EXIT_MONITOR:                  VMEXIT_CALL_RET(0, vmxHCExitMonitorNested(pVCpu, pVmxTransient));
        case VMX_EXIT_PAUSE:                    VMEXIT_CALL_RET(0, vmxHCExitPauseNested(pVCpu, pVmxTransient));

        case VMX_EXIT_PREEMPT_TIMER:
        {
            /** @todo NSTVMX: Preempt timer. */
            VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
        }

        case VMX_EXIT_MOV_DRX:                  VMEXIT_CALL_RET(0, vmxHCExitMovDRxNested(pVCpu, pVmxTransient));
        case VMX_EXIT_RDPMC:                    VMEXIT_CALL_RET(0, vmxHCExitRdpmcNested(pVCpu, pVmxTransient));

        /* VMREAD and VMWRITE deliberately share one nested handler. */
        case VMX_EXIT_VMREAD:
        case VMX_EXIT_VMWRITE:                  VMEXIT_CALL_RET(0, vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient));

        case VMX_EXIT_TRIPLE_FAULT:             VMEXIT_CALL_RET(0, vmxHCExitTripleFaultNested(pVCpu, pVmxTransient));
        case VMX_EXIT_ERR_INVALID_GUEST_STATE:  VMEXIT_CALL_RET(0, vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient));

        /* Exit reasons we never expect to see while running a nested-guest. */
        case VMX_EXIT_INIT_SIGNAL:
        case VMX_EXIT_SIPI:
        case VMX_EXIT_IO_SMI:
        case VMX_EXIT_SMI:
        case VMX_EXIT_ERR_MSR_LOAD:
        case VMX_EXIT_ERR_MACHINE_CHECK:
        case VMX_EXIT_PML_FULL:
        case VMX_EXIT_RSM:
        default:
            return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
    }
#undef VMEXIT_CALL_RET
}
#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6168
6169
6170/** @name VM-exit helpers.
6171 * @{
6172 */
6173/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6174/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6175/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6176
/** Macro for VM-exits called unexpectedly.  Records the error in the HM state
 *  and bails out of the exit handler with VERR_VMX_UNEXPECTED_EXIT. */
#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
    do { \
        VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
        return VERR_VMX_UNEXPECTED_EXIT; \
    } while (0)

#ifdef VBOX_STRICT
# ifndef IN_NEM_DARWIN
/* Is there some generic IPRT define for this that are not in Runtime/internal/\* ?? */
/* Captures the current CPU id (or NIL when preemption is enabled) for later
   comparison by HMVMX_ASSERT_PREEMPT_CPUID(). */
# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
    RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()

/* Asserts we are still on the same host CPU as when HMVMX_ASSERT_PREEMPT_CPUID_VAR()
   was evaluated. */
# define HMVMX_ASSERT_PREEMPT_CPUID() \
    do { \
         RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
         AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
    } while (0)

/* Strict-build sanity checks performed on entry to every VM-exit handler
   (ring-0 variant: also checks interrupts and preemption/CPU migration). */
# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
    do { \
        AssertPtr((a_pVCpu)); \
        AssertPtr((a_pVmxTransient)); \
        Assert(   (a_pVmxTransient)->fVMEntryFailed == false \
               || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
               || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
               || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
        Assert((a_pVmxTransient)->pVmcsInfo); \
        Assert(ASMIntAreEnabled()); \
        HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
        HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
        Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
        HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
        if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
            HMVMX_ASSERT_PREEMPT_CPUID(); \
        HMVMX_STOP_EXIT_DISPATCH_PROF(); \
    } while (0)
# else
/* NEM/darwin runs in ring-3; the preemption/CPU-id checks do not apply. */
#  define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
#  define HMVMX_ASSERT_PREEMPT_CPUID()     do { } while(0)
#  define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
    do { \
        AssertPtr((a_pVCpu)); \
        AssertPtr((a_pVmxTransient)); \
        Assert(   (a_pVmxTransient)->fVMEntryFailed == false \
               || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
               || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
               || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
        Assert((a_pVmxTransient)->pVmcsInfo); \
        Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
        HMVMX_STOP_EXIT_DISPATCH_PROF(); \
    } while (0)
# endif

/* Same as above, but additionally asserts the exit originated from a nested-guest. */
# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
    do { \
        HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
        Assert((a_pVmxTransient)->fIsNestedGuest); \
    } while (0)

# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
    do { \
        Log4Func(("\n")); \
    } while (0)
#else
/* Non-strict builds: validation collapses to just the profiling stop. */
# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
    do { \
        HMVMX_STOP_EXIT_DISPATCH_PROF(); \
        NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
    } while (0)

# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
    do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)

# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
#endif

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
/** Macro that does the necessary privilege checks and intercepted VM-exits for
 *  guests that attempted to execute a VMX instruction.  On VINF_HM_PENDING_XCPT
 *  an event has been queued for injection and the handler returns success. */
# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
    do \
    { \
        VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
        if (rcStrictTmp == VINF_SUCCESS) \
        { /* likely */ } \
        else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
        { \
            Assert((a_pVCpu)->hm.s.Event.fPending); \
            Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
            return VINF_SUCCESS; \
        } \
        else \
        { \
            int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
            AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
        } \
    } while (0)

/** Macro that decodes a memory operand for an VM-exit caused by an instruction.
 *  On VINF_HM_PENDING_XCPT an exception has been queued and the handler returns
 *  success; other failures are propagated to the caller. */
# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
    do \
    { \
        VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
                                                         (a_pGCPtrEffAddr)); \
        if (rcStrictTmp == VINF_SUCCESS) \
        { /* likely */ } \
        else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
        { \
            uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
            Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
            NOREF(uXcptTmp); \
            return VINF_SUCCESS; \
        } \
        else \
        { \
            Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
            return rcStrictTmp; \
        } \
    } while (0)
#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6298
6299
6300/**
6301 * Advances the guest RIP by the specified number of bytes.
6302 *
6303 * @param pVCpu The cross context virtual CPU structure.
6304 * @param cbInstr Number of bytes to advance the RIP by.
6305 *
6306 * @remarks No-long-jump zone!!!
6307 */
6308DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
6309{
6310 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
6311
6312 /*
6313 * Advance RIP.
6314 *
6315 * The upper 32 bits are only set when in 64-bit mode, so we have to detect
6316 * when the addition causes a "carry" into the upper half and check whether
6317 * we're in 64-bit and can go on with it or wether we should zap the top
6318 * half. (Note! The 8086, 80186 and 80286 emulation is done exclusively in
6319 * IEM, so we don't need to bother with pre-386 16-bit wraparound.)
6320 *
6321 * See PC wrap around tests in bs3-cpu-weird-1.
6322 */
6323 uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
6324 uint64_t const uRipNext = uRipPrev + cbInstr;
6325 if (RT_LIKELY( !((uRipNext ^ uRipPrev) & RT_BIT_64(32))
6326 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx)))
6327 pVCpu->cpum.GstCtx.rip = uRipNext;
6328 else
6329 pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
6330
6331 /*
6332 * Clear RF and interrupt shadowing.
6333 */
6334 if (RT_LIKELY(!(pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF))))
6335 pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_INHIBIT_SHADOW;
6336 else
6337 {
6338 if ((pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF)) == X86_EFL_TF)
6339 {
6340 /** @todo \#DB - single step. */
6341 }
6342 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
6343 }
6344 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
6345
6346 /* Mark both RIP and RFLAGS as updated. */
6347 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6348}
6349
6350
6351/**
6352 * Advances the guest RIP after reading it from the VMCS.
6353 *
6354 * @returns VBox status code, no informational status codes.
6355 * @param pVCpu The cross context virtual CPU structure.
6356 * @param pVmxTransient The VMX-transient structure.
6357 *
6358 * @remarks No-long-jump zone!!!
6359 */
6360static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6361{
6362 vmxHCReadToTransientSlow<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
6363 /** @todo consider template here after checking callers. */
6364 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
6365 AssertRCReturn(rc, rc);
6366
6367 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
6368 return VINF_SUCCESS;
6369}
6370
6371
/**
 * Handle a condition that occurred while delivering an event through the guest or
 * nested-guest IDT.
 *
 * @returns Strict VBox status code (i.e. informational status codes too).
 * @retval  VINF_SUCCESS if we should continue handling the VM-exit.
 * @retval  VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
 *          to continue execution of the guest which will delivery the \#DF.
 * @retval  VINF_EM_RESET if we detected a triple-fault condition.
 * @retval  VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 *
 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
 *          Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
 *          is due to an EPT violation, PML full or SPP-related event.
 *
 * @remarks No-long-jump zone!!!
 */
static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
    HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
    if (   pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
        || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
        || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
        HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);

    VBOXSTRICTRC   rcStrict       = VINF_SUCCESS;
    PCVMXVMCSINFO  pVmcsInfo      = pVmxTransient->pVmcsInfo;
    uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
    uint32_t const uExitIntInfo   = pVmxTransient->uExitIntInfo;
    /* Did this VM-exit interrupt the delivery of an event through the IDT? */
    if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
    {
        uint32_t const uIdtVector     = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
        uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);

        /*
         * If the event was a software interrupt (generated with INT n) or a software exception
         * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
         * can handle the VM-exit and continue guest execution which will re-execute the
         * instruction rather than re-injecting the exception, as that can cause premature
         * trips to ring-3 before injection and involve TRPM which currently has no way of
         * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
         * the problem).
         */
        IEMXCPTRAISE     enmRaise;
        IEMXCPTRAISEINFO fRaiseInfo;
        if (   uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
            || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
            || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
        {
            enmRaise   = IEMXCPTRAISE_REEXEC_INSTR;
            fRaiseInfo = IEMXCPTRAISEINFO_NONE;
        }
        else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
        {
            /* The interrupted delivery itself raised an exception; ask IEM how the
               two events combine (reflect, #DF, triple fault, ...). */
            uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
            uint8_t const  uExitVector     = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
            Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);

            uint32_t const fIdtVectorFlags  = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
            uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);

            enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);

            /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
            if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
            {
                pVmxTransient->fVectoringPF = true;
                enmRaise = IEMXCPTRAISE_PREV_EVENT;
            }
        }
        else
        {
            /*
             * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
             * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
             * It is sufficient to reflect the original event to the guest after handling the VM-exit.
             */
            Assert(   uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
                   || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
                   || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
            enmRaise   = IEMXCPTRAISE_PREV_EVENT;
            fRaiseInfo = IEMXCPTRAISEINFO_NONE;
        }

        /*
         * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
         * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
         * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
         * subsequent VM-entry would fail, see @bugref{7445}.
         *
         * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
         */
        if (   uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
            && enmRaise == IEMXCPTRAISE_PREV_EVENT
            && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
            && CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
            CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);

        /* Act on IEM's verdict about how the original and the new event combine. */
        switch (enmRaise)
        {
            case IEMXCPTRAISE_CURRENT_XCPT:
            {
                Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
                Assert(rcStrict == VINF_SUCCESS);
                break;
            }

            case IEMXCPTRAISE_PREV_EVENT:
            {
                /* Re-inject the original (interrupted) event; carry its error code over if it had one. */
                uint32_t u32ErrCode;
                if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
                    u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
                else
                    u32ErrCode = 0;

                /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
                STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
                vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */, u32ErrCode,
                                     pVCpu->cpum.GstCtx.cr2);

                Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
                          VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
                Assert(rcStrict == VINF_SUCCESS);
                break;
            }

            case IEMXCPTRAISE_REEXEC_INSTR:
                Assert(rcStrict == VINF_SUCCESS);
                break;

            case IEMXCPTRAISE_DOUBLE_FAULT:
            {
                /*
                 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
                 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
                 */
                if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
                {
                    pVmxTransient->fVectoringDoublePF = true;
                    Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
                              pVCpu->cpum.GstCtx.cr2));
                    rcStrict = VINF_SUCCESS;
                }
                else
                {
                    STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
                    vmxHCSetPendingXcptDF(pVCpu);
                    Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
                              uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
                    rcStrict = VINF_HM_DOUBLE_FAULT;
                }
                break;
            }

            case IEMXCPTRAISE_TRIPLE_FAULT:
            {
                Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
                          VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
                rcStrict = VINF_EM_RESET;
                break;
            }

            case IEMXCPTRAISE_CPU_HANG:
            {
                Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
                rcStrict = VERR_EM_GUEST_CPU_HANG;
                break;
            }

            default:
            {
                AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
                rcStrict = VERR_VMX_IPE_2;
                break;
            }
        }
    }
    /* No event was being delivered; check whether we must (re-)establish NMI blocking
       because the exit interrupted an IRET inside the guest's NMI handler. */
    else if (   (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
             && !CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
    {
        if (   VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
            && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
            && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
        {
            /*
             * Execution of IRET caused a fault when NMI blocking was in effect (i.e we're in
             * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
             * that virtual NMIs remain blocked until the IRET execution is completed.
             *
             * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
             */
            CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
            Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
        }
        else if (   pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
                 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
                 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
        {
            /*
             * Execution of IRET caused an EPT violation, page-modification log-full event or
             * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
             * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
             * that virtual NMIs remain blocked until the IRET execution is completed.
             *
             * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
             */
            if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
            {
                CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
                Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
            }
        }
    }

    Assert(   rcStrict == VINF_SUCCESS  || rcStrict == VINF_HM_DOUBLE_FAULT
           || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
    return rcStrict;
}
6594
6595
6596#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6597/**
6598 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
6599 * guest attempting to execute a VMX instruction.
6600 *
6601 * @returns Strict VBox status code (i.e. informational status codes too).
6602 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6603 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
6604 *
6605 * @param pVCpu The cross context virtual CPU structure.
6606 * @param uExitReason The VM-exit reason.
6607 *
6608 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
6609 * @remarks No-long-jump zone!!!
6610 */
6611static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
6612{
6613 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
6614 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
6615
6616 /*
6617 * The physical CPU would have already checked the CPU mode/code segment.
6618 * We shall just assert here for paranoia.
6619 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
6620 */
6621 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6622 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
6623 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
6624
6625 if (uExitReason == VMX_EXIT_VMXON)
6626 {
6627 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6628
6629 /*
6630 * We check CR4.VMXE because it is required to be always set while in VMX operation
6631 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
6632 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
6633 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
6634 */
6635 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
6636 {
6637 Log4Func(("CR4.VMXE is not set -> #UD\n"));
6638 vmxHCSetPendingXcptUD(pVCpu);
6639 return VINF_HM_PENDING_XCPT;
6640 }
6641 }
6642 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
6643 {
6644 /*
6645 * The guest has not entered VMX operation but attempted to execute a VMX instruction
6646 * (other than VMXON), we need to raise a #UD.
6647 */
6648 Log4Func(("Not in VMX root mode -> #UD\n"));
6649 vmxHCSetPendingXcptUD(pVCpu);
6650 return VINF_HM_PENDING_XCPT;
6651 }
6652
6653 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
6654 return VINF_SUCCESS;
6655}
6656
6657
/**
 * Decodes the memory operand of an instruction that caused a VM-exit.
 *
 * The Exit qualification field provides the displacement field for memory
 * operand instructions, if any.
 *
 * @returns Strict VBox status code (i.e. informational status codes too).
 * @retval  VINF_SUCCESS if the operand was successfully decoded.
 * @retval  VINF_HM_PENDING_XCPT if an exception was raised while decoding the
 *          operand.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uExitInstrInfo  The VM-exit instruction information field.
 * @param   enmMemAccess    The memory operand's access type (read or write).
 * @param   GCPtrDisp       The instruction displacement field, if any. For
 *                          RIP-relative addressing pass RIP + displacement here.
 * @param   pGCPtrMem       Where to store the effective destination memory address.
 *
 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
 *          virtual-8086 mode hence skips those checks while verifying if the
 *          segment is valid.
 */
static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
                                          PRTGCPTR pGCPtrMem)
{
    Assert(pGCPtrMem);
    Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
    HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
                              | CPUMCTX_EXTRN_CR0);

    /* Address-size masks indexed by the instruction-info address-size field (16/32/64-bit). */
    static uint64_t const s_auAddrSizeMasks[]   = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
    /* NOTE(review): despite the "Masks" name this table holds access sizes in bytes and
       is indexed by the *address*-size field below — confirm this matches callers' intent. */
    static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
    AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));

    /* Unpack the VM-exit instruction-information field. */
    VMXEXITINSTRINFO ExitInstrInfo;
    ExitInstrInfo.u = uExitInstrInfo;
    uint8_t const uAddrSize     = ExitInstrInfo.All.u3AddrSize;
    uint8_t const iSegReg       = ExitInstrInfo.All.iSegReg;
    bool const    fIdxRegValid  = !ExitInstrInfo.All.fIdxRegInvalid;
    uint8_t const iIdxReg       = ExitInstrInfo.All.iIdxReg;
    uint8_t const uScale        = ExitInstrInfo.All.u2Scaling;
    bool const    fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
    uint8_t const iBaseReg      = ExitInstrInfo.All.iBaseReg;
    bool const    fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
    bool const    fIsLongMode   = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);

    /*
     * Validate instruction information.
     * This shouldn't happen on real hardware but useful while testing our nested hardware-virtualization code.
     */
    AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
                          ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
    AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
                          ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
    AssertLogRelMsgReturn(fIsMemOperand,
                          ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);

    /*
     * Compute the complete effective address: displacement + base + scaled index,
     * then add the segment base (unless in long mode with CS/DS/ES/SS, whose bases
     * are treated as zero) and truncate to the effective address size.
     *
     * See AMD instruction spec. 1.4.2 "SIB Byte Format"
     * See AMD spec. 4.5.2 "Segment Registers".
     */
    RTGCPTR GCPtrMem = GCPtrDisp;
    if (fBaseRegValid)
        GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
    if (fIdxRegValid)
        GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;

    /* Keep the segment-relative offset around for the 32-bit limit checks below. */
    RTGCPTR const GCPtrOff = GCPtrMem;
    if (   !fIsLongMode
        || iSegReg >= X86_SREG_FS)
        GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
    GCPtrMem &= s_auAddrSizeMasks[uAddrSize];

    /*
     * Validate effective address.
     * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
     */
    uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
    Assert(cbAccess > 0);
    if (fIsLongMode)
    {
        /* In long mode only canonicality is checked; segment limits don't apply. */
        if (X86_IS_CANONICAL(GCPtrMem))
        {
            *pGCPtrMem = GCPtrMem;
            return VINF_SUCCESS;
        }

        /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
         *        "Data Limit Checks in 64-bit Mode". */
        Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
        vmxHCSetPendingXcptGP(pVCpu, 0);
        return VINF_HM_PENDING_XCPT;
    }

    /*
     * This is a watered down version of iemMemApplySegment().
     * Parts that are not applicable for VMX instructions like real-or-v8086 mode
     * and segment CPL/DPL checks are skipped.
     */
    RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
    RTGCPTR32 const GCPtrLast32  = GCPtrFirst32 + cbAccess - 1;
    PCCPUMSELREG    pSel         = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];

    /* Check if the segment is present and usable. */
    if (    pSel->Attr.n.u1Present
        && !pSel->Attr.n.u1Unusable)
    {
        Assert(pSel->Attr.n.u1DescType);
        if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
        {
            /* Check permissions for the data segment. */
            if (   enmMemAccess == VMXMEMACCESS_WRITE
                && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
            {
                Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
                vmxHCSetPendingXcptGP(pVCpu, iSegReg);
                return VINF_HM_PENDING_XCPT;
            }

            /* Check limits if it's a normal data segment. */
            if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
            {
                if (   GCPtrFirst32 > pSel->u32Limit
                    || GCPtrLast32  > pSel->u32Limit)
                {
                    Log4Func(("Data segment limit exceeded. "
                              "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
                              GCPtrLast32, pSel->u32Limit));
                    if (iSegReg == X86_SREG_SS)
                        vmxHCSetPendingXcptSS(pVCpu, 0);
                    else
                        vmxHCSetPendingXcptGP(pVCpu, 0);
                    return VINF_HM_PENDING_XCPT;
                }
            }
            else
            {
                /* Check limits if it's an expand-down data segment.
                   Note! The upper boundary is defined by the B bit, not the G bit! */
                if (   GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
                    || GCPtrLast32  > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
                {
                    Log4Func(("Expand-down data segment limit exceeded. "
                              "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
                              GCPtrLast32, pSel->u32Limit));
                    if (iSegReg == X86_SREG_SS)
                        vmxHCSetPendingXcptSS(pVCpu, 0);
                    else
                        vmxHCSetPendingXcptGP(pVCpu, 0);
                    return VINF_HM_PENDING_XCPT;
                }
            }
        }
        else
        {
            /* Check permissions for the code segment. */
            if (   enmMemAccess == VMXMEMACCESS_WRITE
                || (   enmMemAccess == VMXMEMACCESS_READ
                    && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
            {
                Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
                Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
                vmxHCSetPendingXcptGP(pVCpu, 0);
                return VINF_HM_PENDING_XCPT;
            }

            /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
            if (   GCPtrFirst32 > pSel->u32Limit
                || GCPtrLast32  > pSel->u32Limit)
            {
                Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
                          GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
                if (iSegReg == X86_SREG_SS)
                    vmxHCSetPendingXcptSS(pVCpu, 0);
                else
                    vmxHCSetPendingXcptGP(pVCpu, 0);
                return VINF_HM_PENDING_XCPT;
            }
        }
    }
    else
    {
        Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
        vmxHCSetPendingXcptGP(pVCpu, 0);
        return VINF_HM_PENDING_XCPT;
    }

    *pGCPtrMem = GCPtrMem;
    return VINF_SUCCESS;
}
6849#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6850
6851
6852/**
6853 * VM-exit helper for LMSW.
6854 */
6855static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6856{
6857 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6858 AssertRCReturn(rc, rc);
6859
6860 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6861 AssertMsg( rcStrict == VINF_SUCCESS
6862 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6863
6864 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6865 if (rcStrict == VINF_IEM_RAISED_XCPT)
6866 {
6867 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6868 rcStrict = VINF_SUCCESS;
6869 }
6870
6871 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6872 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6873 return rcStrict;
6874}
6875
6876
6877/**
6878 * VM-exit helper for CLTS.
6879 */
6880static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6881{
6882 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6883 AssertRCReturn(rc, rc);
6884
6885 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6886 AssertMsg( rcStrict == VINF_SUCCESS
6887 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6888
6889 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6890 if (rcStrict == VINF_IEM_RAISED_XCPT)
6891 {
6892 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6893 rcStrict = VINF_SUCCESS;
6894 }
6895
6896 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6897 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6898 return rcStrict;
6899}
6900
6901
6902/**
6903 * VM-exit helper for MOV from CRx (CRx read).
6904 */
6905static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6906{
6907 Assert(iCrReg < 16);
6908 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6909
6910 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6911 AssertRCReturn(rc, rc);
6912
6913 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6914 AssertMsg( rcStrict == VINF_SUCCESS
6915 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6916
6917 if (iGReg == X86_GREG_xSP)
6918 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6919 else
6920 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6921#ifdef VBOX_WITH_STATISTICS
6922 switch (iCrReg)
6923 {
6924 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6925 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6926 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6927 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6928 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6929 }
6930#endif
6931 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6932 return rcStrict;
6933}
6934
6935
/**
 * VM-exit helper for MOV to CRx (CRx write).
 *
 * Lets IEM emulate the CRx write (the required state must already have been
 * imported, see the context assertion below) and flags the per-register context
 * changes for the next VM-entry.
 */
static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
{
    HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);

    VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
    AssertMsg(   rcStrict == VINF_SUCCESS
              || rcStrict == VINF_IEM_RAISED_XCPT
              || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));

    /* Flag what each control register write may have touched. */
    switch (iCrReg)
    {
        case 0:
            /* CR0 writes can change EFER.LMA handling and thus the entry/exit controls too. */
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
                                                                | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
            STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
            Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
            break;

        case 2:
            STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
            /* Nothing to do here, CR2 is not part of the VMCS. */
            break;

        case 3:
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
            STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
            Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
            break;

        case 4:
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
            STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
#ifndef IN_NEM_DARWIN
            Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
                      pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
#else
            Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
#endif
            break;

        case 8:
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
                             HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
            STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
            break;

        default:
            AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
            break;
    }

    /* An exception raised by IEM means the whole exception state must be re-exported. */
    if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
        rcStrict = VINF_SUCCESS;
    }
    return rcStrict;
}
6997
6998
/**
 * VM-exit exception handler for \#PF (Page-fault exception).
 *
 * With nested paging the \#PF is reflected straight back to the guest (or
 * converted to \#DF for a vectoring double \#PF); without it, PGM's trap-0e
 * handler decides whether it is a shadow-paging \#PF or a genuine guest \#PF.
 *
 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
 */
static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    /* The exit qualification holds the faulting linear address for #PF exits. */
    vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);

#ifndef IN_NEM_DARWIN
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    if (!VM_IS_VMX_NESTED_PAGING(pVM))
    { /* likely */ }
    else
#endif
    {
        /* Nested-paging path: reflect the #PF (or a #DF) straight back to the guest. */
#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
        Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
#endif
        VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;                  /* In case it's a contributory or vectoring #PF. */
        if (!pVmxTransient->fVectoringDoublePF)
        {
            vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
                                 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
        }
        else
        {
            /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
            Assert(!pVmxTransient->fIsNestedGuest);
            vmxHCSetPendingXcptDF(pVCpu);
            Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
        }
        STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
        return VINF_SUCCESS;
    }

    Assert(!pVmxTransient->fIsNestedGuest);

    /* If it's a vectoring #PF, emulate injecting the original event injection as PGMTrap0eHandler() is incapable
       of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
    if (pVmxTransient->fVectoringPF)
    {
        Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
        return VINF_EM_RAW_INJECT_TRPM_EVENT;
    }

    /* PGM may emulate arbitrary instructions below, so import everything. */
    int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
    AssertRCReturn(rc, rc);

    Log4Func(("#PF: cs:rip=%#04x:%08RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pVCpu->cpum.GstCtx.cs.Sel,
              pVCpu->cpum.GstCtx.rip, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pVCpu->cpum.GstCtx.cr3));

    TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
    rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, &pVCpu->cpum.GstCtx, (RTGCPTR)pVmxTransient->uExitQual);

    Log4Func(("#PF: rc=%Rrc\n", rc));
    if (rc == VINF_SUCCESS)
    {
        /*
         * This is typically a shadow page table sync or a MMIO instruction. But we may have
         * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
         */
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
        TRPMResetTrap(pVCpu);
        STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
        return rc;
    }

    if (rc == VINF_EM_RAW_GUEST_TRAP)
    {
        if (!pVmxTransient->fVectoringDoublePF)
        {
            /* It's a guest page fault and needs to be reflected to the guest. */
            uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
            TRPMResetTrap(pVCpu);
            VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;                 /* In case it's a contributory #PF. */
            vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
                                 uGstErrorCode, pVmxTransient->uExitQual);
        }
        else
        {
            /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
            TRPMResetTrap(pVCpu);
            VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;     /* Clear pending #PF to replace it with #DF. */
            vmxHCSetPendingXcptDF(pVCpu);
            Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
        }

        STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
        return VINF_SUCCESS;
    }

    /* Anything else needs further handling in ring-3 (EM). */
    TRPMResetTrap(pVCpu);
    STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
    return rc;
}
7096
7097
/**
 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
 *
 * When CR0.NE is clear, the \#MF is signalled the legacy way as FERR (IRQ 13);
 * otherwise it is reflected back to the guest as a \#MF exception.
 *
 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
 */
static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);

    /* Only CR0 is needed to check the NE bit. */
    int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR0>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
    AssertRCReturn(rc, rc);

    if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
    {
        /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
        rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);

        /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
         *        provides VM-exit instruction length. If this causes problem later,
         *        disassemble the instruction like it's done on AMD-V. */
        int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
        AssertRCReturn(rc2, rc2);
        return rc;
    }

    /* CR0.NE is set: reflect the #MF back to the guest. */
    vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
                         pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
    return VINF_SUCCESS;
}
7128
7129
7130/**
7131 * VM-exit exception handler for \#BP (Breakpoint exception).
7132 *
7133 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7134 */
7135static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7136{
7137 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7138 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
7139
7140 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7141 AssertRCReturn(rc, rc);
7142
7143 VBOXSTRICTRC rcStrict;
7144 if (!pVmxTransient->fIsNestedGuest)
7145 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx);
7146 else
7147 rcStrict = VINF_EM_RAW_GUEST_TRAP;
7148
7149 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
7150 {
7151 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7152 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7153 rcStrict = VINF_SUCCESS;
7154 }
7155
7156 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
7157 return rcStrict;
7158}
7159
7160
7161/**
7162 * VM-exit helper for split-lock access triggered \#AC exceptions.
7163 */
7164static VBOXSTRICTRC vmxHCHandleSplitLockAcXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7165{
7166 /*
7167 * Check for debug/trace events and import state accordingly.
7168 */
7169 if (!pVmxTransient->fIsNestedGuest)
7170 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
7171 else
7172 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatNestedExitACSplitLock);
7173 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7174 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
7175#ifndef IN_NEM_DARWIN
7176 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
7177#endif
7178 )
7179 {
7180 if (pVM->cCpus == 1)
7181 {
7182#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7183 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK,
7184 HMVMX_CPUMCTX_XPCT_AC>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7185#else
7186 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7187 HMVMX_CPUMCTX_XPCT_AC>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7188#endif
7189 AssertRCReturn(rc, rc);
7190 }
7191 }
7192 else
7193 {
7194 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7195 HMVMX_CPUMCTX_XPCT_AC>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7196 AssertRCReturn(rc, rc);
7197
7198 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
7199
7200 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
7201 {
7202 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
7203 if (rcStrict != VINF_SUCCESS)
7204 return rcStrict;
7205 }
7206 }
7207
7208 /*
7209 * Emulate the instruction.
7210 *
7211 * We have to ignore the LOCK prefix here as we must not retrigger the
7212 * detection on the host. This isn't all that satisfactory, though...
7213 */
7214 if (pVM->cCpus == 1)
7215 {
7216 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
7217 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7218
7219 /** @todo For SMP configs we should do a rendezvous here. */
7220 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
7221 if (rcStrict == VINF_SUCCESS)
7222#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7223 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
7224 HM_CHANGED_GUEST_RIP
7225 | HM_CHANGED_GUEST_RFLAGS
7226 | HM_CHANGED_GUEST_GPRS_MASK
7227 | HM_CHANGED_GUEST_CS
7228 | HM_CHANGED_GUEST_SS);
7229#else
7230 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7231#endif
7232 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7233 {
7234 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7235 rcStrict = VINF_SUCCESS;
7236 }
7237 return rcStrict;
7238 }
7239 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
7240 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7241 return VINF_EM_EMULATE_SPLIT_LOCK;
7242}
7243
7244
7245/**
7246 * VM-exit exception handler for \#AC (Alignment-check exception).
7247 *
7248 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7249 */
7250static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7251{
7252 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7253
7254 /*
7255 * Detect #ACs caused by host having enabled split-lock detection.
7256 * Emulate such instructions.
7257 */
7258 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_XPCT_AC>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7259 AssertRCReturn(rc, rc);
7260 /** @todo detect split lock in cpu feature? */
7261 /** @todo r=ramshankar: is cpu feature detection really necessary since we are able
7262 * to detect the split-lock \#AC condition without it? More so since the
7263 * feature isn't cleanly detectable, see @bugref{10318#c125}. */
7264 if (vmxHCIsSplitLockAcXcpt(pVCpu))
7265 return vmxHCHandleSplitLockAcXcpt(pVCpu, pVmxTransient);
7266
7267 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
7268 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7269 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
7270
7271 /* Re-inject it. We'll detect any nesting before getting here. */
7272 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7273 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7274 return VINF_SUCCESS;
7275}
7276
7277
/**
 * VM-exit exception handler for \#DB (Debug exception).
 *
 * Routes the event to DBGF first (it may be a hypervisor breakpoint/step);
 * otherwise updates DR6/DR7 per the VM-exit data and reflects the \#DB.
 *
 * @returns Strict VBox status code (VINF_SUCCESS, VINF_EM_DBG_STEPPED or
 *          VINF_EM_DBG_BREAKPOINT).
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 *
 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
 */
static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);

    /*
     * Get the DR6-like values from the Exit qualification and pass it to DBGF for processing.
     */
    vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);

    /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
    uint64_t const uDR6 = X86_DR6_INIT_VAL
                        | (pVmxTransient->uExitQual & (  X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
                                                       | X86_DR6_BD | X86_DR6_BS));
    Log6Func(("uDR6=%#RX64 uExitQual=%#RX64\n", uDR6, pVmxTransient->uExitQual));

    int rc;
    if (!pVmxTransient->fIsNestedGuest)
    {
        rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx, uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);

        /*
         * Prevents stepping twice over the same instruction when the guest is stepping using
         * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
         * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
         */
        if (   rc == VINF_EM_DBG_STEPPED
            && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
        {
            Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
            rc = VINF_EM_RAW_GUEST_TRAP;
        }
    }
    else
        rc = VINF_EM_RAW_GUEST_TRAP;    /* Nested-guest #DBs are never handled by DBGF here. */
    Log6Func(("rc=%Rrc\n", rc));
    if (rc == VINF_EM_RAW_GUEST_TRAP)
    {
        /*
         * The exception was for the guest.  Update DR6, DR7.GD and
         * IA32_DEBUGCTL.LBR before forwarding it.
         * See Intel spec. 27.1 "Architectural State before a VM-Exit"
         * and @sdmv3{077,622,17.2.3,Debug Status Register (DR6)}.
         */
#ifndef IN_NEM_DARWIN
        /* Block ring-3 calls and preemption so the host DR6 update below
           cannot be torn apart by a context switch. */
        VMMRZCallRing3Disable(pVCpu);
        HM_DISABLE_PREEMPT(pVCpu);

        pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
        pVCpu->cpum.GstCtx.dr[6] |= uDR6;
        if (CPUMIsGuestDebugStateActive(pVCpu))
            ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);

        HM_RESTORE_PREEMPT();
        VMMRZCallRing3Enable(pVCpu);
#else
        /** @todo */
#endif

        rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
        AssertRCReturn(rc, rc);

        /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
        pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_GD;

        /* Paranoia. */
        pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
        pVCpu->cpum.GstCtx.dr[7] |= X86_DR7_RA1_MASK;

        rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pVCpu->cpum.GstCtx.dr[7]);
        AssertRC(rc);

        /*
         * Raise #DB in the guest.
         *
         * It is important to reflect exactly what the VM-exit gave us (preserving the
         * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
         * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
         * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
         *
         * Intel re-documented ICEBP/INT1 on May 2018 previously documented as part of
         * Intel 386, see Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
         */
        vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
                             pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
        return VINF_SUCCESS;
    }

    /*
     * Not a guest trap, must be a hypervisor related debug event then.
     * Update DR6 in case someone is interested in it.
     */
    AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
    AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
    CPUMSetHyperDR6(pVCpu, uDR6);

    return rc;
}
7381
7382
7383/**
7384 * Hacks its way around the lovely mesa driver's backdoor accesses.
7385 *
7386 * @sa hmR0SvmHandleMesaDrvGp.
7387 */
7388static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7389{
7390 LogFunc(("cs:rip=%#04x:%08RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
7391 RT_NOREF(pCtx);
7392
7393 /* For now we'll just skip the instruction. */
7394 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7395}
7396
7397
7398/**
7399 * Checks if the \#GP'ing instruction is the mesa driver doing it's lovely
7400 * backdoor logging w/o checking what it is running inside.
7401 *
7402 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
7403 * backdoor port and magic numbers loaded in registers.
7404 *
7405 * @returns true if it is, false if it isn't.
7406 * @sa hmR0SvmIsMesaDrvGp.
7407 */
7408DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7409{
7410 /* 0xed: IN eAX,dx */
7411 uint8_t abInstr[1];
7412 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
7413 return false;
7414
7415 /* Check that it is #GP(0). */
7416 if (pVmxTransient->uExitIntErrorCode != 0)
7417 return false;
7418
7419 /* Check magic and port. */
7420 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
7421 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
7422 if (pCtx->rax != UINT32_C(0x564d5868))
7423 return false;
7424 if (pCtx->dx != UINT32_C(0x5658))
7425 return false;
7426
7427 /* Flat ring-3 CS. */
7428 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
7429 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
7430 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
7431 if (pCtx->cs.Attr.n.u2Dpl != 3)
7432 return false;
7433 if (pCtx->cs.u64Base != 0)
7434 return false;
7435
7436 /* Check opcode. */
7437 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
7438 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
7439 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
7440 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
7441 if (RT_FAILURE(rc))
7442 return false;
7443 if (abInstr[0] != 0xed)
7444 return false;
7445
7446 return true;
7447}
7448
7449
/**
 * VM-exit exception handler for \#GP (General-protection exception).
 *
 * Two paths: when not in real-mode-on-V86 (or when running a nested guest /
 * NEM darwin) the \#GP is reflected to the guest (unless it is the mesa driver
 * backdoor); in real-on-V86 mode the instruction is emulated via IEM.
 *
 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
 */
static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);

    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
#ifndef IN_NEM_DARWIN
    PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
    if (pVmcsInfoShared->RealMode.fRealOnV86Active)
    { /* likely */ }
    else
#endif
    {   /* Not real-on-V86 (always the case in the NEM darwin build): reflect the #GP. */
#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
# ifndef IN_NEM_DARWIN
        Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
# else
        Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
# endif
#endif
        /*
         * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
         * executing a nested-guest, reflect #GP to the guest or nested-guest.
         */
        int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
        AssertRCReturn(rc, rc);
        Log4Func(("Gst: cs:rip=%#04x:%08RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
                  pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));

        /* Mesa-driver backdoor handling only applies to non-nested guests when the trap was armed for it. */
        if (    pVmxTransient->fIsNestedGuest
            || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
            || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
            vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
                                 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
        else
            rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
        return rc;
    }

#ifndef IN_NEM_DARWIN
    /* Real-on-V86 path: emulate one instruction and re-evaluate the guest mode. */
    Assert(CPUMIsGuestInRealModeEx(pCtx));
    Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
    Assert(!pVmxTransient->fIsNestedGuest);

    int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
    AssertRCReturn(rc, rc);

    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    if (rcStrict == VINF_SUCCESS)
    {
        if (!CPUMIsGuestInRealModeEx(pCtx))
        {
            /*
             * The guest is no longer in real-mode, check if we can continue executing the
             * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
             */
            pVmcsInfoShared->RealMode.fRealOnV86Active = false;
            if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
            {
                Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
                ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
            }
            else
            {
                Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
                rcStrict = VINF_EM_RESCHEDULE;
            }
        }
        else
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
    }
    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        rcStrict = VINF_SUCCESS;
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    }
    return VBOXSTRICTRC_VAL(rcStrict);
#endif
}
7535
7536
/**
 * VM-exit exception handler for \#DE (Divide Error).
 *
 * Gives the guest-compatibility-manager (GCM) a chance to fix up the guest
 * state (division-overflow workarounds); otherwise the \#DE is re-injected.
 *
 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
 */
static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);

    int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
    AssertRCReturn(rc, rc);

    VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
    if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
    {
        uint8_t cbInstr = 0;
        VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
        if (rc2 == VINF_SUCCESS)
            rcStrict = VINF_SUCCESS;    /* Restart instruction with modified guest register context. */
        else if (rc2 == VERR_NOT_FOUND)
            rcStrict = VERR_NOT_FOUND;  /* Deliver the exception. */
        else
            /* NOTE(review): this asserts rcStrict, which still holds the failure
               initializer here, making it a tautology — presumably rc2 was meant;
               confirm before changing. */
            Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
    }
    else
        rcStrict = VINF_SUCCESS;        /* Do nothing. */

    /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
    if (RT_FAILURE(rcStrict))
    {
        vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
                             pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
        rcStrict = VINF_SUCCESS;
    }

    Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
    return VBOXSTRICTRC_VAL(rcStrict);
}
7576
7577
7578/**
7579 * VM-exit exception handler wrapper for all other exceptions that are not handled
7580 * by a specific handler.
7581 *
7582 * This simply re-injects the exception back into the VM without any special
7583 * processing.
7584 *
7585 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7586 */
7587static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7588{
7589 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7590
7591#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7592# ifndef IN_NEM_DARWIN
7593 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7594 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
7595 ("uVector=%#x u32XcptBitmap=%#X32\n",
7596 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
7597 NOREF(pVmcsInfo);
7598# endif
7599#endif
7600
7601 /*
7602 * Re-inject the exception into the guest. This cannot be a double-fault condition which
7603 * would have been handled while checking exits due to event delivery.
7604 */
7605 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7606
7607#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7608 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7609 AssertRCReturn(rc, rc);
7610 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%08RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7611#endif
7612
7613#ifdef VBOX_WITH_STATISTICS
7614 switch (uVector)
7615 {
7616 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
7617 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
7618 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
7619 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7620 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
7621 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
7622 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7623 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
7624 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
7625 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
7626 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
7627 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
7628 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
7629 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
7630 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
7631 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
7632 default:
7633 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
7634 break;
7635 }
7636#endif
7637
7638 /* We should never call this function for a page-fault, we'd need to pass on the fault address below otherwise. */
7639 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
7640 NOREF(uVector);
7641
7642 /* Re-inject the original exception into the guest. */
7643 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7644 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7645 return VINF_SUCCESS;
7646}
7647
7648
/**
 * VM-exit exception handler for all exceptions (except NMIs!).
 *
 * Dispatches to the vector-specific handlers after resolving any interaction
 * with an event that was being delivered through the IDT when the exit fired.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 *
 * @remarks This may be called for both guests and nested-guests. Take care to not
 *          make assumptions and avoid doing anything that is not relevant when
 *          executing a nested-guest (e.g., Mesa driver hacks).
 */
static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);

    /*
     * If this VM-exit occurred while delivering an event through the guest IDT, take
     * action based on the return code and additional hints (e.g. for page-faults)
     * that will be updated in the VMX transient structure.
     */
    VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
    if (rcStrict == VINF_SUCCESS)
    {
        /*
         * If an exception caused a VM-exit due to delivery of an event, the original
         * event may have to be re-injected into the guest. We shall reinject it and
         * continue guest execution. However, page-fault is a complicated case and
         * needs additional processing done in vmxHCExitXcptPF().
         */
        Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
        uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
        if (   !VCPU_2_VMXSTATE(pVCpu).Event.fPending
            || uVector == X86_XCPT_PF)
        {
            /* Dispatch to the vector-specific handler; everything else is plainly re-injected. */
            switch (uVector)
            {
                case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
                case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
                case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
                case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
                case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
                case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
                case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
                default:
                    return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
            }
        }
        /* else: inject pending event before resuming guest execution. */
    }
    else if (rcStrict == VINF_HM_DOUBLE_FAULT)
    {
        /* The event-delivery check converted the situation into a pending #DF. */
        Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
        rcStrict = VINF_SUCCESS;
    }

    return rcStrict;
}
7702/** @} */
7703
7704
7705/** @name VM-exit handlers.
7706 * @{
7707 */
7708/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7709/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7710/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7711
7712/**
7713 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7714 */
7715HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7716{
7717 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7718 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7719
7720#ifndef IN_NEM_DARWIN
7721 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7722 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7723 return VINF_SUCCESS;
7724 return VINF_EM_RAW_INTERRUPT;
7725#else
7726 return VINF_SUCCESS;
7727#endif
7728}
7729
7730
/**
 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
 * VM-exit.
 *
 * Dispatches host NMIs to the host and exceptions to vmxHCExitXcpt() after
 * reading the additional VMCS fields the exception path needs.
 */
HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);

    vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);

    uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
    uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
    Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));

    /* We never use interrupt-acknowledge on VM-exit, so external interrupts
       must not land here (they are handled by vmxHCExitExtInt). */
    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    Assert(   !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
           && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
    NOREF(pVmcsInfo);

    VBOXSTRICTRC rcStrict;
    switch (uExitIntType)
    {
#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
        /*
         * Host physical NMIs:
         * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
         * injected it ourselves and anything we inject is not going to cause a VM-exit directly
         * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
         *
         * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
         * See Intel spec. 27.5.5 "Updating Non-Register State".
         */
        case VMX_EXIT_INT_INFO_TYPE_NMI:
        {
            rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
            break;
        }
#endif

        /*
         * Privileged software exceptions (#DB from ICEBP),
         * Software exceptions (#BP and #OF),
         * Hardware exceptions:
         * Process the required exceptions and resume guest execution if possible.
         */
        case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
            Assert(uVector == X86_XCPT_DB);
            RT_FALL_THRU();
        case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
            Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
            RT_FALL_THRU();
        case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
        {
            NOREF(uVector);
            /* The exception handlers need the error code, instruction length and IDT-vectoring info. */
            vmxHCReadToTransient<  HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
                                 | HMVMX_READ_EXIT_INSTR_LEN
                                 | HMVMX_READ_IDT_VECTORING_INFO
                                 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
            rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
            break;
        }

        default:
        {
            VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
            rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
            AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
            break;
        }
    }

    STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
    return rcStrict;
}
7806
7807
7808/**
7809 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7810 */
7811HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7812{
7813 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7814
7815 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts, it is now ready. */
7816 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7817 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7818
7819 /* Evaluate and deliver pending events and resume guest execution. */
7820 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7821 return VINF_SUCCESS;
7822}
7823
7824
/**
 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
 *
 * Clears any blocking-by-STI interruptibility state and disables NMI-window
 * exiting so the pending NMI can be delivered on the next VM-entry.
 */
HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
    {
        AssertMsgFailed(("Unexpected NMI-window exit.\n"));
        HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
    }

    Assert(!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx));

    /*
     * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
     * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
     */
    uint32_t fIntrState;
    int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
    AssertRC(rc);
    Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
    if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
    {
        /* Keep CPUM's view of the interrupt shadow in sync with the VMCS update below. */
        CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);

        fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
        rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
        AssertRC(rc);
    }

    /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs, it is now ready. */
    vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);

    /* Evaluate and deliver pending events and resume guest execution. */
    return VINF_SUCCESS;
}
7864
7865
/**
 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
 */
HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    /* No cache emulation needed; just skip the instruction. */
    return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
}
7874
7875
/**
 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
 */
HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    /* No cache emulation needed; just skip the instruction. */
    return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
}
7884
7885
/**
 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
 *
 * Executes CPUID via IEM; frequently-hit CPUID leaves are routed through the
 * exit-history mechanism (EMHistoryExec) for batching/probing.
 */
HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    /*
     * Get the state we need and update the exit history entry.
     */
    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
    int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
    AssertRCReturn(rc, rc);

    VBOXSTRICTRC rcStrict;
    PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
                                                            EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
                                                            pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
    if (!pExitRec)
    {
        /*
         * Regular CPUID instruction execution.
         */
        rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
        if (rcStrict == VINF_SUCCESS)
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
        else if (rcStrict == VINF_IEM_RAISED_XCPT)
        {
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
            rcStrict = VINF_SUCCESS;
        }
    }
    else
    {
        /*
         * Frequent exit or something needing probing. Get state and call EMHistoryExec.
         */
        int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
                                        IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
        AssertRCReturn(rc2, rc2);

        Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));

        rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);

        Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
              VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    }
    return rcStrict;
}
7940
7941
7942/**
7943 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7944 */
7945HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7946{
7947 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7948
7949 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7950 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7951 AssertRCReturn(rc, rc);
7952
7953 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7954 return VINF_EM_RAW_EMULATE_INSTR;
7955
7956 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7957 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7958}
7959
7960
7961/**
7962 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7963 */
7964HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7965{
7966 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7967
7968 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7969 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7970 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7971 AssertRCReturn(rc, rc);
7972
7973 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7974 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7975 {
7976 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7977 we must reset offsetting on VM-entry. See @bugref{6634}. */
7978 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7979 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7980 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7981 }
7982 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7983 {
7984 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7985 rcStrict = VINF_SUCCESS;
7986 }
7987 return rcStrict;
7988}
7989
7990
7991/**
7992 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7993 */
7994HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7995{
7996 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7997
7998 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7999 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8000 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX>(pVCpu, pVmcsInfo, __FUNCTION__);
8001 AssertRCReturn(rc, rc);
8002
8003 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
8004 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8005 {
8006 /* If we get a spurious VM-exit when TSC offsetting is enabled,
8007 we must reset offsetting on VM-reentry. See @bugref{6634}. */
8008 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
8009 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8010 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8011 }
8012 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8013 {
8014 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8015 rcStrict = VINF_SUCCESS;
8016 }
8017 return rcStrict;
8018}
8019
8020
8021/**
8022 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
8023 */
8024HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8025{
8026 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8027
8028 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8029 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8030 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
8031 AssertRCReturn(rc, rc);
8032
8033 VBOXSTRICTRC rcStrict = IEMExecDecodedRdpmc(pVCpu, pVmxTransient->cbExitInstr);
8034 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8035 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8036 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8037 {
8038 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8039 rcStrict = VINF_SUCCESS;
8040 }
8041 return rcStrict;
8042}
8043
8044
8045/**
8046 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
8047 */
8048HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8049{
8050 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8051
8052 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
8053 if (EMAreHypercallInstructionsEnabled(pVCpu))
8054 {
8055 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8056 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RIP
8057 | CPUMCTX_EXTRN_RFLAGS
8058 | CPUMCTX_EXTRN_CR0
8059 | CPUMCTX_EXTRN_SS
8060 | CPUMCTX_EXTRN_CS
8061 | CPUMCTX_EXTRN_EFER>(pVCpu, pVmcsInfo, __FUNCTION__);
8062 AssertRCReturn(rc, rc);
8063
8064 /* Perform the hypercall. */
8065 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
8066 if (rcStrict == VINF_SUCCESS)
8067 {
8068 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8069 AssertRCReturn(rc, rc);
8070 }
8071 else
8072 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
8073 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
8074 || RT_FAILURE(rcStrict));
8075
8076 /* If the hypercall changes anything other than guest's general-purpose registers,
8077 we would need to reload the guest changed bits here before VM-entry. */
8078 }
8079 else
8080 Log4Func(("Hypercalls not enabled\n"));
8081
8082 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
8083 if (RT_FAILURE(rcStrict))
8084 {
8085 vmxHCSetPendingXcptUD(pVCpu);
8086 rcStrict = VINF_SUCCESS;
8087 }
8088
8089 return rcStrict;
8090}
8091
8092
8093/**
8094 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
8095 */
8096HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8097{
8098 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8099#ifndef IN_NEM_DARWIN
8100 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
8101#endif
8102
8103 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8104 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8105 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8106 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8107 AssertRCReturn(rc, rc);
8108
8109 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
8110
8111 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
8112 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8113 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8114 {
8115 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8116 rcStrict = VINF_SUCCESS;
8117 }
8118 else
8119 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
8120 VBOXSTRICTRC_VAL(rcStrict)));
8121 return rcStrict;
8122}
8123
8124
8125/**
8126 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
8127 */
8128HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8129{
8130 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8131
8132 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8133 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8134 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS>(pVCpu, pVmcsInfo, __FUNCTION__);
8135 AssertRCReturn(rc, rc);
8136
8137 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
8138 if (rcStrict == VINF_SUCCESS)
8139 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8140 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8141 {
8142 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8143 rcStrict = VINF_SUCCESS;
8144 }
8145
8146 return rcStrict;
8147}
8148
8149
8150/**
8151 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
8152 */
8153HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8154{
8155 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8156
8157 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8158 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8159 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8160 AssertRCReturn(rc, rc);
8161
8162 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
8163 if (RT_SUCCESS(rcStrict))
8164 {
8165 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8166 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
8167 rcStrict = VINF_SUCCESS;
8168 }
8169
8170 return rcStrict;
8171}
8172
8173
8174/**
8175 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
8176 * VM-exit.
8177 */
8178HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8179{
8180 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8181 return VINF_EM_RESET;
8182}
8183
8184
8185/**
8186 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
8187 */
8188HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8189{
8190 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8191
8192 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8193 AssertRCReturn(rc, rc);
8194
8195 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
8196 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
8197 rc = VINF_SUCCESS;
8198 else
8199 rc = VINF_EM_HALT;
8200
8201 if (rc != VINF_SUCCESS)
8202 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
8203 return rc;
8204}
8205
8206
#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
/**
 * VM-exit handler for instructions that result in a \#UD exception delivered to
 * the guest.
 *
 * Queues a \#UD for injection on the next VM-entry and resumes the guest.
 *
 * @returns VINF_SUCCESS.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    vmxHCSetPendingXcptUD(pVCpu);
    return VINF_SUCCESS;
}
#endif
8219
8220
8221/**
8222 * VM-exit handler for expiry of the VMX-preemption timer.
8223 */
8224HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8225{
8226 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8227
8228 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
8229 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8230Log12(("vmxHCExitPreemptTimer:\n"));
8231
8232 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
8233 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8234 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
8235 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
8236 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
8237}
8238
8239
8240/**
8241 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
8242 */
8243HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8244{
8245 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8246
8247 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8248 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8249 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
8250 AssertRCReturn(rc, rc);
8251
8252 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
8253 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
8254 : HM_CHANGED_RAISED_XCPT_MASK);
8255
8256#ifndef IN_NEM_DARWIN
8257 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8258 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
8259 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
8260 {
8261 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
8262 hmR0VmxUpdateStartVmFunction(pVCpu);
8263 }
8264#endif
8265
8266 return rcStrict;
8267}
8268
8269
8270/**
8271 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
8272 */
8273HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8274{
8275 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8276
8277 /** @todo Enable the new code after finding a reliably guest test-case. */
8278#if 1
8279 return VERR_EM_INTERPRETER;
8280#else
8281 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8282 | HMVMX_READ_EXIT_INSTR_INFO
8283 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8284 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8285 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8286 AssertRCReturn(rc, rc);
8287
8288 /* Paranoia. Ensure this has a memory operand. */
8289 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
8290
8291 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
8292 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
8293 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
8294 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
8295
8296 RTGCPTR GCPtrDesc;
8297 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
8298
8299 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
8300 GCPtrDesc, uType);
8301 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8302 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8303 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8304 {
8305 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8306 rcStrict = VINF_SUCCESS;
8307 }
8308 return rcStrict;
8309#endif
8310}
8311
8312
8313/**
8314 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
8315 * VM-exit.
8316 */
8317HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8318{
8319 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8320 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8321 AssertRCReturn(rc, rc);
8322
8323 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
8324 if (RT_FAILURE(rc))
8325 return rc;
8326
8327 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
8328 NOREF(uInvalidReason);
8329
8330#ifdef VBOX_STRICT
8331 uint32_t fIntrState;
8332 uint64_t u64Val;
8333 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_INFO
8334 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8335 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
8336
8337 Log4(("uInvalidReason %u\n", uInvalidReason));
8338 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
8339 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
8340 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
8341
8342 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
8343 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
8344 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
8345 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
8346 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
8347 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
8348 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
8349 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
8350 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
8351 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
8352 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
8353 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
8354# ifndef IN_NEM_DARWIN
8355 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
8356 {
8357 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
8358 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
8359 }
8360
8361 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
8362# endif
8363#endif
8364
8365 return VERR_VMX_INVALID_GUEST_STATE;
8366}
8367
8368/**
8369 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
8370 */
8371HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8372{
8373 /*
8374 * Cumulative notes of all recognized but unexpected VM-exits.
8375 *
8376 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
8377 * nested-paging is used.
8378 *
8379 * 2. Any instruction that causes a VM-exit unconditionally (for e.g. VMXON) must be
8380 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
8381 * this function (and thereby stop VM execution) for handling such instructions.
8382 *
8383 *
8384 * VMX_EXIT_INIT_SIGNAL:
8385 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
8386 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these
8387 * VM-exits. However, we should not receive INIT signals VM-exit while executing a VM.
8388 *
8389 * See Intel spec. 33.14.1 Default Treatment of SMI Delivery"
8390 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
8391 * See Intel spec. "23.8 Restrictions on VMX operation".
8392 *
8393 * VMX_EXIT_SIPI:
8394 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
8395 * activity state is used. We don't make use of it as our guests don't have direct
8396 * access to the host local APIC.
8397 *
8398 * See Intel spec. 25.3 "Other Causes of VM-exits".
8399 *
8400 * VMX_EXIT_IO_SMI:
8401 * VMX_EXIT_SMI:
8402 * This can only happen if we support dual-monitor treatment of SMI, which can be
8403 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
8404 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
8405 * VMX root mode or receive an SMI. If we get here, something funny is going on.
8406 *
8407 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
8408 * See Intel spec. 25.3 "Other Causes of VM-Exits"
8409 *
8410 * VMX_EXIT_ERR_MSR_LOAD:
8411 * Failures while loading MSRs are part of the VM-entry MSR-load area are unexpected
8412 * and typically indicates a bug in the hypervisor code. We thus cannot not resume
8413 * execution.
8414 *
8415 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
8416 *
8417 * VMX_EXIT_ERR_MACHINE_CHECK:
8418 * Machine check exceptions indicates a fatal/unrecoverable hardware condition
8419 * including but not limited to system bus, ECC, parity, cache and TLB errors. A
8420 * #MC exception abort class exception is raised. We thus cannot assume a
8421 * reasonable chance of continuing any sort of execution and we bail.
8422 *
8423 * See Intel spec. 15.1 "Machine-check Architecture".
8424 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
8425 *
8426 * VMX_EXIT_PML_FULL:
8427 * VMX_EXIT_VIRTUALIZED_EOI:
8428 * VMX_EXIT_APIC_WRITE:
8429 * We do not currently support any of these features and thus they are all unexpected
8430 * VM-exits.
8431 *
8432 * VMX_EXIT_GDTR_IDTR_ACCESS:
8433 * VMX_EXIT_LDTR_TR_ACCESS:
8434 * VMX_EXIT_RDRAND:
8435 * VMX_EXIT_RSM:
8436 * VMX_EXIT_VMFUNC:
8437 * VMX_EXIT_ENCLS:
8438 * VMX_EXIT_RDSEED:
8439 * VMX_EXIT_XSAVES:
8440 * VMX_EXIT_XRSTORS:
8441 * VMX_EXIT_UMWAIT:
8442 * VMX_EXIT_TPAUSE:
8443 * VMX_EXIT_LOADIWKEY:
8444 * These VM-exits are -not- caused unconditionally by execution of the corresponding
8445 * instruction. Any VM-exit for these instructions indicate a hardware problem,
8446 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
8447 *
8448 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
8449 */
8450 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8451 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
8452 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8453}
8454
8455
8456/**
8457 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
8458 */
8459HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8460{
8461 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8462
8463 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8464
8465 /** @todo Optimize this: We currently drag in the whole MSR state
8466 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8467 * MSRs required. That would require changes to IEM and possibly CPUM too.
8468 * (Should probably do it lazy fashion from CPUMAllMsrs.cpp). */
8469 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8470 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8471 int rc;
8472 switch (idMsr)
8473 {
8474 default:
8475 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8476 __FUNCTION__);
8477 AssertRCReturn(rc, rc);
8478 break;
8479 case MSR_K8_FS_BASE:
8480 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8481 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8482 AssertRCReturn(rc, rc);
8483 break;
8484 case MSR_K8_GS_BASE:
8485 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8486 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8487 AssertRCReturn(rc, rc);
8488 break;
8489 }
8490
8491 Log4Func(("ecx=%#RX32\n", idMsr));
8492
8493#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8494 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
8495 {
8496 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
8497 && idMsr != MSR_K6_EFER)
8498 {
8499 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
8500 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8501 }
8502 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8503 {
8504 Assert(pVmcsInfo->pvMsrBitmap);
8505 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8506 if (fMsrpm & VMXMSRPM_ALLOW_RD)
8507 {
8508 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
8509 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8510 }
8511 }
8512 }
8513#endif
8514
8515 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
8516 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
8517 if (rcStrict == VINF_SUCCESS)
8518 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8519 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8520 {
8521 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8522 rcStrict = VINF_SUCCESS;
8523 }
8524 else
8525 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
8526 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8527
8528 return rcStrict;
8529}
8530
8531
8532/**
8533 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
8534 */
8535HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8536{
8537 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8538
8539 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8540
8541 /*
8542 * The FS and GS base MSRs are not part of the above all-MSRs mask.
8543 * Although we don't need to fetch the base as it will be overwritten shortly, while
8544 * loading guest-state we would also load the entire segment register including limit
8545 * and attributes and thus we need to load them here.
8546 */
8547 /** @todo Optimize this: We currently drag in the whole MSR state
8548 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8549 * MSRs required. That would require changes to IEM and possibly CPUM too.
8550 * (Should probably do it lazy fashion from CPUMAllMsrs.cpp). */
8551 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8552 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8553 int rc;
8554 switch (idMsr)
8555 {
8556 default:
8557 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8558 __FUNCTION__);
8559 AssertRCReturn(rc, rc);
8560 break;
8561
8562 case MSR_K8_FS_BASE:
8563 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8564 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8565 AssertRCReturn(rc, rc);
8566 break;
8567 case MSR_K8_GS_BASE:
8568 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8569 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8570 AssertRCReturn(rc, rc);
8571 break;
8572 }
8573 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
8574
8575 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
8576 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
8577
8578 if (rcStrict == VINF_SUCCESS)
8579 {
8580 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8581
8582 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
8583 if ( idMsr == MSR_IA32_APICBASE
8584 || ( idMsr >= MSR_IA32_X2APIC_START
8585 && idMsr <= MSR_IA32_X2APIC_END))
8586 {
8587 /*
8588 * We've already saved the APIC related guest-state (TPR) in post-run phase.
8589 * When full APIC register virtualization is implemented we'll have to make
8590 * sure APIC state is saved from the VMCS before IEM changes it.
8591 */
8592 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
8593 }
8594 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
8595 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8596 else if (idMsr == MSR_K6_EFER)
8597 {
8598 /*
8599 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
8600 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
8601 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
8602 */
8603 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
8604 }
8605
8606 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
8607 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
8608 {
8609 switch (idMsr)
8610 {
8611 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
8612 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
8613 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
8614 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
8615 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
8616 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
8617 default:
8618 {
8619#ifndef IN_NEM_DARWIN
8620 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8621 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
8622 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8623 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
8624#else
8625 AssertMsgFailed(("TODO\n"));
8626#endif
8627 break;
8628 }
8629 }
8630 }
8631#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8632 else
8633 {
8634 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
8635 switch (idMsr)
8636 {
8637 case MSR_IA32_SYSENTER_CS:
8638 case MSR_IA32_SYSENTER_EIP:
8639 case MSR_IA32_SYSENTER_ESP:
8640 case MSR_K8_FS_BASE:
8641 case MSR_K8_GS_BASE:
8642 {
8643 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
8644 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8645 }
8646
8647 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
8648 default:
8649 {
8650 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8651 {
8652 /* EFER MSR writes are always intercepted. */
8653 if (idMsr != MSR_K6_EFER)
8654 {
8655 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
8656 idMsr));
8657 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8658 }
8659 }
8660
8661 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8662 {
8663 Assert(pVmcsInfo->pvMsrBitmap);
8664 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8665 if (fMsrpm & VMXMSRPM_ALLOW_WR)
8666 {
8667 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
8668 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8669 }
8670 }
8671 break;
8672 }
8673 }
8674 }
8675#endif /* VBOX_STRICT */
8676 }
8677 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8678 {
8679 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8680 rcStrict = VINF_SUCCESS;
8681 }
8682 else
8683 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
8684 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8685
8686 return rcStrict;
8687}
8688
8689
8690/**
8691 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8692 */
8693HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8694{
8695 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8696
8697 /** @todo The guest has likely hit a contended spinlock. We might want to
8698 * poke a schedule different guest VCPU. */
8699 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8700 if (RT_SUCCESS(rc))
8701 return VINF_EM_RAW_INTERRUPT;
8702
8703 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8704 return rc;
8705}
8706
8707
8708/**
8709 * VM-exit handler for when the TPR value is lowered below the specified
8710 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8711 */
8712HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8713{
8714 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8715 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8716
8717 /*
8718 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8719 * We'll re-evaluate pending interrupts and inject them before the next VM
8720 * entry so we can just continue execution here.
8721 */
8722 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8723 return VINF_SUCCESS;
8724}
8725
8726
8727/**
8728 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8729 * VM-exit.
8730 *
8731 * @retval VINF_SUCCESS when guest execution can continue.
8732 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8733 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8734 * incompatible guest state for VMX execution (real-on-v86 case).
8735 */
8736HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8737{
8738 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8739 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8740
8741 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8742 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8743 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8744
8745 VBOXSTRICTRC rcStrict;
8746 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8747 uint64_t const uExitQual = pVmxTransient->uExitQual;
8748 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
8749 switch (uAccessType)
8750 {
8751 /*
8752 * MOV to CRx.
8753 */
8754 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8755 {
8756 /*
8757 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8758 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
8759 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8760 * PAE PDPTEs as well.
8761 */
8762 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8763 AssertRCReturn(rc, rc);
8764
8765 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8766#ifndef IN_NEM_DARWIN
8767 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8768#endif
8769 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8770 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8771
8772 /*
8773 * MOV to CR3 only cause a VM-exit when one or more of the following are true:
8774 * - When nested paging isn't used.
8775 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8776 * - We are executing in the VM debug loop.
8777 */
8778#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8779# ifndef IN_NEM_DARWIN
8780 Assert( iCrReg != 3
8781 || !VM_IS_VMX_NESTED_PAGING(pVM)
8782 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8783 || pVCpu->hmr0.s.fUsingDebugLoop);
8784# else
8785 Assert( iCrReg != 3
8786 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8787# endif
8788#endif
8789
8790 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8791 Assert( iCrReg != 8
8792 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8793
8794 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8795 AssertMsg( rcStrict == VINF_SUCCESS
8796 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8797
8798#ifndef IN_NEM_DARWIN
8799 /*
8800 * This is a kludge for handling switches back to real mode when we try to use
8801 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8802 * deal with special selector values, so we have to return to ring-3 and run
8803 * there till the selector values are V86 mode compatible.
8804 *
8805 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8806 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8807 * this function.
8808 */
8809 if ( iCrReg == 0
8810 && rcStrict == VINF_SUCCESS
8811 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8812 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8813 && (uOldCr0 & X86_CR0_PE)
8814 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8815 {
8816 /** @todo Check selectors rather than returning all the time. */
8817 Assert(!pVmxTransient->fIsNestedGuest);
8818 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8819 rcStrict = VINF_EM_RESCHEDULE_REM;
8820 }
8821#endif
8822
8823 break;
8824 }
8825
8826 /*
8827 * MOV from CRx.
8828 */
8829 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8830 {
8831 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8832 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8833
8834 /*
8835 * MOV from CR3 only cause a VM-exit when one or more of the following are true:
8836 * - When nested paging isn't used.
8837 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8838 * - We are executing in the VM debug loop.
8839 */
8840#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8841# ifndef IN_NEM_DARWIN
8842 Assert( iCrReg != 3
8843 || !VM_IS_VMX_NESTED_PAGING(pVM)
8844 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8845 || pVCpu->hmr0.s.fLeaveDone);
8846# else
8847 Assert( iCrReg != 3
8848 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8849# endif
8850#endif
8851
8852 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8853 Assert( iCrReg != 8
8854 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8855
8856 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8857 break;
8858 }
8859
8860 /*
8861 * CLTS (Clear Task-Switch Flag in CR0).
8862 */
8863 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8864 {
8865 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8866 break;
8867 }
8868
8869 /*
8870 * LMSW (Load Machine-Status Word into CR0).
8871 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8872 */
8873 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8874 {
8875 RTGCPTR GCPtrEffDst;
8876 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8877 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8878 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8879 if (fMemOperand)
8880 {
8881 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
8882 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8883 }
8884 else
8885 GCPtrEffDst = NIL_RTGCPTR;
8886 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8887 break;
8888 }
8889
8890 default:
8891 {
8892 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8893 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8894 }
8895 }
8896
8897 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8898 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8899 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8900
8901 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8902 NOREF(pVM);
8903 return rcStrict;
8904}
8905
8906
8907/**
8908 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8909 * VM-exit.
8910 */
8911HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8912{
8913 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8914 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8915
8916 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8917 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8918 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8919 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8920#define VMX_HC_EXIT_IO_INSTR_INITIAL_REGS (IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER)
8921 /* EFER MSR also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8922 int rc = vmxHCImportGuestState<VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8923 AssertRCReturn(rc, rc);
8924
8925 /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
8926 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8927 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8928 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8929 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8930 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8931 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8932 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8933
8934 /*
8935 * Update exit history to see if this exit can be optimized.
8936 */
8937 VBOXSTRICTRC rcStrict;
8938 PCEMEXITREC pExitRec = NULL;
8939 if ( !fGstStepping
8940 && !fDbgStepping)
8941 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8942 !fIOString
8943 ? !fIOWrite
8944 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8945 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8946 : !fIOWrite
8947 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8948 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8949 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8950 if (!pExitRec)
8951 {
8952 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8953 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
8954
8955 uint32_t const cbValue = s_aIOSizes[uIOSize];
8956 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8957 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8958 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8959 if (fIOString)
8960 {
8961 /*
8962 * INS/OUTS - I/O String instruction.
8963 *
8964 * Use instruction-information if available, otherwise fall back on
8965 * interpreting the instruction.
8966 */
8967 Log4Func(("cs:rip=%#04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8968 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8969 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8970 if (fInsOutsInfo)
8971 {
8972 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
8973 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8974 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8975 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8976 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8977 if (fIOWrite)
8978 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8979 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8980 else
8981 {
8982 /*
8983 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8984 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8985 * See Intel Instruction spec. for "INS".
8986 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8987 */
8988 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8989 }
8990 }
8991 else
8992 rcStrict = IEMExecOne(pVCpu);
8993
8994 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8995 fUpdateRipAlready = true;
8996 }
8997 else
8998 {
8999 /*
9000 * IN/OUT - I/O instruction.
9001 */
9002 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
9003 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
9004 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
9005 if (fIOWrite)
9006 {
9007 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
9008 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
9009#ifndef IN_NEM_DARWIN
9010 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
9011 && !pCtx->eflags.Bits.u1TF)
9012 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
9013#endif
9014 }
9015 else
9016 {
9017 uint32_t u32Result = 0;
9018 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
9019 if (IOM_SUCCESS(rcStrict))
9020 {
9021 /* Save result of I/O IN instr. in AL/AX/EAX. */
9022 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
9023 }
9024#ifndef IN_NEM_DARWIN
9025 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
9026 && !pCtx->eflags.Bits.u1TF)
9027 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
9028#endif
9029 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
9030 }
9031 }
9032
9033 if (IOM_SUCCESS(rcStrict))
9034 {
9035 if (!fUpdateRipAlready)
9036 {
9037 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
9038 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
9039 }
9040
9041 /*
9042 * INS/OUTS with REP prefix updates RFLAGS, can be observed with triple-fault guru
9043 * while booting Fedora 17 64-bit guest.
9044 *
9045 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
9046 */
9047 if (fIOString)
9048 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
9049
9050 /*
9051 * If any I/O breakpoints are armed, we need to check if one triggered
9052 * and take appropriate action.
9053 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
9054 */
9055#if 1
9056 AssertCompile(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7);
9057#else
9058 AssertCompile(!(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7));
9059 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo);
9060 AssertRCReturn(rc, rc);
9061#endif
9062
9063 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
9064 * execution engines about whether hyper BPs and such are pending. */
9065 uint32_t const uDr7 = pCtx->dr[7];
9066 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
9067 && X86_DR7_ANY_RW_IO(uDr7)
9068 && (pCtx->cr4 & X86_CR4_DE))
9069 || DBGFBpIsHwIoArmed(pVM)))
9070 {
9071 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
9072
9073#ifndef IN_NEM_DARWIN
9074 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
9075 VMMRZCallRing3Disable(pVCpu);
9076 HM_DISABLE_PREEMPT(pVCpu);
9077
9078 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
9079
9080 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
9081 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
9082 {
9083 /* Raise #DB. */
9084 if (fIsGuestDbgActive)
9085 ASMSetDR6(pCtx->dr[6]);
9086 if (pCtx->dr[7] != uDr7)
9087 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
9088
9089 vmxHCSetPendingXcptDB(pVCpu);
9090 }
9091 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
9092 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
9093 else if ( rcStrict2 != VINF_SUCCESS
9094 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
9095 rcStrict = rcStrict2;
9096 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
9097
9098 HM_RESTORE_PREEMPT();
9099 VMMRZCallRing3Enable(pVCpu);
9100#else
9101 /** @todo */
9102#endif
9103 }
9104 }
9105
9106#ifdef VBOX_STRICT
9107 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
9108 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
9109 Assert(!fIOWrite);
9110 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
9111 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
9112 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
9113 Assert(fIOWrite);
9114 else
9115 {
9116# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
9117 * statuses, that the VMM device and some others may return. See
9118 * IOM_SUCCESS() for guidance. */
9119 AssertMsg( RT_FAILURE(rcStrict)
9120 || rcStrict == VINF_SUCCESS
9121 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
9122 || rcStrict == VINF_EM_DBG_BREAKPOINT
9123 || rcStrict == VINF_EM_RAW_GUEST_TRAP
9124 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9125# endif
9126 }
9127#endif
9128 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
9129 }
9130 else
9131 {
9132 /*
9133 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
9134 */
9135 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
9136 VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
9137 AssertRCReturn(rc2, rc2);
9138 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
9139 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
9140 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
9141 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9142 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
9143 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
9144
9145 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9146 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9147
9148 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9149 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9150 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9151 }
9152 return rcStrict;
9153}
9154
9155
9156/**
9157 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
9158 * VM-exit.
9159 */
9160HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9161{
9162 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9163
9164 /* Check if this task-switch occurred while delivery an event through the guest IDT. */
9165 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9166 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
9167 {
9168 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_INFO>(pVCpu, pVmxTransient);
9169 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9170 {
9171 uint32_t uErrCode;
9172 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
9173 {
9174 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9175 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
9176 }
9177 else
9178 uErrCode = 0;
9179
9180 RTGCUINTPTR GCPtrFaultAddress;
9181 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
9182 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
9183 else
9184 GCPtrFaultAddress = 0;
9185
9186 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9187
9188 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
9189 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
9190
9191 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
9192 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
9193 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9194 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9195 }
9196 }
9197
9198 /* Fall back to the interpreter to emulate the task-switch. */
9199 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9200 return VERR_EM_INTERPRETER;
9201}
9202
9203
9204/**
9205 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
9206 */
9207HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9208{
9209 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9210
9211 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9212 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
9213 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9214 AssertRC(rc);
9215 return VINF_EM_DBG_STEPPED;
9216}
9217
9218
9219/**
9220 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
9221 */
9222HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9223{
9224 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9225 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
9226
9227 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9228 | HMVMX_READ_EXIT_INSTR_LEN
9229 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9230 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9231 | HMVMX_READ_IDT_VECTORING_INFO
9232 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9233
9234 /*
9235 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9236 */
9237 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9238 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9239 {
9240 /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
9241 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
9242 {
9243 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9244 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9245 }
9246 }
9247 else
9248 {
9249 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9250 return rcStrict;
9251 }
9252
9253 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
9254 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9255 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9256 AssertRCReturn(rc, rc);
9257
9258 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Phyiscal Addresses" */
9259 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
9260 switch (uAccessType)
9261 {
9262#ifndef IN_NEM_DARWIN
9263 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
9264 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
9265 {
9266 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
9267 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
9268 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
9269
9270 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
9271 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
9272 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
9273 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
9274 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
9275
9276 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
9277 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
9278 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9279 if ( rcStrict == VINF_SUCCESS
9280 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9281 || rcStrict == VERR_PAGE_NOT_PRESENT)
9282 {
9283 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9284 | HM_CHANGED_GUEST_APIC_TPR);
9285 rcStrict = VINF_SUCCESS;
9286 }
9287 break;
9288 }
9289#else
9290 /** @todo */
9291#endif
9292
9293 default:
9294 {
9295 Log4Func(("uAccessType=%#x\n", uAccessType));
9296 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
9297 break;
9298 }
9299 }
9300
9301 if (rcStrict != VINF_SUCCESS)
9302 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
9303 return rcStrict;
9304}
9305
9306
9307/**
9308 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
9309 * VM-exit.
9310 */
9311HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9312{
9313 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9314 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9315
9316 /*
9317 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
9318 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
9319 * must emulate the MOV DRx access.
9320 */
9321 if (!pVmxTransient->fIsNestedGuest)
9322 {
9323 /* We should -not- get this VM-exit if the guest's debug registers were active. */
9324 if ( pVmxTransient->fWasGuestDebugStateActive
9325#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9326 && !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx
9327#endif
9328 )
9329 {
9330 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
9331 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
9332 }
9333
9334 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
9335 && !pVmxTransient->fWasHyperDebugStateActive)
9336 {
9337 Assert(!DBGFIsStepping(pVCpu));
9338 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
9339
9340 /* Whether we disable intercepting MOV DRx instructions and resume
9341 the current one, or emulate it and keep intercepting them is
9342 configurable. Though it usually comes down to whether there are
9343 any new DR6 & DR7 bits (RTM) we want to hide from the guest. */
9344#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9345 bool const fResumeInstruction = !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx;
9346#else
9347 bool const fResumeInstruction = true;
9348#endif
9349 if (fResumeInstruction)
9350 {
9351 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
9352 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9353 AssertRC(rc);
9354 }
9355
9356#ifndef IN_NEM_DARWIN
9357 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
9358 VMMRZCallRing3Disable(pVCpu);
9359 HM_DISABLE_PREEMPT(pVCpu);
9360
9361 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
9362 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
9363 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9364
9365 HM_RESTORE_PREEMPT();
9366 VMMRZCallRing3Enable(pVCpu);
9367#else
9368 CPUMR3NemActivateGuestDebugState(pVCpu);
9369 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9370 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
9371#endif
9372
9373 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
9374 if (fResumeInstruction)
9375 {
9376#ifdef VBOX_WITH_STATISTICS
9377 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9378 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9379 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9380 else
9381 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9382#endif
9383 return VINF_SUCCESS;
9384 }
9385 }
9386 }
9387
9388 /*
9389 * Import state. We must have DR7 loaded here as it's always consulted,
9390 * both for reading and writing. The other debug registers are never
9391 * exported as such.
9392 */
9393 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9394 int rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
9395 | CPUMCTX_EXTRN_GPRS_MASK
9396 | CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo, __FUNCTION__);
9397 AssertRCReturn(rc, rc);
9398
9399 uint8_t const iGReg = VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual);
9400 uint8_t const iDrReg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
9401 Log4Func(("cs:rip=%#04x:%08RX64 r%d %s dr%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iGReg,
9402 VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE ? "->" : "<-", iDrReg));
9403
9404 VBOXSTRICTRC rcStrict;
9405 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9406 {
9407 /*
9408 * Write DRx register.
9409 */
9410 rcStrict = IEMExecDecodedMovDRxWrite(pVCpu, pVmxTransient->cbExitInstr, iDrReg, iGReg);
9411 AssertMsg( rcStrict == VINF_SUCCESS
9412 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9413
9414 if (rcStrict == VINF_SUCCESS)
9415 {
9416 /** @todo r=bird: Not sure why we always flag DR7 as modified here, but I've
9417 * kept it for now to avoid breaking something non-obvious. */
9418 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9419 | HM_CHANGED_GUEST_DR7);
9420 /* Update the DR6 register if guest debug state is active, otherwise we'll
9421 trash it when calling CPUMR0DebugStateMaybeSaveGuestAndRestoreHost. */
9422 if (iDrReg == 6 && CPUMIsGuestDebugStateActive(pVCpu))
9423 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
9424 Log4Func(("r%d=%#RX64 => dr%d=%#RX64\n", iGReg, pVCpu->cpum.GstCtx.aGRegs[iGReg].u,
9425 iDrReg, pVCpu->cpum.GstCtx.dr[iDrReg]));
9426 }
9427 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9428 {
9429 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9430 rcStrict = VINF_SUCCESS;
9431 }
9432
9433 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9434 }
9435 else
9436 {
9437 /*
9438 * Read DRx register into a general purpose register.
9439 */
9440 rcStrict = IEMExecDecodedMovDRxRead(pVCpu, pVmxTransient->cbExitInstr, iGReg, iDrReg);
9441 AssertMsg( rcStrict == VINF_SUCCESS
9442 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9443
9444 if (rcStrict == VINF_SUCCESS)
9445 {
9446 if (iGReg == X86_GREG_xSP)
9447 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9448 | HM_CHANGED_GUEST_RSP);
9449 else
9450 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9451 }
9452 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9453 {
9454 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9455 rcStrict = VINF_SUCCESS;
9456 }
9457
9458 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9459 }
9460
9461 return rcStrict;
9462}
9463
9464
9465/**
9466 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
9467 * Conditional VM-exit.
9468 */
9469HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9470{
9471 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9472
9473#ifndef IN_NEM_DARWIN
9474 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9475
9476 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_LEN
9477 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9478 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9479 | HMVMX_READ_IDT_VECTORING_INFO
9480 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9481 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9482
9483 /*
9484 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9485 */
9486 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9487 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9488 {
9489 /*
9490 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
9491 * instruction emulation to inject the original event. Otherwise, injecting the original event
9492 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
9493 */
9494 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9495 { /* likely */ }
9496 else
9497 {
9498 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9499# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9500 /** @todo NSTVMX: Think about how this should be handled. */
9501 if (pVmxTransient->fIsNestedGuest)
9502 return VERR_VMX_IPE_3;
9503# endif
9504 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9505 }
9506 }
9507 else
9508 {
9509 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9510 return rcStrict;
9511 }
9512
9513 /*
9514 * Get sufficient state and update the exit history entry.
9515 */
9516 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9517 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9518 AssertRCReturn(rc, rc);
9519
9520 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9521 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
9522 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
9523 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
9524 if (!pExitRec)
9525 {
9526 /*
9527 * If we succeed, resume guest execution.
9528 * If we fail in interpreting the instruction because we couldn't get the guest physical address
9529 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
9530 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
9531 * weird case. See @bugref{6043}.
9532 */
9533 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9534/** @todo bird: We can probably just go straight to IOM here and assume that
9535 * it's MMIO, then fall back on PGM if that hunch didn't work out so
9536 * well. However, we need to address that aliasing workarounds that
9537 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
9538 *
9539 * Might also be interesting to see if we can get this done more or
9540 * less locklessly inside IOM. Need to consider the lookup table
9541 * updating and use a bit more carefully first (or do all updates via
9542 * rendezvous) */
9543 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, &pVCpu->cpum.GstCtx, GCPhys, UINT32_MAX);
9544 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
9545 if ( rcStrict == VINF_SUCCESS
9546 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9547 || rcStrict == VERR_PAGE_NOT_PRESENT)
9548 {
9549 /* Successfully handled MMIO operation. */
9550 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9551 | HM_CHANGED_GUEST_APIC_TPR);
9552 rcStrict = VINF_SUCCESS;
9553 }
9554 }
9555 else
9556 {
9557 /*
9558 * Frequent exit or something needing probing. Call EMHistoryExec.
9559 */
9560 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL, IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9561 AssertRCReturn(rc2, rc2);
9562 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
9563 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
9564
9565 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9566 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9567
9568 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9569 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9570 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9571 }
9572 return rcStrict;
9573#else
9574 AssertFailed();
9575 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
9576#endif
9577}
9578
9579
/**
 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
 * VM-exit.
 *
 * Ring-0 path: converts the EPT violation into a synthetic \#PF error code and
 * lets PGM sync the nested (shadow) page tables.  NEM/Darwin path: queries PGM
 * about the faulting page and either restarts the guest or emulates the access.
 */
HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
#ifndef IN_NEM_DARWIN
    /* EPT violations can only be taken when nested paging is enabled. */
    Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);

    /* Read everything needed below up front: the qualification (access type),
       the guest-physical address of the fault, and the IDT-vectoring fields
       required to detect a fault raised during event delivery. */
    vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
                         | HMVMX_READ_EXIT_INSTR_LEN
                         | HMVMX_READ_EXIT_INTERRUPTION_INFO
                         | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
                         | HMVMX_READ_IDT_VECTORING_INFO
                         | HMVMX_READ_IDT_VECTORING_ERROR_CODE
                         | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);

    /*
     * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
     */
    VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    {
        /*
         * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
         * we shall resolve the nested #PF and re-inject the original event.
         */
        if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
            STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
    }
    else
    {
        /* A double fault would have been converted into a pending event above. */
        Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
        return rcStrict;
    }

    /* Import the minimum guest state IEM/PGM need to handle the fault. */
    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
    AssertRCReturn(rc, rc);

    RTGCPHYS const GCPhys    = pVmxTransient->uGuestPhysicalAddr;
    uint64_t const uExitQual = pVmxTransient->uExitQual;
    /* NOTE(review): bits 7:8 of the qualification relate to guest linear-address
       validity; value 2 is presumably a reserved/unexpected combination here —
       confirm against the Intel SDM exit-qualification table. */
    AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));

    /* Translate the EPT access/permission bits into x86 #PF error-code bits. */
    RTGCUINT uErrorCode = 0;
    if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
        uErrorCode |= X86_TRAP_PF_ID;
    if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
        uErrorCode |= X86_TRAP_PF_RW;
    if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
        uErrorCode |= X86_TRAP_PF_P;

    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%08RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));

    PVMCC pVM = pVCpu->CTX_SUFF(pVM);

    /*
     * Handle the pagefault trap for the nested shadow table.
     */
    /* The trap must be asserted before PGM runs and reset afterwards regardless
       of the outcome. */
    TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
    rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, pCtx, GCPhys);
    TRPMResetTrap(pVCpu);

    /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
    if (   rcStrict == VINF_SUCCESS
        || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
        || rcStrict == VERR_PAGE_NOT_PRESENT)
    {
        /* Successfully synced our nested page tables. */
        STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
        return VINF_SUCCESS;
    }
    Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;

#else /* IN_NEM_DARWIN */
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    /* Sample the TSC now so the exit-history entry gets an accurate timestamp. */
    uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
    vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
                         | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
    /* Only RIP and CS are needed for the exit-history PC below. */
    vmxHCImportGuestRip(pVCpu);
    vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);

    /*
     * Ask PGM for information about the given GCPhys.  We need to check if we're
     * out of sync first.
     */
    NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE),
                                      false,
                                      false };
    PGMPHYSNEMPAGEINFO      Info;
    int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
                                       nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
    if (RT_SUCCESS(rc))
    {
        /* If the page already grants the required access, the checker callback may
           have fixed things up so we can simply restart the instruction. */
        if (Info.fNemProt & (  RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
                             ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
        {
            if (State.fCanResume)
            {
                Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
                      pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
                      pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
                      Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
                      State.fDidSomething ? "" : " no-change"));
                EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
                                 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
                return VINF_SUCCESS;
            }
        }

        Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
              pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
              Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
              State.fDidSomething ? "" : " no-change"));
    }
    else
        Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
              pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));

    /*
     * Emulate the memory access, either access handler or special memory.
     */
    PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
                                            RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
                                            ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
                                            : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
                                            pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);

    /* Full state import: IEM may touch anything while emulating the access. */
    rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
    AssertRCReturn(rc, rc);

    VBOXSTRICTRC rcStrict;
    if (!pExitRec)
        rcStrict = IEMExecOne(pVCpu);
    else
    {
        /* Frequent access or probing. */
        rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
        Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
              VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    }

    /* Everything may have been modified by the emulation; reload it all. */
    ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);

    Log4Func(("EPT return rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
#endif /* IN_NEM_DARWIN */
}
9735
9736#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9737
9738/**
9739 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
9740 */
9741HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9742{
9743 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9744
9745 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9746 | HMVMX_READ_EXIT_INSTR_INFO
9747 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9748 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9749 | CPUMCTX_EXTRN_SREG_MASK
9750 | CPUMCTX_EXTRN_HWVIRT
9751 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9752 AssertRCReturn(rc, rc);
9753
9754 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9755
9756 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9757 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9758
9759 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
9760 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9761 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9762 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9763 {
9764 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9765 rcStrict = VINF_SUCCESS;
9766 }
9767 return rcStrict;
9768}
9769
9770
9771/**
9772 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9773 */
9774HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9775{
9776 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9777
9778 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
9779 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9780 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9781 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9782 AssertRCReturn(rc, rc);
9783
9784 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9785
9786 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9787 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9788 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9789 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9790 {
9791 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9792 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9793 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9794 }
9795 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9796 return rcStrict;
9797}
9798
9799
9800/**
9801 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9802 */
9803HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9804{
9805 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9806
9807 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9808 | HMVMX_READ_EXIT_INSTR_INFO
9809 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9810 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9811 | CPUMCTX_EXTRN_SREG_MASK
9812 | CPUMCTX_EXTRN_HWVIRT
9813 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9814 AssertRCReturn(rc, rc);
9815
9816 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9817
9818 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9819 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9820
9821 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9822 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9823 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9824 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9825 {
9826 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9827 rcStrict = VINF_SUCCESS;
9828 }
9829 return rcStrict;
9830}
9831
9832
9833/**
9834 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9835 */
9836HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9837{
9838 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9839
9840 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9841 | HMVMX_READ_EXIT_INSTR_INFO
9842 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9843 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9844 | CPUMCTX_EXTRN_SREG_MASK
9845 | CPUMCTX_EXTRN_HWVIRT
9846 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9847 AssertRCReturn(rc, rc);
9848
9849 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9850
9851 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9852 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9853
9854 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9855 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9856 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9857 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9858 {
9859 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9860 rcStrict = VINF_SUCCESS;
9861 }
9862 return rcStrict;
9863}
9864
9865
/**
 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
 *
 * Decodes the destination operand (register or memory) and hands the VMREAD to
 * IEM for emulation against the nested hypervisor's VMCS.
 */
HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    /*
     * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
     * thus might not need to import the shadow VMCS state, it's safer just in case
     * code elsewhere dares look at unsynced VMCS fields.
     */
    vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
                         | HMVMX_READ_EXIT_INSTR_INFO
                         | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
    int rc = vmxHCImportGuestState<  CPUMCTX_EXTRN_RSP
                                   | CPUMCTX_EXTRN_SREG_MASK
                                   | CPUMCTX_EXTRN_HWVIRT
                                   | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
    AssertRCReturn(rc, rc);

    HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);

    /* Only decode an effective address when the destination is a memory operand;
       VMREAD writes the field value to it, hence VMXMEMACCESS_WRITE. */
    VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
    if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
        HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);

    VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    {
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);

/* Disabled exit-history optimization; kept for reference (see the todo below). */
# if 0 //ndef IN_NEM_DARWIN /** @todo this needs serious tuning still, slows down things enormously. */
        /* Try for exit optimization.  This is on the following instruction
           because it would be a waste of time to have to reinterpret the
           already decoded vmwrite instruction. */
        PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_VMREAD));
        if (pExitRec)
        {
            /* Frequent access or probing. */
            rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
            AssertRCReturn(rc, rc);

            rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
            Log4(("vmread/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
                  pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
                  VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
        }
# endif
    }
    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        /* IEM raised an exception; flag re-loading of everything the injection may touch. */
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
        rcStrict = VINF_SUCCESS;
    }
    return rcStrict;
}
9924
9925
9926/**
9927 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9928 */
9929HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9930{
9931 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9932
9933 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME,
9934 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9935 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9936 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9937 AssertRCReturn(rc, rc);
9938
9939 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9940
9941 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9942 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9943 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9944 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9945 {
9946 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9947 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9948 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9949 }
9950 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9951 return rcStrict;
9952}
9953
9954
9955/**
9956 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9957 */
9958HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9959{
9960 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9961
9962 /*
9963 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, since our HM hook
9964 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and it
9965 * flags re-loading the entire shadow VMCS, we should save the entire shadow VMCS here.
9966 */
9967 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9968 | HMVMX_READ_EXIT_INSTR_INFO
9969 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9970 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9971 | CPUMCTX_EXTRN_SREG_MASK
9972 | CPUMCTX_EXTRN_HWVIRT
9973 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9974 AssertRCReturn(rc, rc);
9975
9976 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9977
9978 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9979 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9980 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9981
9982 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9983 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9984 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9985 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9986 {
9987 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9988 rcStrict = VINF_SUCCESS;
9989 }
9990 return rcStrict;
9991}
9992
9993
9994/**
9995 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9996 */
9997HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9998{
9999 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10000
10001 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10002 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_CR4
10003 | CPUMCTX_EXTRN_HWVIRT
10004 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10005 AssertRCReturn(rc, rc);
10006
10007 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
10008
10009 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
10010 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10011 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
10012 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10013 {
10014 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10015 rcStrict = VINF_SUCCESS;
10016 }
10017 return rcStrict;
10018}
10019
10020
10021/**
10022 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
10023 */
10024HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10025{
10026 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10027
10028 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10029 | HMVMX_READ_EXIT_INSTR_INFO
10030 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10031 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
10032 | CPUMCTX_EXTRN_SREG_MASK
10033 | CPUMCTX_EXTRN_HWVIRT
10034 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10035 AssertRCReturn(rc, rc);
10036
10037 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
10038
10039 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10040 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
10041
10042 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
10043 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10044 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
10045 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10046 {
10047 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10048 rcStrict = VINF_SUCCESS;
10049 }
10050 return rcStrict;
10051}
10052
10053
10054/**
10055 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
10056 */
10057HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10058{
10059 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10060
10061 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10062 | HMVMX_READ_EXIT_INSTR_INFO
10063 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10064 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
10065 | CPUMCTX_EXTRN_SREG_MASK
10066 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10067 AssertRCReturn(rc, rc);
10068
10069 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
10070
10071 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10072 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
10073
10074 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
10075 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10076 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
10077 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10078 {
10079 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10080 rcStrict = VINF_SUCCESS;
10081 }
10082 return rcStrict;
10083}
10084
10085
10086# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10087/**
10088 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
10089 */
10090HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10091{
10092 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10093
10094 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10095 | HMVMX_READ_EXIT_INSTR_INFO
10096 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10097 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
10098 | CPUMCTX_EXTRN_SREG_MASK
10099 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10100 AssertRCReturn(rc, rc);
10101
10102 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
10103
10104 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10105 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
10106
10107 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
10108 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10109 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
10110 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10111 {
10112 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10113 rcStrict = VINF_SUCCESS;
10114 }
10115 return rcStrict;
10116}
10117# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10118#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10119/** @} */
10120
10121
10122#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10123/** @name Nested-guest VM-exit handlers.
10124 * @{
10125 */
10126/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10127/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10128/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10129
/**
 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
 * Conditional VM-exit.
 *
 * Dispatches on the interruption type: host NMIs are handled by the host,
 * exceptions are either reflected to the nested hypervisor (when it intercepts
 * the vector) or handled as ordinary guest exceptions.
 */
HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);

    uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
    uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
    Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));

    switch (uExitIntType)
    {
# ifndef IN_NEM_DARWIN
        /*
         * Physical NMIs:
         *   We shouldn't direct host physical NMIs to the nested-guest. Dispatch it to the host.
         */
        case VMX_EXIT_INT_INFO_TYPE_NMI:
            return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
# endif

        /*
         * Hardware exceptions,
         * Software exceptions,
         * Privileged software exceptions:
         *   Figure out if the exception must be delivered to the guest or the nested-guest.
         */
        case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
        case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
        case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
        {
            /* Read the fields needed both for the split-lock check and for reflection. */
            vmxHCReadToTransient<  HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
                                 | HMVMX_READ_EXIT_INSTR_LEN
                                 | HMVMX_READ_IDT_VECTORING_INFO
                                 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);

            PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
            uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
            if (CPUMIsGuestVmxXcptInterceptSet(pCtx, uVector, pVmxTransient->uExitIntErrorCode))
            {
                /*
                 * Split-lock triggered #ACs should not be injected into the nested-guest
                 * since we don't support split-lock detection for nested-guests yet.
                 */
                if (   uVector == X86_XCPT_AC
                    && uExitIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
                {
                    /* Import just enough state to distinguish a split-lock #AC from a real one. */
                    int const rc = vmxHCImportGuestState<HMVMX_CPUMCTX_XPCT_AC>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
                    AssertRCReturn(rc, rc);
                    if (vmxHCIsSplitLockAcXcpt(pVCpu))
                    {
                        VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
                        if (   rcStrict == VINF_SUCCESS
                            && !VCPU_2_VMXSTATE(pVCpu).Event.fPending)
                            return vmxHCHandleSplitLockAcXcpt(pVCpu, pVmxTransient);
                        /* A double fault has been converted into a pending event; resume the guest. */
                        if (rcStrict == VINF_HM_DOUBLE_FAULT)
                        {
                            Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
                            rcStrict = VINF_SUCCESS;
                        }
                        return rcStrict;
                    }
                }

                /* Exit qualification is required for debug and page-fault exceptions. */
                vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);

                /*
                 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
                 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
                 * length. However, if delivery of a software interrupt, software exception or privileged
                 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
                 */
                VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
                VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT(pVmxTransient->uExitIntInfo,
                                                                               pVmxTransient->uExitIntErrorCode,
                                                                               pVmxTransient->uIdtVectoringInfo,
                                                                               pVmxTransient->uIdtVectoringErrorCode);
#ifdef DEBUG_ramshankar
                vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
                Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n",
                          pVmxTransient->uExitIntInfo, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
                if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
                    Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n",
                              pVmxTransient->uIdtVectoringInfo, pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
#endif
                /* The nested hypervisor intercepts this vector: reflect the VM-exit to it. */
                return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
            }

            /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
            Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
            return vmxHCExitXcpt(pVCpu, pVmxTransient);
        }

        /*
         * Software interrupts:
         *    VM-exits cannot be caused by software interrupts.
         *
         * External interrupts:
         *    This should only happen when "acknowledge external interrupts on VM-exit"
         *    control is set. However, we never set this when executing a guest or
         *    nested-guest. For nested-guests it is emulated while injecting interrupts into
         *    the guest.
         */
        case VMX_EXIT_INT_INFO_TYPE_SW_INT:
        case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
        default:
        {
            /* Record the unexpected interruption info for diagnostics and fail hard. */
            VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
            return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
        }
    }
}
10247
10248
/**
 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
 * Unconditional VM-exit.
 *
 * A triple fault in the nested-guest is always reflected to the nested
 * hypervisor; IEM performs the complete VM-exit.
 */
HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    return IEMExecVmxVmexitTripleFault(pVCpu);
}
10258
10259
10260/**
10261 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
10262 */
10263HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10264{
10265 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10266
10267 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
10268 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10269 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
10270}
10271
10272
10273/**
10274 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
10275 */
10276HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10277{
10278 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10279
10280 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
10281 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10282 return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
10283}
10284
10285
10286/**
10287 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
10288 * Unconditional VM-exit.
10289 */
10290HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10291{
10292 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10293
10294 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10295 | HMVMX_READ_EXIT_INSTR_LEN
10296 | HMVMX_READ_IDT_VECTORING_INFO
10297 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10298
10299 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10300 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10301 pVmxTransient->uIdtVectoringErrorCode);
10302 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
10303}
10304
10305
10306/**
10307 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10308 */
10309HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10310{
10311 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10312
10313 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
10314 {
10315 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10316 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10317 }
10318 return vmxHCExitHlt(pVCpu, pVmxTransient);
10319}
10320
10321
10322/**
10323 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10324 */
10325HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10326{
10327 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10328
10329 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10330 {
10331 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10332 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10333 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10334 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10335 }
10336 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
10337}
10338
10339
10340/**
10341 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10342 */
10343HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10344{
10345 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10346
10347 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
10348 {
10349 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10350 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10351 }
10352 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
10353}
10354
10355
10356/**
10357 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
10358 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
10359 */
10360HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10361{
10362 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10363
10364 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
10365 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
10366
10367 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10368
10369 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
10370 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10371 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10372
10373 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
10374 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
10375 u64VmcsField &= UINT64_C(0xffffffff);
10376
10377 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
10378 {
10379 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10380 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10381 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10382 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10383 }
10384
10385 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
10386 return vmxHCExitVmread(pVCpu, pVmxTransient);
10387 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
10388}
10389
10390
10391/**
10392 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10393 */
10394HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10395{
10396 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10397
10398 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10399 {
10400 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10401 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10402 }
10403
10404 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
10405}
10406
10407
10408/**
10409 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
10410 * Conditional VM-exit.
10411 */
10412HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10413{
10414 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10415
10416 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10417 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10418
10419 VBOXSTRICTRC rcStrict;
10420 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
10421 switch (uAccessType)
10422 {
10423 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
10424 {
10425 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10426 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10427 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10428 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10429
10430 bool fIntercept;
10431 switch (iCrReg)
10432 {
10433 case 0:
10434 case 4:
10435 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
10436 break;
10437
10438 case 3:
10439 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
10440 break;
10441
10442 case 8:
10443 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
10444 break;
10445
10446 default:
10447 fIntercept = false;
10448 break;
10449 }
10450 if (fIntercept)
10451 {
10452 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10453 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10454 }
10455 else
10456 {
10457 int const rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10458 AssertRCReturn(rc, rc);
10459 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10460 }
10461 break;
10462 }
10463
10464 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
10465 {
10466 /*
10467 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
10468 * CR2 reads do not cause a VM-exit.
10469 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
10470 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
10471 */
10472 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10473 if ( iCrReg == 3
10474 || iCrReg == 8)
10475 {
10476 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
10477 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
10478 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
10479 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
10480 {
10481 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10482 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10483 }
10484 else
10485 {
10486 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10487 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10488 }
10489 }
10490 else
10491 {
10492 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
10493 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
10494 }
10495 break;
10496 }
10497
10498 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
10499 {
10500 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
10501 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
10502 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
10503 if ( (uGstHostMask & X86_CR0_TS)
10504 && (uReadShadow & X86_CR0_TS))
10505 {
10506 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10507 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10508 }
10509 else
10510 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
10511 break;
10512 }
10513
10514 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
10515 {
10516 RTGCPTR GCPtrEffDst;
10517 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
10518 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
10519 if (fMemOperand)
10520 {
10521 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10522 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
10523 }
10524 else
10525 GCPtrEffDst = NIL_RTGCPTR;
10526
10527 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
10528 {
10529 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10530 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
10531 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10532 }
10533 else
10534 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
10535 break;
10536 }
10537
10538 default:
10539 {
10540 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
10541 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
10542 }
10543 }
10544
10545 if (rcStrict == VINF_IEM_RAISED_XCPT)
10546 {
10547 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10548 rcStrict = VINF_SUCCESS;
10549 }
10550 return rcStrict;
10551}
10552
10553
10554/**
10555 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
10556 * Conditional VM-exit.
10557 */
10558HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10559{
10560 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10561
10562 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
10563 {
10564 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10565 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10566 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10567 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10568 }
10569 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
10570}
10571
10572
10573/**
10574 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
10575 * Conditional VM-exit.
10576 */
10577HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10578{
10579 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10580
10581 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10582
10583 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
10584 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
10585 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
10586
10587 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
10588 uint8_t const cbAccess = s_aIOSizes[uIOSize];
10589 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
10590 {
10591 /*
10592 * IN/OUT instruction:
10593 * - Provides VM-exit instruction length.
10594 *
10595 * INS/OUTS instruction:
10596 * - Provides VM-exit instruction length.
10597 * - Provides Guest-linear address.
10598 * - Optionally provides VM-exit instruction info (depends on CPU feature).
10599 */
10600 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10601 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10602
10603 /* Make sure we don't use stale/uninitialized VMX-transient info. below. */
10604 pVmxTransient->ExitInstrInfo.u = 0;
10605 pVmxTransient->uGuestLinearAddr = 0;
10606
10607 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
10608 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
10609 if (fIOString)
10610 {
10611 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10612 if (fVmxInsOutsInfo)
10613 {
10614 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
10615 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10616 }
10617 }
10618
10619 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_AND_LIN_ADDR_FROM_TRANSIENT(pVmxTransient);
10620 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10621 }
10622 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
10623}
10624
10625
10626/**
10627 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10628 */
10629HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10630{
10631 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10632
10633 uint32_t fMsrpm;
10634 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10635 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10636 else
10637 fMsrpm = VMXMSRPM_EXIT_RD;
10638
10639 if (fMsrpm & VMXMSRPM_EXIT_RD)
10640 {
10641 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10642 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10643 }
10644 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
10645}
10646
10647
10648/**
10649 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10650 */
10651HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10652{
10653 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10654
10655 uint32_t fMsrpm;
10656 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10657 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10658 else
10659 fMsrpm = VMXMSRPM_EXIT_WR;
10660
10661 if (fMsrpm & VMXMSRPM_EXIT_WR)
10662 {
10663 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10664 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10665 }
10666 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
10667}
10668
10669
10670/**
10671 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10672 */
10673HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10674{
10675 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10676
10677 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
10678 {
10679 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10680 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10681 }
10682 return vmxHCExitMwait(pVCpu, pVmxTransient);
10683}
10684
10685
10686/**
10687 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
10688 * VM-exit.
10689 */
10690HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10691{
10692 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10693
10694 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
10695 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10696 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10697 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10698}
10699
10700
10701/**
10702 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10703 */
10704HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10705{
10706 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10707
10708 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
10709 {
10710 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10711 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10712 }
10713 return vmxHCExitMonitor(pVCpu, pVmxTransient);
10714}
10715
10716
10717/**
10718 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10719 */
10720HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10721{
10722 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10723
10724 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
10725 * PAUSE when executing a nested-guest? If it does not, we would not need
10726 * to check for the intercepts here. Just call VM-exit... */
10727
10728 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
10729 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
10730 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10731 {
10732 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10733 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10734 }
10735 return vmxHCExitPause(pVCpu, pVmxTransient);
10736}
10737
10738
10739/**
10740 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10741 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10742 */
10743HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10744{
10745 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10746
10747 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10748 {
10749 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10750 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10751 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10752 }
10753 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10754}
10755
10756
10757/**
10758 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10759 * VM-exit.
10760 */
10761HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10762{
10763 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10764
10765 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10766 | HMVMX_READ_EXIT_INSTR_LEN
10767 | HMVMX_READ_IDT_VECTORING_INFO
10768 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10769
10770 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10771
10772 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10773 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10774
10775 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10776 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10777 pVmxTransient->uIdtVectoringErrorCode);
10778 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10779}
10780
10781
10782/**
10783 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10784 * Conditional VM-exit.
10785 */
10786HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10787{
10788 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10789
10790 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10791 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10792 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10793}
10794
10795
10796/**
10797 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10798 * Conditional VM-exit.
10799 */
10800HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10801{
10802 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10803
10804 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10805 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10806 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10807}
10808
10809
10810/**
10811 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10812 */
10813HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10814{
10815 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10816
10817 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10818 {
10819 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10820 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10821 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10822 }
10823 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10824}
10825
10826
10827/**
10828 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10829 */
10830HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10831{
10832 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10833
10834 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10835 {
10836 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10837 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10838 }
10839 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10840}
10841
10842
10843/**
10844 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10845 */
10846HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10847{
10848 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10849
10850 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10851 {
10852 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10853 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10854 | HMVMX_READ_EXIT_INSTR_INFO
10855 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10856 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10857 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10858 }
10859 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10860}
10861
10862
10863/**
10864 * Nested-guest VM-exit handler for invalid-guest state
10865 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10866 */
10867HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10868{
10869 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10870
10871 /*
10872 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10873 * So if it does happen, it indicates a bug possibly in the hardware-assisted VMX code.
10874 * Handle it like it's in an invalid guest state of the outer guest.
10875 *
10876 * When the fast path is implemented, this should be changed to cause the corresponding
10877 * nested-guest VM-exit.
10878 */
10879 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10880}
10881
10882
10883/**
10884 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10885 * and only provide the instruction length.
10886 *
10887 * Unconditional VM-exit.
10888 */
10889HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10890{
10891 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10892
10893#ifdef VBOX_STRICT
10894 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10895 switch (pVmxTransient->uExitReason)
10896 {
10897 case VMX_EXIT_ENCLS:
10898 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10899 break;
10900
10901 case VMX_EXIT_VMFUNC:
10902 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10903 break;
10904 }
10905#endif
10906
10907 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10908 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10909}
10910
10911
10912/**
10913 * Nested-guest VM-exit handler for instructions that provide instruction length as
10914 * well as more information.
10915 *
10916 * Unconditional VM-exit.
10917 */
10918HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10919{
10920 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10921
10922# ifdef VBOX_STRICT
10923 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10924 switch (pVmxTransient->uExitReason)
10925 {
10926 case VMX_EXIT_GDTR_IDTR_ACCESS:
10927 case VMX_EXIT_LDTR_TR_ACCESS:
10928 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10929 break;
10930
10931 case VMX_EXIT_RDRAND:
10932 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10933 break;
10934
10935 case VMX_EXIT_RDSEED:
10936 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10937 break;
10938
10939 case VMX_EXIT_XSAVES:
10940 case VMX_EXIT_XRSTORS:
10941 /** @todo NSTVMX: Verify XSS-bitmap. */
10942 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10943 break;
10944
10945 case VMX_EXIT_UMWAIT:
10946 case VMX_EXIT_TPAUSE:
10947 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10948 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10949 break;
10950
10951 case VMX_EXIT_LOADIWKEY:
10952 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10953 break;
10954 }
10955# endif
10956
10957 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10958 | HMVMX_READ_EXIT_INSTR_LEN
10959 | HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10960 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10961 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10962}
10963
10964# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10965
10966/**
10967 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10968 * Conditional VM-exit.
10969 */
10970HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10971{
10972 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10973 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10974
10975 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10976 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10977 {
10978 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10979 | HMVMX_READ_EXIT_INSTR_LEN
10980 | HMVMX_READ_EXIT_INTERRUPTION_INFO
10981 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10982 | HMVMX_READ_IDT_VECTORING_INFO
10983 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
10984 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10985 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10986 AssertRCReturn(rc, rc);
10987
10988 /*
10989 * If it's our VMEXIT, we're responsible for re-injecting any event which delivery
10990 * might have triggered this VMEXIT. If we forward the problem to the inner VMM,
10991 * it's its problem to deal with that issue and we'll clear the recovered event.
10992 */
10993 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10994 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10995 { /*likely*/ }
10996 else
10997 {
10998 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
10999 return rcStrict;
11000 }
11001 uint32_t const fClearEventOnForward = VCPU_2_VMXSTATE(pVCpu).Event.fPending; /* paranoia. should not inject events below. */
11002
11003 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
11004 uint64_t const uExitQual = pVmxTransient->uExitQual;
11005
11006 RTGCPTR GCPtrNestedFault;
11007 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
11008 if (fIsLinearAddrValid)
11009 {
11010 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
11011 GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
11012 }
11013 else
11014 GCPtrNestedFault = 0;
11015
11016 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
11017 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
11018 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
11019 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
11020 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
11021
11022 PGMPTWALK Walk;
11023 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11024 rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, pCtx, GCPhysNestedFault,
11025 fIsLinearAddrValid, GCPtrNestedFault, &Walk);
11026 Log7Func(("PGM (uExitQual=%#RX64, %RGp, %RGv) -> %Rrc (fFailed=%d)\n",
11027 uExitQual, GCPhysNestedFault, GCPtrNestedFault, VBOXSTRICTRC_VAL(rcStrict), Walk.fFailed));
11028 if (RT_SUCCESS(rcStrict))
11029 {
11030 if (rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE)
11031 {
11032 Assert(!fClearEventOnForward);
11033 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM));
11034 rcStrict = VINF_EM_RESCHEDULE_REM;
11035 }
11036 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
11037 return rcStrict;
11038 }
11039
11040 if (fClearEventOnForward)
11041 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
11042
11043 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
11044 pVmxTransient->uIdtVectoringErrorCode);
11045 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
11046 {
11047 VMXVEXITINFO const ExitInfo
11048 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_AND_GST_ADDRESSES(VMX_EXIT_EPT_VIOLATION,
11049 pVmxTransient->uExitQual,
11050 pVmxTransient->cbExitInstr,
11051 pVmxTransient->uGuestLinearAddr,
11052 pVmxTransient->uGuestPhysicalAddr);
11053 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
11054 }
11055
11056 AssertMsgReturn(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG,
11057 ("uErr=%#RX32 uExitQual=%#RX64 GCPhysNestedFault=%#RGp GCPtrNestedFault=%#RGv\n",
11058 (uint32_t)uErr, uExitQual, GCPhysNestedFault, GCPtrNestedFault),
11059 rcStrict);
11060 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
11061 }
11062
11063 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
11064}
11065
11066
11067/**
11068 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
11069 * Conditional VM-exit.
11070 */
11071HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11072{
11073 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11074 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
11075
11076 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11077 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
11078 {
11079 vmxHCReadToTransient<HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
11080 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
11081 AssertRCReturn(rc, rc);
11082
11083 PGMPTWALK Walk;
11084 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11085 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
11086 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, pCtx,
11087 GCPhysNestedFault, false /* fIsLinearAddrValid */,
11088 0 /* GCPtrNestedFault */, &Walk);
11089 if (RT_SUCCESS(rcStrict))
11090 {
11091 AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
11092 return rcStrict;
11093 }
11094
11095 AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
11096 vmxHCReadToTransient< HMVMX_READ_IDT_VECTORING_INFO
11097 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
11098
11099 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
11100 pVmxTransient->uIdtVectoringErrorCode);
11101 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
11102 }
11103
11104 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
11105}
11106
11107# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
11108
11109/** @} */
11110#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
11111
11112
11113/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
11114 * probes.
11115 *
11116 * The following few functions and associated structure contains the bloat
11117 * necessary for providing detailed debug events and dtrace probes as well as
11118 * reliable host side single stepping. This works on the principle of
11119 * "subclassing" the normal execution loop and workers. We replace the loop
11120 * method completely and override selected helpers to add necessary adjustments
11121 * to their core operation.
11122 *
11123 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
11124 * any performance for debug and analysis features.
11125 *
11126 * @{
11127 */
11128
/**
 * Transient per-VCPU debug state of VMCS and related info that we save/restore
 * in the debug run loop.
 */
typedef struct VMXRUNDBGSTATE
{
    /** The RIP we started executing at.  This is for detecting that we stepped. */
    uint64_t    uRipStart;
    /** The CS we started executing with. */
    uint16_t    uCsStart;

    /** Whether we've actually modified the 1st execution control field. */
    bool        fModifiedProcCtls : 1;
    /** Whether we've actually modified the 2nd execution control field. */
    bool        fModifiedProcCtls2 : 1;
    /** Whether we've actually modified the exception bitmap. */
    bool        fModifiedXcptBitmap : 1;

    /** Whether we desire the modified CR0 guest/host mask to be cleared. */
    bool        fClearCr0Mask : 1;
    /** Whether we desire the modified CR4 guest/host mask to be cleared. */
    bool        fClearCr4Mask : 1;
    /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
    uint32_t    fCpe1Extra;
    /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
    uint32_t    fCpe1Unwanted;
    /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
    uint32_t    fCpe2Extra;
    /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
    uint32_t    bmXcptExtra;
    /** The sequence number of the Dtrace provider settings the state was
     *  configured against. */
    uint32_t    uDtraceSettingsSeqNo;
    /** VM-exits to check (one bit per VM-exit). */
    uint32_t    bmExitsToCheck[3];

    /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
    uint32_t    fProcCtlsInitial;
    /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
    uint32_t    fProcCtls2Initial;
    /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
    uint32_t    bmXcptInitial;
} VMXRUNDBGSTATE;
/* The exit-check bitmap must have one bit for every possible VM-exit reason. */
AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
/** Pointer to a VMXRUNDBGSTATE struct. */
typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
11174
11175
11176/**
11177 * Initializes the VMXRUNDBGSTATE structure.
11178 *
11179 * @param pVCpu The cross context virtual CPU structure of the
11180 * calling EMT.
11181 * @param pVmxTransient The VMX-transient structure.
11182 * @param pDbgState The debug state to initialize.
11183 */
11184static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11185{
11186 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
11187 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
11188
11189 pDbgState->fModifiedProcCtls = false;
11190 pDbgState->fModifiedProcCtls2 = false;
11191 pDbgState->fModifiedXcptBitmap = false;
11192 pDbgState->fClearCr0Mask = false;
11193 pDbgState->fClearCr4Mask = false;
11194 pDbgState->fCpe1Extra = 0;
11195 pDbgState->fCpe1Unwanted = 0;
11196 pDbgState->fCpe2Extra = 0;
11197 pDbgState->bmXcptExtra = 0;
11198 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
11199 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
11200 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
11201}
11202
11203
/**
 * Updates the VMCS fields with changes requested by @a pDbgState.
 *
 * This is performed after hmR0VmxPreRunGuestDebugStateUpdate as well
 * immediately before executing guest code, i.e. when interrupts are disabled.
 * We don't check status codes here as we cannot easily assert or return in the
 * latter case.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 * @param   pDbgState       The debug state.
 */
static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
{
    /*
     * Ensure desired flags in VMCS control fields are set.
     * (Ignoring write failure here, as we're committed and it's just debug extras.)
     *
     * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
     *       there should be no stale data in pCtx at this point.
     */
    /* Primary processor-based controls: both set extra bits and strip unwanted ones. */
    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    if (   (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
        || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
    {
        pVmcsInfo->u32ProcCtls  |= pDbgState->fCpe1Extra;
        pVmcsInfo->u32ProcCtls  &= ~pDbgState->fCpe1Unwanted;
        VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
        Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
        pDbgState->fModifiedProcCtls = true;
    }

    /* Secondary processor-based controls: extra bits only, there is no unwanted set. */
    if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
    {
        pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
        VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
        Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
        pDbgState->fModifiedProcCtls2 = true;
    }

    /* Exception bitmap: extra exception intercepts requested by DBGF/DTrace. */
    if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
    {
        pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
        VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
        Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
        pDbgState->fModifiedXcptBitmap = true;
    }

    /* CR0/CR4 guest/host masks.  Note! No fModified flag is tracked for these;
       restoration goes via HM_CHANGED_GUEST_CR0/CR4 when clearing is no longer
       desired (see vmxHCPreRunGuestDebugStateUpdate). */
    if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
    {
        pVmcsInfo->u64Cr0Mask = 0;
        VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
        Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
    }

    if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
    {
        pVmcsInfo->u64Cr4Mask = 0;
        VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
        Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
    }

    NOREF(pVCpu);
}
11268
11269
/**
 * Restores VMCS fields that were changed by hmR0VmxPreRunGuestDebugStateApply for
 * re-entry next time around.
 *
 * @returns Strict VBox status code (i.e. informational status codes too);
 *          @a rcStrict is passed through unmodified.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 * @param   pDbgState       The debug state.
 * @param   rcStrict        The return code from executing the guest using single
 *                          stepping.
 */
static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
                                             VBOXSTRICTRC rcStrict)
{
    /*
     * Restore VM-exit control settings as we may not reenter this function the
     * next time around.
     */
    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;

    /* We reload the initial value, trigger what we can of recalculations the
       next time around.  From the looks of things, that's all that's required atm. */
    if (pDbgState->fModifiedProcCtls)
    {
        if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
            pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
        int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
        AssertRC(rc2);
        pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
    }

    /* We're currently the only ones messing with this one, so just restore the
       cached value and reload the field. */
    if (   pDbgState->fModifiedProcCtls2
        && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
    {
        int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
        AssertRC(rc2);
        pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
    }

    /* If we've modified the exception bitmap, we restore it and trigger
       reloading and partial recalculation the next time around. */
    if (pDbgState->fModifiedXcptBitmap)
    {
        int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pDbgState->bmXcptInitial);
        AssertRC(rc2);
        pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
    }

    /* Hand the guest-execution status back to the caller unchanged. */
    return rcStrict;
}
11322
11323
/**
 * Configures VM-exit controls for current DBGF and DTrace settings.
 *
 * This updates @a pDbgState and the VMCS execution control fields (primary and
 * secondary processor-based controls, exception bitmap) to reflect the
 * necessary VM-exits demanded by DBGF events and DTrace probes.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure. May update
 *                          fUpdatedTscOffsettingAndPreemptTimer.
 * @param   pDbgState       The debug state.
 */
static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
{
#ifndef IN_NEM_DARWIN
    /*
     * Take down the dtrace serial number so we can spot changes.
     */
    pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
    ASMCompilerBarrier();
#endif

    /*
     * We'll rebuild most of the middle block of data members (holding the
     * current settings) as we go along here, so start by clearing it all.
     */
    pDbgState->bmXcptExtra   = 0;
    pDbgState->fCpe1Extra    = 0;
    pDbgState->fCpe1Unwanted = 0;
    pDbgState->fCpe2Extra    = 0;
    for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
        pDbgState->bmExitsToCheck[i] = 0;

    /*
     * Software interrupts (INT XXh) - no idea how to trigger these...
     */
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
        || VBOXVMM_INT_SOFTWARE_ENABLED())
    {
        ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
    }

    /*
     * INT3 breakpoints - triggered by #BP exceptions.
     */
    if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
        pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);

    /*
     * Exception bitmap and XCPT events+probes.
     */
    /* First the DBGF exception events... */
    for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
        if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
            pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);

    /* ...then the DTrace exception probes. */
    if (VBOXVMM_XCPT_DE_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
    if (VBOXVMM_XCPT_DB_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
    if (VBOXVMM_XCPT_BP_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
    if (VBOXVMM_XCPT_OF_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
    if (VBOXVMM_XCPT_BR_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
    if (VBOXVMM_XCPT_UD_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
    if (VBOXVMM_XCPT_NM_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
    if (VBOXVMM_XCPT_DF_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
    if (VBOXVMM_XCPT_TS_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
    if (VBOXVMM_XCPT_NP_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
    if (VBOXVMM_XCPT_SS_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
    if (VBOXVMM_XCPT_GP_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
    if (VBOXVMM_XCPT_PF_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
    if (VBOXVMM_XCPT_MF_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
    if (VBOXVMM_XCPT_AC_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
    if (VBOXVMM_XCPT_XF_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
    if (VBOXVMM_XCPT_VE_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
    if (VBOXVMM_XCPT_SX_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);

    if (pDbgState->bmXcptExtra)
        ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);

    /*
     * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
     *
     * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
     *       So, when adding/changing/removing please don't forget to update it.
     *
     * Some of the macros are picking up local variables to save horizontal space,
     * (being able to see it in a table is the lesser evil here).
     */
#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
        (   DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
         || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
        if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
        {   AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
            ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
        } else do { } while (0)
#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
        if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
        { \
            (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
            AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
            ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
        } else do { } while (0)
#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
        if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
        { \
            (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
            AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
            ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
        } else do { } while (0)
#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
        if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
        { \
            (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
            AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
            ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
        } else do { } while (0)

    SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH);   /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION);  /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG);  /* unconditional (unless #VE) */
    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS);     /* feature dependent, nothing to enable here */
    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE);       /* feature dependent, nothing to enable here */

    SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID);         /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC);       /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
    SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD);           /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
    SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
    SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
    SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM);             /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL);     /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD);   /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF);   /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON);     /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);

    if (   IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
        || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
    {
        int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo,
                                         CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR);
        AssertRC(rc);

#if 0 /** @todo fix me */
        pDbgState->fClearCr0Mask = true;
        pDbgState->fClearCr4Mask = true;
#endif
        if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
            pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
        if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
            pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
        pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
        /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT.  It would
           require clearing here and in the loop if we start using it. */
        ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
    }
    else
    {
        /* CRX probes no longer wanted: drop any pending mask clearing and flag the
           CR0/CR4 state for recalculation so the masks get reinstated. */
        if (pDbgState->fClearCr0Mask)
        {
            pDbgState->fClearCr0Mask = false;
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
        }
        if (pDbgState->fClearCr4Mask)
        {
            pDbgState->fClearCr4Mask = false;
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
        }
    }
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);

    if (   IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
        || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
    {
        /** @todo later, need to fix handler as it assumes this won't usually happen. */
        ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
    }
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);

    SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
    SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
    SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
    SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
#if 0 /** @todo too slow, fix handler. */
    SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
#endif
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);

    if (   IS_EITHER_ENABLED(pVM, INSTR_SGDT)
        || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
        || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
        || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
    {
        pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
        ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
    }
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);

    if (   IS_EITHER_ENABLED(pVM, INSTR_SLDT)
        || IS_EITHER_ENABLED(pVM, INSTR_STR)
        || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
        || IS_EITHER_ENABLED(pVM, INSTR_LTR))
    {
        pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
        ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
    }
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);

    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT);   /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
    SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
    SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV);       /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
    SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
    SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC);   /* unconditional for the current setup */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
    SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES);       /* unconditional (enabled by host, guest cfg) */
    SET_ONLY_XBM_IF_EITHER_EN(EXIT_XSAVES, VMX_EXIT_XSAVES);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS);     /* unconditional (enabled by host, guest cfg) */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);

#undef IS_EITHER_ENABLED
#undef SET_ONLY_XBM_IF_EITHER_EN
#undef SET_CPE1_XBM_IF_EITHER_EN
#undef SET_CPEU_XBM_IF_EITHER_EN
#undef SET_CPE2_XBM_IF_EITHER_EN

    /*
     * Sanitize the control stuff.
     * Mask the requested bits by what the hardware actually supports (allowed1 =
     * bits that may be 1; ~allowed0 = bits that may be 0).
     */
    pDbgState->fCpe2Extra       &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
    if (pDbgState->fCpe2Extra)
        pDbgState->fCpe1Extra   |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
    pDbgState->fCpe1Extra       &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
    pDbgState->fCpe1Unwanted    &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
    /* If our need for RDTSC-exiting changed, force recalculation of the TSC
       offsetting / preemption timer on the next loop iteration. */
#ifndef IN_NEM_DARWIN
    if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
    {
        pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
        pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
    }
#else
    if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
    {
        pVCpu->nem.s.fDebugWantRdTscExit ^= true;
        pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
    }
#endif

    Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
          pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
          pDbgState->fClearCr0Mask ? " clr-cr0" : "",
          pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
}
11623
11624
11625/**
11626 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
11627 * appropriate.
11628 *
11629 * The caller has checked the VM-exit against the
11630 * VMXRUNDBGSTATE::bmExitsToCheck bitmap. The caller has checked for NMIs
11631 * already, so we don't have to do that either.
11632 *
11633 * @returns Strict VBox status code (i.e. informational status codes too).
11634 * @param pVCpu The cross context virtual CPU structure.
11635 * @param pVmxTransient The VMX-transient structure.
11636 * @param uExitReason The VM-exit reason.
11637 *
11638 * @remarks The name of this function is displayed by dtrace, so keep it short
11639 * and to the point. No longer than 33 chars long, please.
11640 */
11641static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
11642{
11643 /*
11644 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
11645 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
11646 *
11647 * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate
11648 * does. Must add/change/remove both places. Same ordering, please.
11649 *
11650 * Added/removed events must also be reflected in the next section
11651 * where we dispatch dtrace events.
11652 */
11653 bool fDtrace1 = false;
11654 bool fDtrace2 = false;
11655 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
11656 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
11657 uint32_t uEventArg = 0;
11658#define SET_EXIT(a_EventSubName) \
11659 do { \
11660 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11661 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11662 } while (0)
11663#define SET_BOTH(a_EventSubName) \
11664 do { \
11665 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
11666 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11667 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
11668 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11669 } while (0)
11670 switch (uExitReason)
11671 {
11672 case VMX_EXIT_MTF:
11673 return vmxHCExitMtf(pVCpu, pVmxTransient);
11674
11675 case VMX_EXIT_XCPT_OR_NMI:
11676 {
11677 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11678 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
11679 {
11680 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11681 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
11682 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
11683 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
11684 {
11685 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
11686 {
11687 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE>(pVCpu, pVmxTransient);
11688 uEventArg = pVmxTransient->uExitIntErrorCode;
11689 }
11690 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
11691 switch (enmEvent1)
11692 {
11693 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
11694 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
11695 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
11696 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
11697 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
11698 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
11699 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
11700 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
11701 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
11702 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
11703 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
11704 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
11705 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
11706 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
11707 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
11708 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
11709 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
11710 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
11711 default: break;
11712 }
11713 }
11714 else
11715 AssertFailed();
11716 break;
11717
11718 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
11719 uEventArg = idxVector;
11720 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
11721 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
11722 break;
11723 }
11724 break;
11725 }
11726
11727 case VMX_EXIT_TRIPLE_FAULT:
11728 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11729 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11730 break;
11731 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11732 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11733 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11734 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11735 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11736
11737 /* Instruction specific VM-exits: */
11738 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11739 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11740 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11741 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11742 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11743 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11744 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11745 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11746 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11747 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11748 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11749 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11750 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11751 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11752 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11753 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11754 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11755 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11756 case VMX_EXIT_MOV_CRX:
11757 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11758 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11759 SET_BOTH(CRX_READ);
11760 else
11761 SET_BOTH(CRX_WRITE);
11762 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11763 break;
11764 case VMX_EXIT_MOV_DRX:
11765 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11766 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11767 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11768 SET_BOTH(DRX_READ);
11769 else
11770 SET_BOTH(DRX_WRITE);
11771 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11772 break;
11773 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11774 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11775 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11776 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11777 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11778 case VMX_EXIT_GDTR_IDTR_ACCESS:
11779 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11780 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11781 {
11782 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11783 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11784 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11785 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11786 }
11787 break;
11788
11789 case VMX_EXIT_LDTR_TR_ACCESS:
11790 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11791 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11792 {
11793 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11794 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11795 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11796 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11797 }
11798 break;
11799
11800 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11801 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11802 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11803 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11804 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11805 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11806 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11807 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11808 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11809 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11810 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11811
11812 /* Events that aren't relevant at this point. */
11813 case VMX_EXIT_EXT_INT:
11814 case VMX_EXIT_INT_WINDOW:
11815 case VMX_EXIT_NMI_WINDOW:
11816 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11817 case VMX_EXIT_PREEMPT_TIMER:
11818 case VMX_EXIT_IO_INSTR:
11819 break;
11820
11821 /* Errors and unexpected events. */
11822 case VMX_EXIT_INIT_SIGNAL:
11823 case VMX_EXIT_SIPI:
11824 case VMX_EXIT_IO_SMI:
11825 case VMX_EXIT_SMI:
11826 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11827 case VMX_EXIT_ERR_MSR_LOAD:
11828 case VMX_EXIT_ERR_MACHINE_CHECK:
11829 case VMX_EXIT_PML_FULL:
11830 case VMX_EXIT_VIRTUALIZED_EOI:
11831 break;
11832
11833 default:
11834 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11835 break;
11836 }
11837#undef SET_BOTH
11838#undef SET_EXIT
11839
11840 /*
11841 * Dtrace tracepoints go first. We do them here at once so we don't
11842 * have to copy the guest state saving and stuff a few dozen times.
11843 * Down side is that we've got to repeat the switch, though this time
11844 * we use enmEvent since the probes are a subset of what DBGF does.
11845 */
11846 if (fDtrace1 || fDtrace2)
11847 {
11848 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11849 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11850 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; RT_NOREF(pCtx); /* Shut up Clang 13. */
11851 switch (enmEvent1)
11852 {
11853 /** @todo consider which extra parameters would be helpful for each probe. */
11854 case DBGFEVENT_END: break;
11855 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11856 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11857 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11858 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11859 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11860 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11861 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11862 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11863 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11864 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11865 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11866 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11867 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11868 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11869 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11870 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11871 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11872 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11873 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11874 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11875 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11876 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11877 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11878 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11879 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11880 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11881 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11882 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11883 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11884 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11885 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11886 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11887 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11888 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11889 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11890 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11891 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11892 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11893 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11894 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11895 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11896 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11897 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11898 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11899 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11900 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11901 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11902 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11903 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11904 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11905 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11906 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11907 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11908 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11909 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11910 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11911 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11912 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11913 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11914 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11915 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11916 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11917 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11918 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11919 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11920 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11921 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11922 }
11923 switch (enmEvent2)
11924 {
11925 /** @todo consider which extra parameters would be helpful for each probe. */
11926 case DBGFEVENT_END: break;
11927 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11928 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11929 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11930 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11931 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11932 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11933 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11934 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11935 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11936 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11937 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11938 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11939 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11940 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11941 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11942 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11943 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11944 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11945 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11946 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11947 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11948 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11949 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11950 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11951 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11952 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11953 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11954 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11955 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11956 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11957 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11958 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11959 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11960 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11961 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11962 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11963 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11964 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11965 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11966 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11967 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11968 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11969 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11970 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11971 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11972 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11973 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11974 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11975 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11976 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11977 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11978 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11979 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11980 }
11981 }
11982
11983 /*
11984 * Fire of the DBGF event, if enabled (our check here is just a quick one,
11985 * the DBGF call will do a full check).
11986 *
11987 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11988 * Note! If we have to events, we prioritize the first, i.e. the instruction
11989 * one, in order to avoid event nesting.
11990 */
11991 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11992 if ( enmEvent1 != DBGFEVENT_END
11993 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11994 {
11995 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11996 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11997 if (rcStrict != VINF_SUCCESS)
11998 return rcStrict;
11999 }
12000 else if ( enmEvent2 != DBGFEVENT_END
12001 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
12002 {
12003 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
12004 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
12005 if (rcStrict != VINF_SUCCESS)
12006 return rcStrict;
12007 }
12008
12009 return VINF_SUCCESS;
12010}
12011
12012
12013/**
12014 * Single-stepping VM-exit filtering.
12015 *
12016 * This is preprocessing the VM-exits and deciding whether we've gotten far
12017 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
12018 * handling is performed.
12019 *
12020 * @returns Strict VBox status code (i.e. informational status codes too).
12021 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
12022 * @param pVmxTransient The VMX-transient structure.
12023 * @param pDbgState The debug state.
12024 */
12025DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
12026{
12027 /*
12028 * Expensive (saves context) generic dtrace VM-exit probe.
12029 */
12030 uint32_t const uExitReason = pVmxTransient->uExitReason;
12031 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
12032 { /* more likely */ }
12033 else
12034 {
12035 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
12036 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
12037 AssertRC(rc);
12038 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
12039 }
12040
12041#ifndef IN_NEM_DARWIN
12042 /*
12043 * Check for host NMI, just to get that out of the way.
12044 */
12045 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
12046 { /* normally likely */ }
12047 else
12048 {
12049 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
12050 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
12051 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
12052 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
12053 }
12054#endif
12055
12056 /*
12057 * Check for single stepping event if we're stepping.
12058 */
12059 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
12060 {
12061 switch (uExitReason)
12062 {
12063 case VMX_EXIT_MTF:
12064 return vmxHCExitMtf(pVCpu, pVmxTransient);
12065
12066 /* Various events: */
12067 case VMX_EXIT_XCPT_OR_NMI:
12068 case VMX_EXIT_EXT_INT:
12069 case VMX_EXIT_TRIPLE_FAULT:
12070 case VMX_EXIT_INT_WINDOW:
12071 case VMX_EXIT_NMI_WINDOW:
12072 case VMX_EXIT_TASK_SWITCH:
12073 case VMX_EXIT_TPR_BELOW_THRESHOLD:
12074 case VMX_EXIT_APIC_ACCESS:
12075 case VMX_EXIT_EPT_VIOLATION:
12076 case VMX_EXIT_EPT_MISCONFIG:
12077 case VMX_EXIT_PREEMPT_TIMER:
12078
12079 /* Instruction specific VM-exits: */
12080 case VMX_EXIT_CPUID:
12081 case VMX_EXIT_GETSEC:
12082 case VMX_EXIT_HLT:
12083 case VMX_EXIT_INVD:
12084 case VMX_EXIT_INVLPG:
12085 case VMX_EXIT_RDPMC:
12086 case VMX_EXIT_RDTSC:
12087 case VMX_EXIT_RSM:
12088 case VMX_EXIT_VMCALL:
12089 case VMX_EXIT_VMCLEAR:
12090 case VMX_EXIT_VMLAUNCH:
12091 case VMX_EXIT_VMPTRLD:
12092 case VMX_EXIT_VMPTRST:
12093 case VMX_EXIT_VMREAD:
12094 case VMX_EXIT_VMRESUME:
12095 case VMX_EXIT_VMWRITE:
12096 case VMX_EXIT_VMXOFF:
12097 case VMX_EXIT_VMXON:
12098 case VMX_EXIT_MOV_CRX:
12099 case VMX_EXIT_MOV_DRX:
12100 case VMX_EXIT_IO_INSTR:
12101 case VMX_EXIT_RDMSR:
12102 case VMX_EXIT_WRMSR:
12103 case VMX_EXIT_MWAIT:
12104 case VMX_EXIT_MONITOR:
12105 case VMX_EXIT_PAUSE:
12106 case VMX_EXIT_GDTR_IDTR_ACCESS:
12107 case VMX_EXIT_LDTR_TR_ACCESS:
12108 case VMX_EXIT_INVEPT:
12109 case VMX_EXIT_RDTSCP:
12110 case VMX_EXIT_INVVPID:
12111 case VMX_EXIT_WBINVD:
12112 case VMX_EXIT_XSETBV:
12113 case VMX_EXIT_RDRAND:
12114 case VMX_EXIT_INVPCID:
12115 case VMX_EXIT_VMFUNC:
12116 case VMX_EXIT_RDSEED:
12117 case VMX_EXIT_XSAVES:
12118 case VMX_EXIT_XRSTORS:
12119 {
12120 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
12121 AssertRCReturn(rc, rc);
12122 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
12123 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
12124 return VINF_EM_DBG_STEPPED;
12125 break;
12126 }
12127
12128 /* Errors and unexpected events: */
12129 case VMX_EXIT_INIT_SIGNAL:
12130 case VMX_EXIT_SIPI:
12131 case VMX_EXIT_IO_SMI:
12132 case VMX_EXIT_SMI:
12133 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
12134 case VMX_EXIT_ERR_MSR_LOAD:
12135 case VMX_EXIT_ERR_MACHINE_CHECK:
12136 case VMX_EXIT_PML_FULL:
12137 case VMX_EXIT_VIRTUALIZED_EOI:
12138 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault like, so I guess we must process it? */
12139 break;
12140
12141 default:
12142 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
12143 break;
12144 }
12145 }
12146
12147 /*
12148 * Check for debugger event breakpoints and dtrace probes.
12149 */
12150 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
12151 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
12152 {
12153 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
12154 if (rcStrict != VINF_SUCCESS)
12155 return rcStrict;
12156 }
12157
12158 /*
12159 * Normal processing.
12160 */
12161#ifdef HMVMX_USE_FUNCTION_TABLE
12162 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
12163#else
12164 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
12165#endif
12166}
12167
12168/** @} */
Note: See TracBrowser for help on using the repository browser.

© 2023 Oracle
ContactPrivacy policyTerms of Use