VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h @ 99653

Last change on this file since 99653 was 99653, checked in by vboxsync, 2 years ago

VMM: Nested VMX: bugref:10318 Separate injection of events between guest and nested-guest, far fewer checks, better readability at slight cost of duplication.
Fixed priority of NMI-window and interrupt-window VM-exits as they can occur regardless of whether an interrupt is pending.
Fixed NMI issue with SMP nested Hyper-V enabled Windows Server 2008 R2 guest by clearing VMCPU_FF_INTERRUPT_NMI on virtual NMI-exit.
Fixed vmxHCExitNmiWindowNested to call vmxHCExitNmiWindow rather than vmxHCExitIntWindow when the nested-hypervisor intercept wasn't set.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 526.7 KB
1/* $Id: VMXAllTemplate.cpp.h 99653 2023-05-08 07:17:30Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
33# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
38# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
39#endif
40
41/** Enables condensing of VMREAD instructions, see vmxHCReadToTransient(). */
42#define HMVMX_WITH_CONDENSED_VMREADS
43
44/** Use the function table. */
45#define HMVMX_USE_FUNCTION_TABLE
46
47/** Determine which tagged-TLB flush handler to use. */
48#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
49#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
50#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
51#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
52
53/** Assert that all the given fields have been read from the VMCS. */
54#ifdef VBOX_STRICT
55# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
56 do { \
57 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
58 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
59 } while (0)
60#else
61# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
62#endif
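
/*
 * Illustrative usage sketch (assumed call site, not taken from this file): a VM-exit
 * handler that consumes the exit qualification would typically assert that the field
 * has already been read into the transient structure, e.g.:
 *
 *     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
 *     Log4Func(("uExitQual=%#RX64\n", pVmxTransient->uExitQual));
 */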
63
64/**
65 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
66 * guest using hardware-assisted VMX.
67 *
68 * This excludes state like GPRs (other than RSP) which are always swapped
69 * and restored across the world-switch, and also registers like the EFER
70 * MSR which cannot be modified by the guest without causing a VM-exit.
71 */
72#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
73 | CPUMCTX_EXTRN_RFLAGS \
74 | CPUMCTX_EXTRN_RSP \
75 | CPUMCTX_EXTRN_SREG_MASK \
76 | CPUMCTX_EXTRN_TABLE_MASK \
77 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
78 | CPUMCTX_EXTRN_SYSCALL_MSRS \
79 | CPUMCTX_EXTRN_SYSENTER_MSRS \
80 | CPUMCTX_EXTRN_TSC_AUX \
81 | CPUMCTX_EXTRN_OTHER_MSRS \
82 | CPUMCTX_EXTRN_CR0 \
83 | CPUMCTX_EXTRN_CR3 \
84 | CPUMCTX_EXTRN_CR4 \
85 | CPUMCTX_EXTRN_DR7 \
86 | CPUMCTX_EXTRN_HWVIRT \
87 | CPUMCTX_EXTRN_INHIBIT_INT \
88 | CPUMCTX_EXTRN_INHIBIT_NMI)
89
90/**
91 * Exception bitmap mask for real-mode guests (real-on-v86).
92 *
93 * We need to intercept all exceptions manually, except:
94 * - \#AC and \#DB, which are excluded here because they are always intercepted
95 * anyway to prevent the CPU from deadlocking due to bugs in Intel CPUs.
96 * - \#PF, which need not be intercepted even in real-mode if we have nested
97 * paging support.
98 */
99#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
100 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
101 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
102 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
103 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
104 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
105 | RT_BIT(X86_XCPT_XF))
106
107/** Maximum VM-instruction error number. */
108#define HMVMX_INSTR_ERROR_MAX 28
109
110/** Profiling macro. */
111#ifdef HM_PROFILE_EXIT_DISPATCH
112# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
113# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
114#else
115# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
116# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
117#endif
118
119#ifndef IN_NEM_DARWIN
120/** Assert that preemption is disabled or covered by thread-context hooks. */
121# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
122 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
123
124/** Assert that we haven't migrated CPUs when thread-context hooks are not
125 * used. */
126# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
127 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
128 ("Illegal migration! Entered on CPU %u Current %u\n", \
129 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
130#else
131# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
132# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
133#endif
134
135/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
136 * context. */
137#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
138 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
139 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
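
/*
 * Illustrative usage sketch: asserting that the entire guest-CPU state subset tracked
 * by this template has been imported before touching it, mirroring the real use in
 * vmxHCSwitchToGstOrNstGstVmcs() further down. The bare call site here is assumed.
 *
 *     HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
 */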
140
141/** Log the VM-exit reason with an easily visible marker to identify it in a
142 * potential sea of logging data. */
143#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
144 do { \
145 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
146 HMGetVmxExitName(a_uExitReason))); \
147 } while (0)
148
149
150/*********************************************************************************************************************************
151* Structures and Typedefs *
152*********************************************************************************************************************************/
153/**
154 * Memory operand read or write access.
155 */
156typedef enum VMXMEMACCESS
157{
158 VMXMEMACCESS_READ = 0,
159 VMXMEMACCESS_WRITE = 1
160} VMXMEMACCESS;
161
162
163/**
164 * VMX VM-exit handler.
165 *
166 * @returns Strict VBox status code (i.e. informational status codes too).
167 * @param pVCpu The cross context virtual CPU structure.
168 * @param pVmxTransient The VMX-transient structure.
169 */
170#ifndef HMVMX_USE_FUNCTION_TABLE
171typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
172#else
173typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
174/** Pointer to VM-exit handler. */
175typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
176#endif
177
178/**
179 * VMX VM-exit handler, non-strict status code.
180 *
181 * This is generally the same as FNVMXEXITHANDLER; the NSRC bit is just FYI.
182 *
183 * @returns VBox status code, no informational status code returned.
184 * @param pVCpu The cross context virtual CPU structure.
185 * @param pVmxTransient The VMX-transient structure.
186 *
187 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
188 * use of that status code will be replaced with VINF_EM_SOMETHING
189 * later when switching over to IEM.
190 */
191#ifndef HMVMX_USE_FUNCTION_TABLE
192typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
193#else
194typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
195#endif
196
197
198/*********************************************************************************************************************************
199* Internal Functions *
200*********************************************************************************************************************************/
201#ifndef HMVMX_USE_FUNCTION_TABLE
202DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
203# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
204# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
205#else
206# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
207# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
208#endif
209#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
210DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
211#endif
212
213static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
214
215/** @name VM-exit handler prototypes.
216 * @{
217 */
218static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
219static FNVMXEXITHANDLER vmxHCExitExtInt;
220static FNVMXEXITHANDLER vmxHCExitTripleFault;
221static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
222static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
223static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
224static FNVMXEXITHANDLER vmxHCExitCpuid;
225static FNVMXEXITHANDLER vmxHCExitGetsec;
226static FNVMXEXITHANDLER vmxHCExitHlt;
227static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
228static FNVMXEXITHANDLER vmxHCExitInvlpg;
229static FNVMXEXITHANDLER vmxHCExitRdpmc;
230static FNVMXEXITHANDLER vmxHCExitVmcall;
231#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
232static FNVMXEXITHANDLER vmxHCExitVmclear;
233static FNVMXEXITHANDLER vmxHCExitVmlaunch;
234static FNVMXEXITHANDLER vmxHCExitVmptrld;
235static FNVMXEXITHANDLER vmxHCExitVmptrst;
236static FNVMXEXITHANDLER vmxHCExitVmread;
237static FNVMXEXITHANDLER vmxHCExitVmresume;
238static FNVMXEXITHANDLER vmxHCExitVmwrite;
239static FNVMXEXITHANDLER vmxHCExitVmxoff;
240static FNVMXEXITHANDLER vmxHCExitVmxon;
241static FNVMXEXITHANDLER vmxHCExitInvvpid;
242# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
243static FNVMXEXITHANDLER vmxHCExitInvept;
244# endif
245#endif
246static FNVMXEXITHANDLER vmxHCExitRdtsc;
247static FNVMXEXITHANDLER vmxHCExitMovCRx;
248static FNVMXEXITHANDLER vmxHCExitMovDRx;
249static FNVMXEXITHANDLER vmxHCExitIoInstr;
250static FNVMXEXITHANDLER vmxHCExitRdmsr;
251static FNVMXEXITHANDLER vmxHCExitWrmsr;
252static FNVMXEXITHANDLER vmxHCExitMwait;
253static FNVMXEXITHANDLER vmxHCExitMtf;
254static FNVMXEXITHANDLER vmxHCExitMonitor;
255static FNVMXEXITHANDLER vmxHCExitPause;
256static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
257static FNVMXEXITHANDLER vmxHCExitApicAccess;
258static FNVMXEXITHANDLER vmxHCExitEptViolation;
259static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
260static FNVMXEXITHANDLER vmxHCExitRdtscp;
261static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
262static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
263static FNVMXEXITHANDLER vmxHCExitXsetbv;
264static FNVMXEXITHANDLER vmxHCExitInvpcid;
265#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
266static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
267#endif
268static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
269static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
270/** @} */
271
272#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
273/** @name Nested-guest VM-exit handler prototypes.
274 * @{
275 */
276static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
277static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
278static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
279static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
280static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
281static FNVMXEXITHANDLER vmxHCExitHltNested;
282static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
283static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
284static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
285static FNVMXEXITHANDLER vmxHCExitRdtscNested;
286static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
287static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
288static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
289static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
290static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
291static FNVMXEXITHANDLER vmxHCExitMwaitNested;
292static FNVMXEXITHANDLER vmxHCExitMtfNested;
293static FNVMXEXITHANDLER vmxHCExitMonitorNested;
294static FNVMXEXITHANDLER vmxHCExitPauseNested;
295static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
296static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
297static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
298static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
299static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
300static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
301static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
302static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
303static FNVMXEXITHANDLER vmxHCExitInstrNested;
304static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
305# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
306static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
307static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
308# endif
309/** @} */
310#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
311
312
313/*********************************************************************************************************************************
314* Global Variables *
315*********************************************************************************************************************************/
316#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
317/**
318 * Array of all VMCS fields.
319 * Any fields added to the VT-x spec. should be added here.
320 *
321 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
322 * of nested-guests.
323 */
324static const uint32_t g_aVmcsFields[] =
325{
326 /* 16-bit control fields. */
327 VMX_VMCS16_VPID,
328 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
329 VMX_VMCS16_EPTP_INDEX,
330 VMX_VMCS16_HLAT_PREFIX_SIZE,
331
332 /* 16-bit guest-state fields. */
333 VMX_VMCS16_GUEST_ES_SEL,
334 VMX_VMCS16_GUEST_CS_SEL,
335 VMX_VMCS16_GUEST_SS_SEL,
336 VMX_VMCS16_GUEST_DS_SEL,
337 VMX_VMCS16_GUEST_FS_SEL,
338 VMX_VMCS16_GUEST_GS_SEL,
339 VMX_VMCS16_GUEST_LDTR_SEL,
340 VMX_VMCS16_GUEST_TR_SEL,
341 VMX_VMCS16_GUEST_INTR_STATUS,
342 VMX_VMCS16_GUEST_PML_INDEX,
343
344 /* 16-bit host-state fields. */
345 VMX_VMCS16_HOST_ES_SEL,
346 VMX_VMCS16_HOST_CS_SEL,
347 VMX_VMCS16_HOST_SS_SEL,
348 VMX_VMCS16_HOST_DS_SEL,
349 VMX_VMCS16_HOST_FS_SEL,
350 VMX_VMCS16_HOST_GS_SEL,
351 VMX_VMCS16_HOST_TR_SEL,
352
353 /* 64-bit control fields. */
354 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
355 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
356 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
357 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
358 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
359 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
360 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
361 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
362 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
363 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
364 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
365 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
366 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
367 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
368 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
369 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
370 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
371 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
372 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
373 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
374 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
375 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
376 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
377 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
378 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
379 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
380 VMX_VMCS64_CTRL_EPTP_FULL,
381 VMX_VMCS64_CTRL_EPTP_HIGH,
382 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
383 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
384 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
385 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
386 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
387 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
388 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
389 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
390 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
391 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
392 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
393 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
394 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
395 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
396 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
397 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
398 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
399 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
400 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
401 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
402 VMX_VMCS64_CTRL_SPPTP_FULL,
403 VMX_VMCS64_CTRL_SPPTP_HIGH,
404 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
405 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
406 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
407 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
408 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
409 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
410 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_FULL,
411 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_HIGH,
412 VMX_VMCS64_CTRL_HLAT_PTR_FULL,
413 VMX_VMCS64_CTRL_HLAT_PTR_HIGH,
414 VMX_VMCS64_CTRL_EXIT2_FULL,
415 VMX_VMCS64_CTRL_EXIT2_HIGH,
416
417 /* 64-bit read-only data fields. */
418 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
419 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
420
421 /* 64-bit guest-state fields. */
422 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
423 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
424 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
425 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
426 VMX_VMCS64_GUEST_PAT_FULL,
427 VMX_VMCS64_GUEST_PAT_HIGH,
428 VMX_VMCS64_GUEST_EFER_FULL,
429 VMX_VMCS64_GUEST_EFER_HIGH,
430 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
431 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
432 VMX_VMCS64_GUEST_PDPTE0_FULL,
433 VMX_VMCS64_GUEST_PDPTE0_HIGH,
434 VMX_VMCS64_GUEST_PDPTE1_FULL,
435 VMX_VMCS64_GUEST_PDPTE1_HIGH,
436 VMX_VMCS64_GUEST_PDPTE2_FULL,
437 VMX_VMCS64_GUEST_PDPTE2_HIGH,
438 VMX_VMCS64_GUEST_PDPTE3_FULL,
439 VMX_VMCS64_GUEST_PDPTE3_HIGH,
440 VMX_VMCS64_GUEST_BNDCFGS_FULL,
441 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
442 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
443 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
444 VMX_VMCS64_GUEST_PKRS_FULL,
445 VMX_VMCS64_GUEST_PKRS_HIGH,
446
447 /* 64-bit host-state fields. */
448 VMX_VMCS64_HOST_PAT_FULL,
449 VMX_VMCS64_HOST_PAT_HIGH,
450 VMX_VMCS64_HOST_EFER_FULL,
451 VMX_VMCS64_HOST_EFER_HIGH,
452 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
453 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
454 VMX_VMCS64_HOST_PKRS_FULL,
455 VMX_VMCS64_HOST_PKRS_HIGH,
456
457 /* 32-bit control fields. */
458 VMX_VMCS32_CTRL_PIN_EXEC,
459 VMX_VMCS32_CTRL_PROC_EXEC,
460 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
461 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
462 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
463 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
464 VMX_VMCS32_CTRL_EXIT,
465 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
466 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
467 VMX_VMCS32_CTRL_ENTRY,
468 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
469 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
470 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
471 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
472 VMX_VMCS32_CTRL_TPR_THRESHOLD,
473 VMX_VMCS32_CTRL_PROC_EXEC2,
474 VMX_VMCS32_CTRL_PLE_GAP,
475 VMX_VMCS32_CTRL_PLE_WINDOW,
476
477 /* 32-bit read-only fields. */
478 VMX_VMCS32_RO_VM_INSTR_ERROR,
479 VMX_VMCS32_RO_EXIT_REASON,
480 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
481 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
482 VMX_VMCS32_RO_IDT_VECTORING_INFO,
483 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
484 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
485 VMX_VMCS32_RO_EXIT_INSTR_INFO,
486
487 /* 32-bit guest-state fields. */
488 VMX_VMCS32_GUEST_ES_LIMIT,
489 VMX_VMCS32_GUEST_CS_LIMIT,
490 VMX_VMCS32_GUEST_SS_LIMIT,
491 VMX_VMCS32_GUEST_DS_LIMIT,
492 VMX_VMCS32_GUEST_FS_LIMIT,
493 VMX_VMCS32_GUEST_GS_LIMIT,
494 VMX_VMCS32_GUEST_LDTR_LIMIT,
495 VMX_VMCS32_GUEST_TR_LIMIT,
496 VMX_VMCS32_GUEST_GDTR_LIMIT,
497 VMX_VMCS32_GUEST_IDTR_LIMIT,
498 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
499 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
500 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
501 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
502 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
503 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
504 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
505 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
506 VMX_VMCS32_GUEST_INT_STATE,
507 VMX_VMCS32_GUEST_ACTIVITY_STATE,
508 VMX_VMCS32_GUEST_SMBASE,
509 VMX_VMCS32_GUEST_SYSENTER_CS,
510 VMX_VMCS32_PREEMPT_TIMER_VALUE,
511
512 /* 32-bit host-state fields. */
513 VMX_VMCS32_HOST_SYSENTER_CS,
514
515 /* Natural-width control fields. */
516 VMX_VMCS_CTRL_CR0_MASK,
517 VMX_VMCS_CTRL_CR4_MASK,
518 VMX_VMCS_CTRL_CR0_READ_SHADOW,
519 VMX_VMCS_CTRL_CR4_READ_SHADOW,
520 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
521 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
522 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
523 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
524
525 /* Natural-width read-only data fields. */
526 VMX_VMCS_RO_EXIT_QUALIFICATION,
527 VMX_VMCS_RO_IO_RCX,
528 VMX_VMCS_RO_IO_RSI,
529 VMX_VMCS_RO_IO_RDI,
530 VMX_VMCS_RO_IO_RIP,
531 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
532
533 /* Natural-width guest-state fields. */
534 VMX_VMCS_GUEST_CR0,
535 VMX_VMCS_GUEST_CR3,
536 VMX_VMCS_GUEST_CR4,
537 VMX_VMCS_GUEST_ES_BASE,
538 VMX_VMCS_GUEST_CS_BASE,
539 VMX_VMCS_GUEST_SS_BASE,
540 VMX_VMCS_GUEST_DS_BASE,
541 VMX_VMCS_GUEST_FS_BASE,
542 VMX_VMCS_GUEST_GS_BASE,
543 VMX_VMCS_GUEST_LDTR_BASE,
544 VMX_VMCS_GUEST_TR_BASE,
545 VMX_VMCS_GUEST_GDTR_BASE,
546 VMX_VMCS_GUEST_IDTR_BASE,
547 VMX_VMCS_GUEST_DR7,
548 VMX_VMCS_GUEST_RSP,
549 VMX_VMCS_GUEST_RIP,
550 VMX_VMCS_GUEST_RFLAGS,
551 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
552 VMX_VMCS_GUEST_SYSENTER_ESP,
553 VMX_VMCS_GUEST_SYSENTER_EIP,
554 VMX_VMCS_GUEST_S_CET,
555 VMX_VMCS_GUEST_SSP,
556 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
557
558 /* Natural-width host-state fields */
559 VMX_VMCS_HOST_CR0,
560 VMX_VMCS_HOST_CR3,
561 VMX_VMCS_HOST_CR4,
562 VMX_VMCS_HOST_FS_BASE,
563 VMX_VMCS_HOST_GS_BASE,
564 VMX_VMCS_HOST_TR_BASE,
565 VMX_VMCS_HOST_GDTR_BASE,
566 VMX_VMCS_HOST_IDTR_BASE,
567 VMX_VMCS_HOST_SYSENTER_ESP,
568 VMX_VMCS_HOST_SYSENTER_EIP,
569 VMX_VMCS_HOST_RSP,
570 VMX_VMCS_HOST_RIP,
571 VMX_VMCS_HOST_S_CET,
572 VMX_VMCS_HOST_SSP,
573 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
574};
575#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
576
577#ifdef HMVMX_USE_FUNCTION_TABLE
578/**
579 * VMX_EXIT dispatch table.
580 */
581static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
582{
583 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
584 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
585 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
586 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
587 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
588 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
589 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
590 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
591 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
592 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
593 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
594 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
595 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
596 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
597 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
598 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
599 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
600 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
601 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
602#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
603 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
604 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
605 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
606 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
607 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
608 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
609 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
610 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
611 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
612#else
613 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
614 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
615 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
616 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
617 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
618 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
619 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
620 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
621 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
622#endif
623 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
624 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
625 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
626 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
627 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
628 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
629 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
630 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
631 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
632 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
633 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
634 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
635 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
636 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
637 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
638 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
639 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
640 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
641 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
642 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
643 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
644 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
645#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
646 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
647#else
648 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
649#endif
650 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
651 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
652#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
653 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
654#else
655 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
656#endif
657 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
658 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
659 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
660 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
661 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
662 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
663 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
664 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
665 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
666 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
667 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
668 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
669 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
670 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
671 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
672 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
673};
674#endif /* HMVMX_USE_FUNCTION_TABLE */
675
676#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
677static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
678{
679 /* 0 */ "(Not Used)",
680 /* 1 */ "VMCALL executed in VMX root operation.",
681 /* 2 */ "VMCLEAR with invalid physical address.",
682 /* 3 */ "VMCLEAR with VMXON pointer.",
683 /* 4 */ "VMLAUNCH with non-clear VMCS.",
684 /* 5 */ "VMRESUME with non-launched VMCS.",
685 /* 6 */ "VMRESUME after VMXOFF.",
686 /* 7 */ "VM-entry with invalid control fields.",
687 /* 8 */ "VM-entry with invalid host state fields.",
688 /* 9 */ "VMPTRLD with invalid physical address.",
689 /* 10 */ "VMPTRLD with VMXON pointer.",
690 /* 11 */ "VMPTRLD with incorrect revision identifier.",
691 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
692 /* 13 */ "VMWRITE to read-only VMCS component.",
693 /* 14 */ "(Not Used)",
694 /* 15 */ "VMXON executed in VMX root operation.",
695 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
696 /* 17 */ "VM-entry with non-launched executive VMCS.",
697 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
698 /* 19 */ "VMCALL with non-clear VMCS.",
699 /* 20 */ "VMCALL with invalid VM-exit control fields.",
700 /* 21 */ "(Not Used)",
701 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
702 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
703 /* 24 */ "VMCALL with invalid SMM-monitor features.",
704 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
705 /* 26 */ "VM-entry with events blocked by MOV SS.",
706 /* 27 */ "(Not Used)",
707 /* 28 */ "Invalid operand to INVEPT/INVVPID."
708};
709#endif /* VBOX_STRICT && LOG_ENABLED */
710
711
712/**
713 * Gets the CR0 guest/host mask.
714 *
715 * These bits typically do not change through the lifetime of a VM. Any bit set in
716 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
717 * by the guest.
718 *
719 * @returns The CR0 guest/host mask.
720 * @param pVCpu The cross context virtual CPU structure.
721 */
722static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
723{
724 /*
725 * Modifications by the guest to CR0 bits that VT-x ignores saving/restoring (CD, ET,
726 * NW) and to CR0 bits that we require for shadow paging (PG) must cause VM-exits.
727 *
728 * Furthermore, modifications to any bits that are reserved/unspecified currently
729 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
730 * when future CPUs specify and use currently reserved/unspecified bits.
731 */
732 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
733 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
734 * and @bugref{6944}. */
735 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
736 AssertCompile(RT_HI_U32(VMX_EXIT_HOST_CR0_IGNORE_MASK) == UINT32_C(0xffffffff)); /* Paranoia. */
737 return ( X86_CR0_PE
738 | X86_CR0_NE
739 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
740 | X86_CR0_PG
741 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
742}
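
/*
 * Illustrative usage sketch (assumptions: the VMX_VMCS_WRITE_NW macro supplied by the
 * including code and a u64Cr0Mask cache member in VMXVMCSINFO): committing the fixed
 * CR0 guest/host mask to the VMCS so guest writes to the masked bits cause VM-exits.
 *
 *     uint64_t const fCr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
 *     int const rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, fCr0Mask);
 *     AssertRC(rc);
 *     pVmcsInfo->u64Cr0Mask = fCr0Mask;  // assumed cache field, kept in sync like u32XcptBitmap below
 */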
743
744
745/**
746 * Gets the CR4 guest/host mask.
747 *
748 * These bits typically do not change through the lifetime of a VM. Any bit set in
749 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
750 * by the guest.
751 *
752 * @returns The CR4 guest/host mask.
753 * @param pVCpu The cross context virtual CPU structure.
754 */
755static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
756{
757 /*
758 * We construct a mask of all CR4 bits that the guest can modify without causing
759 * a VM-exit. Then we invert this mask to obtain all CR4 bits that should cause
760 * a VM-exit when the guest attempts to modify them while executing using
761 * hardware-assisted VMX.
762 *
763 * When a feature is not exposed to the guest (and may be present on the host),
764 * we want to intercept guest modifications to the bit so we can emulate proper
765 * behavior (e.g., #GP).
766 *
767 * Furthermore, only modifications to those bits that don't require immediate
768 * emulation are allowed. For example, PCIDE is excluded because its behavior
769 * depends on CR3, which might not always be the guest value while executing
770 * using hardware-assisted VMX.
771 */
772 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
773 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
774#ifdef IN_NEM_DARWIN
775 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
776#endif
777 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
778
779 /*
780 * Paranoia.
781 * Ensure features exposed to the guest are present on the host.
782 */
783 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
784#ifdef IN_NEM_DARWIN
785 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
786#endif
787 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
788
789 uint64_t const fGstMask = X86_CR4_PVI
790 | X86_CR4_TSD
791 | X86_CR4_DE
792 | X86_CR4_MCE
793 | X86_CR4_PCE
794 | X86_CR4_OSXMMEEXCPT
795 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
796#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
797 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
798 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
799#endif
800 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
801 return ~fGstMask;
802}
803
804
805/**
806 * Adds one or more exceptions to the exception bitmap and commits it to the current
807 * VMCS.
808 *
809 * @param pVCpu The cross context virtual CPU structure.
810 * @param pVmxTransient The VMX-transient structure.
811 * @param uXcptMask The exception(s) to add.
812 */
813static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
814{
815 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
816 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
817 if ((uXcptBitmap & uXcptMask) != uXcptMask)
818 {
819 uXcptBitmap |= uXcptMask;
820 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
821 AssertRC(rc);
822 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
823 }
824}
825
826
827/**
828 * Adds an exception to the exception bitmap and commits it to the current VMCS.
829 *
830 * @param pVCpu The cross context virtual CPU structure.
831 * @param pVmxTransient The VMX-transient structure.
832 * @param uXcpt The exception to add.
833 */
834static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
835{
836 Assert(uXcpt <= X86_XCPT_LAST);
837 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
838}
839
840
841/**
842 * Removes one or more exceptions from the exception bitmap and commits it to the
843 * current VMCS.
844 *
845 * This takes care of not removing the exception intercept if a nested-guest
846 * requires the exception to be intercepted.
847 *
848 * @returns VBox status code.
849 * @param pVCpu The cross context virtual CPU structure.
850 * @param pVmxTransient The VMX-transient structure.
851 * @param uXcptMask The exception(s) to remove.
852 */
853static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
854{
855 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
856 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
857 if (uXcptBitmap & uXcptMask)
858 {
859#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
860 if (!pVmxTransient->fIsNestedGuest)
861 { /* likely */ }
862 else
863 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
864#endif
865#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
866 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
867 | RT_BIT(X86_XCPT_DE)
868 | RT_BIT(X86_XCPT_NM)
869 | RT_BIT(X86_XCPT_TS)
870 | RT_BIT(X86_XCPT_UD)
871 | RT_BIT(X86_XCPT_NP)
872 | RT_BIT(X86_XCPT_SS)
873 | RT_BIT(X86_XCPT_GP)
874 | RT_BIT(X86_XCPT_PF)
875 | RT_BIT(X86_XCPT_MF));
876#elif defined(HMVMX_ALWAYS_TRAP_PF)
877 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
878#endif
879 if (uXcptMask)
880 {
881 /* Validate we are not removing any essential exception intercepts. */
882#ifndef IN_NEM_DARWIN
883 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
884#else
885 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
886#endif
887 NOREF(pVCpu);
888 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
889 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
890
891 /* Remove it from the exception bitmap. */
892 uXcptBitmap &= ~uXcptMask;
893
894 /* Commit and update the cache if necessary. */
895 if (pVmcsInfo->u32XcptBitmap != uXcptBitmap)
896 {
897 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
898 AssertRC(rc);
899 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
900 }
901 }
902 }
903 return VINF_SUCCESS;
904}
905
906
907/**
908 * Removes an exception from the exception bitmap and commits it to the current
909 * VMCS.
910 *
911 * @returns VBox status code.
912 * @param pVCpu The cross context virtual CPU structure.
913 * @param pVmxTransient The VMX-transient structure.
914 * @param uXcpt The exception to remove.
915 */
916static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
917{
918 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
919}
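
/*
 * Illustrative usage sketch (assumed call sites): temporarily intercepting #GP and
 * removing the intercept again once it is no longer needed, using the helpers above.
 *
 *     vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 *     ...
 *     int const rc = vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 *     AssertRC(rc);
 */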
920
921#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
922
923/**
924 * Loads the shadow VMCS specified by the VMCS info. object.
925 *
926 * @returns VBox status code.
927 * @param pVmcsInfo The VMCS info. object.
928 *
929 * @remarks Can be called with interrupts disabled.
930 */
931static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
932{
933 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
934 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
935
936 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
937 if (RT_SUCCESS(rc))
938 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
939 return rc;
940}
941
942
943/**
944 * Clears the shadow VMCS specified by the VMCS info. object.
945 *
946 * @returns VBox status code.
947 * @param pVmcsInfo The VMCS info. object.
948 *
949 * @remarks Can be called with interrupts disabled.
950 */
951static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
952{
953 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
954 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
955
956 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
957 if (RT_SUCCESS(rc))
958 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
959 return rc;
960}
961
962
963/**
964 * Switches from and to the specified VMCSes.
965 *
966 * @returns VBox status code.
967 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
968 * @param pVmcsInfoTo The VMCS info. object we are switching to.
969 *
970 * @remarks Called with interrupts disabled.
971 */
972static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
973{
974 /*
975 * Clear the VMCS we are switching out if it has not already been cleared.
976 * This will sync any CPU internal data back to the VMCS.
977 */
978 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
979 {
980 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
981 if (RT_SUCCESS(rc))
982 {
983 /*
984 * The shadow VMCS, if any, would not be active at this point since we
985 * would have cleared it while importing the virtual hardware-virtualization
986 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
987 * clear the shadow VMCS here, just assert for safety.
988 */
989 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
990 }
991 else
992 return rc;
993 }
994
995 /*
996 * Clear the VMCS we are switching to if it has not already been cleared.
997 * This will initialize the VMCS launch state to "clear" required for loading it.
998 *
999 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1000 */
1001 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1002 {
1003 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1004 if (RT_SUCCESS(rc))
1005 { /* likely */ }
1006 else
1007 return rc;
1008 }
1009
1010 /*
1011 * Finally, load the VMCS we are switching to.
1012 */
1013 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1014}
1015
1016
1017/**
1018 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1019 * caller.
1020 *
1021 * @returns VBox status code.
1022 * @param pVCpu The cross context virtual CPU structure.
1023 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1024 * true) or guest VMCS (pass false).
1025 */
1026static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1027{
1028 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1029 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1030
1031 PVMXVMCSINFO pVmcsInfoFrom;
1032 PVMXVMCSINFO pVmcsInfoTo;
1033 if (fSwitchToNstGstVmcs)
1034 {
1035 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1036 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1037 }
1038 else
1039 {
1040 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1041 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1042 }
1043
1044 /*
1045 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1046 * preemption hook code path acquires the current VMCS.
1047 */
1048 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1049
1050 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1051 if (RT_SUCCESS(rc))
1052 {
1053 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1054 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1055
1056 /*
1057 * If we are switching to a VMCS that was executed on a different host CPU or was
1058 * never executed before, flag that we need to export the host state before executing
1059 * guest/nested-guest code using hardware-assisted VMX.
1060 *
1061 * This could probably be done in a preemptible context since the preemption hook
1062 * will flag the necessary change in host context. However, since preemption is
1063 * already disabled and to avoid making assumptions about host specific code in
1064 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1065 * disabled.
1066 */
1067 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1068 { /* likely */ }
1069 else
1070 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1071
1072 ASMSetFlags(fEFlags);
1073
1074 /*
1075 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1076 * flag that we need to update the host MSR values there. Even if we decide in the
1077 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1078 * if its content differs, we would have to update the host MSRs anyway.
1079 */
1080 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1081 }
1082 else
1083 ASMSetFlags(fEFlags);
1084 return rc;
1085}
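
/*
 * Illustrative usage sketch (assumed call site): switching to the nested-guest VMCS
 * before executing nested-guest code; passing false later restores the guest VMCS.
 *
 *     int const rc = vmxHCSwitchToGstOrNstGstVmcs(pVCpu, true);  // fSwitchToNstGstVmcs
 *     AssertRCReturn(rc, rc);
 */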
1086
1087#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1088#ifdef VBOX_STRICT
1089
1090/**
1091 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1092 * transient structure.
1093 *
1094 * @param pVCpu The cross context virtual CPU structure.
1095 * @param pVmxTransient The VMX-transient structure.
1096 */
1097DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1098{
1099 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1100 AssertRC(rc);
1101}
1102
1103
1104/**
1105 * Reads the VM-entry exception error code field from the VMCS into
1106 * the VMX transient structure.
1107 *
1108 * @param pVCpu The cross context virtual CPU structure.
1109 * @param pVmxTransient The VMX-transient structure.
1110 */
1111DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1112{
1113 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1114 AssertRC(rc);
1115}
1116
1117
1118/**
1119 * Reads the VM-entry instruction-length field from the VMCS into
1120 * the VMX transient structure.
1121 *
1122 * @param pVCpu The cross context virtual CPU structure.
1123 * @param pVmxTransient The VMX-transient structure.
1124 */
1125DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1126{
1127 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1128 AssertRC(rc);
1129}
1130
1131#endif /* VBOX_STRICT */
1132
1133
1134/**
1135 * Reads VMCS fields into the VMXTRANSIENT structure, slow path version.
1136 *
1137 * Don't call this directly unless it's likely that some or all of the fields
1138 * given in @a a_fReadMask have already been read.
1139 *
1140 * @tparam a_fReadMask The fields to read.
1141 * @param pVCpu The cross context virtual CPU structure.
1142 * @param pVmxTransient The VMX-transient structure.
1143 */
1144template<uint32_t const a_fReadMask>
1145static void vmxHCReadToTransientSlow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1146{
1147 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1148 | HMVMX_READ_EXIT_INSTR_LEN
1149 | HMVMX_READ_EXIT_INSTR_INFO
1150 | HMVMX_READ_IDT_VECTORING_INFO
1151 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1152 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1153 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1154 | HMVMX_READ_GUEST_LINEAR_ADDR
1155 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1156 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1157 )) == 0);
1158
1159 if ((pVmxTransient->fVmcsFieldsRead & a_fReadMask) != a_fReadMask)
1160 {
1161 uint32_t const fVmcsFieldsRead = pVmxTransient->fVmcsFieldsRead;
1162
1163 if ( (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1164 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1165 {
1166 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1167 AssertRC(rc);
1168 }
1169 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1170 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1171 {
1172 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1173 AssertRC(rc);
1174 }
1175 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1176 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1177 {
1178 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1179 AssertRC(rc);
1180 }
1181 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1182 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1183 {
1184 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1185 AssertRC(rc);
1186 }
1187 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1188 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1189 {
1190 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1191 AssertRC(rc);
1192 }
1193 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1194 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1195 {
1196 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1197 AssertRC(rc);
1198 }
1199 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1200 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1201 {
1202 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1203 AssertRC(rc);
1204 }
1205 if ( (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1206 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1207 {
1208 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1209 AssertRC(rc);
1210 }
1211 if ( (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1212 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1213 {
1214 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1215 AssertRC(rc);
1216 }
1217 if ( (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1218 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1219 {
1220 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1221 AssertRC(rc);
1222 }
1223
1224 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1225 }
1226}
1227
1228
1229/**
1230 * Reads VMCS fields into the VMXTRANSIENT structure.
1231 *
1232 * This optimizes for the case where none of @a a_fReadMask has been read yet,
1233 * generating an optimized read sequence without any conditionals in
1234 * non-strict builds.
1235 *
1236 * @tparam a_fReadMask The fields to read. One or more of the
1237 * HMVMX_READ_XXX fields ORed together.
1238 * @param pVCpu The cross context virtual CPU structure.
1239 * @param pVmxTransient The VMX-transient structure.
1240 */
1241template<uint32_t const a_fReadMask>
1242DECLINLINE(void) vmxHCReadToTransient(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1243{
1244 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1245 | HMVMX_READ_EXIT_INSTR_LEN
1246 | HMVMX_READ_EXIT_INSTR_INFO
1247 | HMVMX_READ_IDT_VECTORING_INFO
1248 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1249 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1250 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1251 | HMVMX_READ_GUEST_LINEAR_ADDR
1252 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1253 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1254 )) == 0);
1255
1256 if (RT_LIKELY(!(pVmxTransient->fVmcsFieldsRead & a_fReadMask)))
1257 {
1258 if (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1259 {
1260 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1261 AssertRC(rc);
1262 }
1263 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1264 {
1265 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1266 AssertRC(rc);
1267 }
1268 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1269 {
1270 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1271 AssertRC(rc);
1272 }
1273 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1274 {
1275 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1276 AssertRC(rc);
1277 }
1278 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1279 {
1280 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1281 AssertRC(rc);
1282 }
1283 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1284 {
1285 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1286 AssertRC(rc);
1287 }
1288 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1289 {
1290 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1291 AssertRC(rc);
1292 }
1293 if (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1294 {
1295 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1296 AssertRC(rc);
1297 }
1298 if (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1299 {
1300 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1301 AssertRC(rc);
1302 }
1303 if (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1304 {
1305 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1306 AssertRC(rc);
1307 }
1308
1309 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1310 }
1311 else
1312 {
1313 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatReadToTransientFallback);
1314 Log11Func(("a_fReadMask=%#x fVmcsFieldsRead=%#x => %#x - Taking inefficient code path!\n",
1315 a_fReadMask, pVmxTransient->fVmcsFieldsRead, a_fReadMask & pVmxTransient->fVmcsFieldsRead));
1316 vmxHCReadToTransientSlow<a_fReadMask>(pVCpu, pVmxTransient);
1317 }
1318}
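
/*
 * Illustrative usage sketch: a VM-exit handler needing the exit qualification and the
 * instruction length would instantiate the template like this; the particular flag
 * combination and the Log4Func() call are only examples, not taken from this file.
 *
 *     vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
 *                          | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
 *     Log4Func(("uExitQual=%#RX64 cbExitInstr=%u\n", pVmxTransient->uExitQual, pVmxTransient->cbExitInstr));
 */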
1319
1320
1321#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1322/**
1323 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1324 *
1325 * @param pVCpu The cross context virtual CPU structure.
1326 * @param pVmxTransient The VMX-transient structure.
1327 */
1328static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1329{
1330 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1331 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1332 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1333 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1334 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1335 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1336 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1337 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1338 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1339 AssertRC(rc);
1340 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1341 | HMVMX_READ_EXIT_INSTR_LEN
1342 | HMVMX_READ_EXIT_INSTR_INFO
1343 | HMVMX_READ_IDT_VECTORING_INFO
1344 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1345 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1346 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1347 | HMVMX_READ_GUEST_LINEAR_ADDR
1348 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1349}
1350#endif
1351
1352/**
1353 * Verifies that our cached values of the VMCS fields are all consistent with
1354 * what's actually present in the VMCS.
1355 *
1356 * @returns VBox status code.
1357 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1358 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1359 * VMCS content. HMCPU error-field is
1360 * updated, see VMX_VCI_XXX.
1361 * @param pVCpu The cross context virtual CPU structure.
1362 * @param pVmcsInfo The VMCS info. object.
1363 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1364 */
1365static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1366{
1367 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1368
1369 uint32_t u32Val;
1370 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1371 AssertRC(rc);
1372 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1373 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1374 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1375 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1376
1377 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1378 AssertRC(rc);
1379 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1380 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1381 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1382 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1383
1384 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1385 AssertRC(rc);
1386 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1387 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1388 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1389 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1390
1391 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1392 AssertRC(rc);
1393 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1394 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1395 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1396 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1397
1398 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1399 {
1400 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1401 AssertRC(rc);
1402 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1403 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1404 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1405 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1406 }
1407
1408 uint64_t u64Val;
1409 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1410 {
1411 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1412 AssertRC(rc);
1413 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1414 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1415 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1416 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1417 }
1418
1419 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1420 AssertRC(rc);
1421 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1422 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1423 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1424 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1425
1426 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1427 AssertRC(rc);
1428 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1429 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1430 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1431 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1432
1433 NOREF(pcszVmcs);
1434 return VINF_SUCCESS;
1435}
1436
1437
1438/**
1439 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1440 * VMCS.
1441 *
1442 * This is typically required when the guest changes paging mode.
1443 *
1444 * @returns VBox status code.
1445 * @param pVCpu The cross context virtual CPU structure.
1446 * @param pVmxTransient The VMX-transient structure.
1447 *
1448 * @remarks Requires EFER.
1449 * @remarks No-long-jump zone!!!
1450 */
1451static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1452{
1453 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1454 {
1455 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1456 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1457
1458 /*
1459 * VM-entry controls.
1460 */
1461 {
1462 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1463 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1464
1465 /*
1466 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1467 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1468 *
1469 * For nested-guests, this is a mandatory VM-entry control. It's also
1470 * required because we do not want to leak host bits to the nested-guest.
1471 */
1472 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1473
1474 /*
1475 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1476 *
1477 * For nested-guests, we initialize the "IA-32e mode guest" control with whatever is
1478 * required to get the nested-guest working with hardware-assisted VMX execution.
1479 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1480 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1481 * here rather than while merging the guest VMCS controls.
1482 */
1483 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1484 {
1485 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1486 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1487 }
1488 else
1489 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1490
1491 /*
1492 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use them.
1493 *
1494 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1495 * regardless of whether the nested-guest VMCS specifies it because we are free to
1496 * load whatever MSRs we require and we do not need to modify the guest visible copy
1497 * of the VM-entry MSR load area.
1498 */
1499 if ( g_fHmVmxSupportsVmcsEfer
1500#ifndef IN_NEM_DARWIN
1501 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1502#endif
1503 )
1504 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1505 else
1506 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1507
1508 /*
1509 * The following should -not- be set (since we're not in SMM mode):
1510 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1511 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1512 */
1513
1514 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1515 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1516
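     /* Reminder: 'allowed0' seeds fVal with the bits the CPU requires to be 1, while 'allowed1'
        (fZap) is the set of bits the CPU permits to be 1; the check below verifies that every
        bit we want set is actually allowed to be set. */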
1517 if ((fVal & fZap) == fVal)
1518 { /* likely */ }
1519 else
1520 {
1521 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1522 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1523 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1524 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1525 }
1526
1527 /* Commit it to the VMCS. */
1528 if (pVmcsInfo->u32EntryCtls != fVal)
1529 {
1530 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1531 AssertRC(rc);
1532 pVmcsInfo->u32EntryCtls = fVal;
1533 }
1534 }
1535
1536 /*
1537 * VM-exit controls.
1538 */
1539 {
1540 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1541 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1542
1543 /*
1544 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1545 * supported the 1-setting of this bit.
1546 *
1547 * For nested-guests, we set "save debug controls" since the converse
1548 * "load debug controls" is mandatory for nested-guests anyway.
1549 */
1550 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1551
1552 /*
1553 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1554 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1555 * host EFER.LMA and EFER.LME bit to this value. See assertion in
1556 * vmxHCExportHostMsrs().
1557 *
1558 * For nested-guests, we always set this bit as we do not support 32-bit
1559 * hosts.
1560 */
1561 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1562
1563#ifndef IN_NEM_DARWIN
1564 /*
1565 * If the VMCS EFER MSR fields are supported by the hardware, we use them.
1566 *
1567 * For nested-guests, we should use the "save IA32_EFER" control if we also
1568 * used the "load IA32_EFER" control while exporting VM-entry controls.
1569 */
1570 if ( g_fHmVmxSupportsVmcsEfer
1571 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1572 {
1573 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1574 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1575 }
1576#endif
1577
1578 /*
1579 * Enable saving of the VMX-preemption timer value on VM-exit.
1580 * For nested-guests, currently not exposed/used.
1581 */
1582 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1583 * the timer value. */
1584 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1585 {
1586 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1587 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1588 }
1589
1590 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1591 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1592
1593 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1594 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1595 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1596
1597 if ((fVal & fZap) == fVal)
1598 { /* likely */ }
1599 else
1600 {
1601 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1602 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1603 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1604 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1605 }
1606
1607 /* Commit it to the VMCS. */
1608 if (pVmcsInfo->u32ExitCtls != fVal)
1609 {
1610 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1611 AssertRC(rc);
1612 pVmcsInfo->u32ExitCtls = fVal;
1613 }
1614 }
1615
1616 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1617 }
1618 return VINF_SUCCESS;
1619}
1620
1621
1622/**
1623 * Sets the TPR threshold in the VMCS.
1624 *
1625 * @param pVCpu The cross context virtual CPU structure.
1626 * @param pVmcsInfo The VMCS info. object.
1627 * @param u32TprThreshold The TPR threshold (task-priority class only).
1628 */
1629DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1630{
1631 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1632 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1633 RT_NOREF(pVmcsInfo);
1634 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1635 AssertRC(rc);
1636}
1637
1638
1639/**
1640 * Exports the guest APIC TPR state into the VMCS.
1641 *
1642 * @param pVCpu The cross context virtual CPU structure.
1643 * @param pVmxTransient The VMX-transient structure.
1644 *
1645 * @remarks No-long-jump zone!!!
1646 */
1647static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1648{
1649 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1650 {
1651 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1652
1653 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1654 if (!pVmxTransient->fIsNestedGuest)
1655 {
1656 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1657 && APICIsEnabled(pVCpu))
1658 {
1659 /*
1660 * Setup TPR shadowing.
1661 */
1662 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1663 {
1664 bool fPendingIntr = false;
1665 uint8_t u8Tpr = 0;
1666 uint8_t u8PendingIntr = 0;
1667 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1668 AssertRC(rc);
1669
1670 /*
1671 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1672 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1673 * priority of the pending interrupt so we can deliver the interrupt. If there
1674 * are no interrupts pending, set threshold to 0 to not cause any
1675 * TPR-below-threshold VM-exits.
1676 */
1677 uint32_t u32TprThreshold = 0;
1678 if (fPendingIntr)
1679 {
1680 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1681 (which is the Task-Priority Class). */
1682 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1683 const uint8_t u8TprPriority = u8Tpr >> 4;
1684 if (u8PendingPriority <= u8TprPriority)
1685 u32TprThreshold = u8PendingPriority;
1686 }
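     /* Illustrative example (not from the spec): a pending vector of 0x51 is in priority class 5
        while a guest TPR of 0x80 is in class 8, so the interrupt is masked; the threshold is then
        set to 5 and VT-x raises a TPR-below-threshold VM-exit once the guest lowers its TPR below
        priority class 5, at which point we can deliver the interrupt. */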
1687
1688 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1689 }
1690 }
1691 }
1692 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1693 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1694 }
1695}
1696
1697
1698/**
1699 * Gets the guest interruptibility-state and updates related force-flags.
1700 *
1701 * @returns Guest's interruptibility-state.
1702 * @param pVCpu The cross context virtual CPU structure.
1703 *
1704 * @remarks No-long-jump zone!!!
1705 */
1706static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1707{
1708 uint32_t fIntrState;
1709
1710 /*
1711 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1712 */
1713 if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
1714 fIntrState = 0;
1715 else
1716 {
1717 /* If inhibition is active, RIP should've been imported from the VMCS already. */
1718 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1719
1720 if (CPUMIsInInterruptShadowAfterSs(&pVCpu->cpum.GstCtx))
1721 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1722 else
1723 {
1724 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1725
1726 /* Block-by-STI must not be set when interrupts are disabled. */
1727 AssertStmt(pVCpu->cpum.GstCtx.eflags.Bits.u1IF, fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
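     /* Should that ever trip we degrade to block-by-MOV SS, which is still architecturally
        valid, rather than entering the guest with an inconsistent interruptibility state. */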
1728 }
1729 }
1730
1731 /*
1732 * Check if we should inhibit NMI delivery.
1733 */
1734 if (!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
1735 { /* likely */ }
1736 else
1737 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1738
1739 /*
1740 * Validate.
1741 */
1742 /* We don't support block-by-SMI yet. */
1743 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1744
1745 return fIntrState;
1746}
1747
1748
1749/**
1750 * Exports the exception intercepts required for guest execution in the VMCS.
1751 *
1752 * @param pVCpu The cross context virtual CPU structure.
1753 * @param pVmxTransient The VMX-transient structure.
1754 *
1755 * @remarks No-long-jump zone!!!
1756 */
1757static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1758{
1759 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1760 {
1761 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1762 if ( !pVmxTransient->fIsNestedGuest
1763 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1764 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1765 else
1766 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1767
1768 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1769 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1770 }
1771}
1772
1773
1774/**
1775 * Exports the guest's RIP into the guest-state area in the VMCS.
1776 *
1777 * @param pVCpu The cross context virtual CPU structure.
1778 *
1779 * @remarks No-long-jump zone!!!
1780 */
1781static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1782{
1783 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1784 {
1785 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1786
1787 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1788 AssertRC(rc);
1789
1790 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1791 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1792 }
1793}
1794
1795
1796/**
1797 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1798 *
1799 * @param pVCpu The cross context virtual CPU structure.
1800 * @param pVmxTransient The VMX-transient structure.
1801 *
1802 * @remarks No-long-jump zone!!!
1803 */
1804static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1805{
1806 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1807 {
1808 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1809
1810 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits
1811 of RFLAGS are reserved (MBZ). We use bits 63:24 for internal purposes, so there is no need
1812 to assert this; the CPUMX86EFLAGS/CPUMX86RFLAGS union masks these off for us.
1813 Use 32-bit VMWRITE. */
1814 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
1815 Assert((fEFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
1816 AssertMsg(!(fEFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK)), ("%#x\n", fEFlags));
1817
1818#ifndef IN_NEM_DARWIN
1819 /*
1820 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1821 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1822 * can run the real-mode guest code under Virtual 8086 mode.
1823 */
1824 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1825 if (pVmcsInfo->RealMode.fRealOnV86Active)
1826 {
1827 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1828 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1829 Assert(!pVmxTransient->fIsNestedGuest);
1830 pVmcsInfo->RealMode.Eflags.u32 = fEFlags; /* Save the original eflags of the real-mode guest. */
1831 fEFlags |= X86_EFL_VM; /* Set the Virtual 8086 mode bit. */
1832 fEFlags &= ~X86_EFL_IOPL; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1833 }
1834#else
1835 RT_NOREF(pVmxTransient);
1836#endif
1837
1838 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags);
1839 AssertRC(rc);
1840
1841 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1842 Log4Func(("eflags=%#RX32\n", fEFlags));
1843 }
1844}
1845
1846
1847#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1848/**
1849 * Copies the nested-guest VMCS to the shadow VMCS.
1850 *
1851 * @returns VBox status code.
1852 * @param pVCpu The cross context virtual CPU structure.
1853 * @param pVmcsInfo The VMCS info. object.
1854 *
1855 * @remarks No-long-jump zone!!!
1856 */
1857static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1858{
1859 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1860 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1861
1862 /*
1863 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1864 * current VMCS, as we may try saving guest lazy MSRs.
1865 *
1866 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1867 * calling the import VMCS code which is currently performing the guest MSR reads
1868 * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts
1869 * and the rest of the VMX leave session machinery.
1870 */
1871 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1872
1873 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1874 if (RT_SUCCESS(rc))
1875 {
1876 /*
1877 * Copy all guest read/write VMCS fields.
1878 *
1879 * We don't check for VMWRITE failures here for performance reasons and
1880 * because they are not expected to fail, barring irrecoverable conditions
1881 * like hardware errors.
1882 */
1883 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1884 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1885 {
1886 uint64_t u64Val;
1887 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1888 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1889 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1890 }
1891
1892 /*
1893 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1894 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1895 */
1896 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1897 {
1898 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1899 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1900 {
1901 uint64_t u64Val;
1902 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1903 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1904 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1905 }
1906 }
1907
1908 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1909 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1910 }
1911
1912 ASMSetFlags(fEFlags);
1913 return rc;
1914}
1915
1916
1917/**
1918 * Copies the shadow VMCS to the nested-guest VMCS.
1919 *
1920 * @returns VBox status code.
1921 * @param pVCpu The cross context virtual CPU structure.
1922 * @param pVmcsInfo The VMCS info. object.
1923 *
1924 * @remarks Called with interrupts disabled.
1925 */
1926static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1927{
1928 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1929 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1930 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1931
1932 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1933 if (RT_SUCCESS(rc))
1934 {
1935 /*
1936 * Copy guest read/write fields from the shadow VMCS.
1937 * Guest read-only fields cannot be modified, so no need to copy them.
1938 *
1939 * We don't check for VMREAD failures here for performance reasons and
1940 * because they are not expected to fail, barring irrecoverable conditions
1941 * like hardware errors.
1942 */
1943 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1944 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1945 {
1946 uint64_t u64Val;
1947 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1948 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1949 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1950 }
1951
1952 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1953 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1954 }
1955 return rc;
1956}
1957
1958
1959/**
1960 * Enables VMCS shadowing for the given VMCS info. object.
1961 *
1962 * @param pVCpu The cross context virtual CPU structure.
1963 * @param pVmcsInfo The VMCS info. object.
1964 *
1965 * @remarks No-long-jump zone!!!
1966 */
1967static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1968{
1969 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1970 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1971 {
1972 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1973 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
1974 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1975 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
1976 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
1977 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
1978 Log4Func(("Enabled\n"));
1979 }
1980}
1981
1982
1983/**
1984 * Disables VMCS shadowing for the given VMCS info. object.
1985 *
1986 * @param pVCpu The cross context virtual CPU structure.
1987 * @param pVmcsInfo The VMCS info. object.
1988 *
1989 * @remarks No-long-jump zone!!!
1990 */
1991static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1992{
1993 /*
1994 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
1995 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
1996 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
1997 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
1998 *
1999 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2000 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2001 */
2002 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2003 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2004 {
2005 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2006 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2007 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2008 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2009 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2010 Log4Func(("Disabled\n"));
2011 }
2012}
2013#endif
2014
2015
2016/**
2017 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2018 *
2019 * The guest FPU state is always pre-loaded before VM-entry, hence we don't need to bother
2020 * with sharing FPU-related CR0 bits between the guest and host.
2021 *
2022 * @returns VBox status code.
2023 * @param pVCpu The cross context virtual CPU structure.
2024 * @param pVmxTransient The VMX-transient structure.
2025 *
2026 * @remarks No-long-jump zone!!!
2027 */
2028static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2029{
2030 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2031 {
2032 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2033 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2034
2035 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2036 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2037 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2038 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2039 else
2040 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
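     /* Reminder: bits set in 'fSetCr0' (CR0 fixed0) must be 1 and bits clear in 'fZapCr0'
        (CR0 fixed1) must be 0 while in VMX operation; unrestricted guest execution only
        relaxes PE and PG, which is what the adjustment above accounts for. */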
2041
2042 if (!pVmxTransient->fIsNestedGuest)
2043 {
2044 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2045 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2046 uint64_t const u64ShadowCr0 = u64GuestCr0;
2047 Assert(!RT_HI_U32(u64GuestCr0));
2048
2049 /*
2050 * Setup VT-x's view of the guest CR0.
2051 */
2052 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2053 if (VM_IS_VMX_NESTED_PAGING(pVM))
2054 {
2055#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2056 if (CPUMIsGuestPagingEnabled(pVCpu))
2057 {
2058 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2059 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2060 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2061 }
2062 else
2063 {
2064 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2065 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2066 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2067 }
2068
2069 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2070 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2071 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2072#endif
2073 }
2074 else
2075 {
2076 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2077 u64GuestCr0 |= X86_CR0_WP;
2078 }
2079
2080 /*
2081 * Guest FPU bits.
2082 *
2083 * Since we always pre-load the guest FPU before VM-entry, there is no need to track lazy state
2084 * using CR0.TS.
2085 *
2086 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
2087 * set on the first CPUs to support VT-x; there is no mention of it with regards to UX in the VM-entry checks.
2088 */
2089 u64GuestCr0 |= X86_CR0_NE;
2090
2091 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2092 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2093
2094 /*
2095 * Update exception intercepts.
2096 */
2097 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2098#ifndef IN_NEM_DARWIN
2099 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2100 {
2101 Assert(PDMVmmDevHeapIsEnabled(pVM));
2102 Assert(pVM->hm.s.vmx.pRealModeTSS);
2103 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2104 }
2105 else
2106#endif
2107 {
2108 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2109 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2110 if (fInterceptMF)
2111 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2112 }
2113
2114 /* Additional intercepts for debugging; define these yourself explicitly. */
2115#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2116 uXcptBitmap |= 0
2117 | RT_BIT(X86_XCPT_BP)
2118 | RT_BIT(X86_XCPT_DE)
2119 | RT_BIT(X86_XCPT_NM)
2120 | RT_BIT(X86_XCPT_TS)
2121 | RT_BIT(X86_XCPT_UD)
2122 | RT_BIT(X86_XCPT_NP)
2123 | RT_BIT(X86_XCPT_SS)
2124 | RT_BIT(X86_XCPT_GP)
2125 | RT_BIT(X86_XCPT_PF)
2126 | RT_BIT(X86_XCPT_MF)
2127 ;
2128#elif defined(HMVMX_ALWAYS_TRAP_PF)
2129 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2130#endif
2131 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2132 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2133 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2134 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2135 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2136
2137 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2138 u64GuestCr0 |= fSetCr0;
2139 u64GuestCr0 &= fZapCr0;
2140 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2141
2142 Assert(!RT_HI_U32(u64GuestCr0));
2143 Assert(u64GuestCr0 & X86_CR0_NE);
2144
2145 /* Commit the CR0 and related fields to the guest VMCS. */
2146 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2147 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2148 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2149 {
2150 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2151 AssertRC(rc);
2152 }
2153 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2154 {
2155 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2156 AssertRC(rc);
2157 }
2158
2159 /* Update our caches. */
2160 pVmcsInfo->u32ProcCtls = uProcCtls;
2161 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2162
2163 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2164 }
2165 else
2166 {
2167 /*
2168 * With nested-guests, we may have extended the guest/host mask here since we
2169 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2170 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2171 * originally supplied. We must copy those bits from the nested-guest CR0 into
2172 * the nested-guest CR0 read-shadow.
2173 */
2174 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2175 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2176 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2177
2178 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2179 u64GuestCr0 |= fSetCr0;
2180 u64GuestCr0 &= fZapCr0;
2181 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2182
2183 Assert(!RT_HI_U32(u64GuestCr0));
2184 Assert(u64GuestCr0 & X86_CR0_NE);
2185
2186 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2187 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2188 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2189
2190 Log4Func(("cr0=%#RX64 shadow=%#RX64 vmcs_read_shw=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0,
2191 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u, fSetCr0, fZapCr0));
2192 }
2193
2194 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2195 }
2196
2197 return VINF_SUCCESS;
2198}
2199
2200
2201/**
2202 * Exports the guest control registers (CR3, CR4) into the guest-state area
2203 * in the VMCS.
2204 *
2205 * @returns VBox strict status code.
2206 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2207 * without unrestricted guest access and the VMMDev is not presently
2208 * mapped (e.g. EFI32).
2209 *
2210 * @param pVCpu The cross context virtual CPU structure.
2211 * @param pVmxTransient The VMX-transient structure.
2212 *
2213 * @remarks No-long-jump zone!!!
2214 */
2215static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2216{
2217 int rc = VINF_SUCCESS;
2218 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2219
2220 /*
2221 * Guest CR2.
2222 * It's always loaded in the assembler code. Nothing to do here.
2223 */
2224
2225 /*
2226 * Guest CR3.
2227 */
2228 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2229 {
2230 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2231
2232 if (VM_IS_VMX_NESTED_PAGING(pVM))
2233 {
2234#ifndef IN_NEM_DARWIN
2235 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2236 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2237
2238 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2239 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2240 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2241 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2242
2243 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2244 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2245 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
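     /* For reference: EPTP bits 2:0 hold the memory type (6 = write-back) and bits 5:3 hold the
        page-walk length minus one (3 for the 4-level walk used here); the assertions below
        double-check exactly that. */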
2246
2247 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2248 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2249 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2250 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2251 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2252 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2253 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2254
2255 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2256 AssertRC(rc);
2257#endif
2258
2259 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2260 uint64_t u64GuestCr3 = pCtx->cr3;
2261 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2262 || CPUMIsGuestPagingEnabledEx(pCtx))
2263 {
2264 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2265 if (CPUMIsGuestInPAEModeEx(pCtx))
2266 {
2267 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2268 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2269 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2270 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2271 }
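     /* With EPT and PAE paging the CPU uses the four PDPTE fields above on VM-entry rather
        than re-reading the PDPTEs from the table CR3 points at. */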
2272
2273 /*
2274 * With nested paging, the guest's view of its CR3 is left unblemished when the
2275 * guest is using paging, or when we have unrestricted guest execution to handle
2276 * the guest while it's not using paging.
2277 */
2278 }
2279#ifndef IN_NEM_DARWIN
2280 else
2281 {
2282 /*
2283 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2284 * thinks it accesses physical memory directly, we use our identity-mapped
2285 * page table to map guest-linear to guest-physical addresses. EPT takes care
2286 * of translating it to host-physical addresses.
2287 */
2288 RTGCPHYS GCPhys;
2289 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2290
2291 /* We obtain it here every time as the guest could have relocated this PCI region. */
2292 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2293 if (RT_SUCCESS(rc))
2294 { /* likely */ }
2295 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2296 {
2297 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2298 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2299 }
2300 else
2301 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2302
2303 u64GuestCr3 = GCPhys;
2304 }
2305#endif
2306
2307 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2308 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2309 AssertRC(rc);
2310 }
2311 else
2312 {
2313 Assert(!pVmxTransient->fIsNestedGuest);
2314 /* Non-nested paging case, just use the hypervisor's CR3. */
2315 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2316
2317 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2318 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2319 AssertRC(rc);
2320 }
2321
2322 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2323 }
2324
2325 /*
2326 * Guest CR4.
2327 * ASSUMES this is done every time we get in from ring-3! (XCR0)
2328 */
2329 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2330 {
2331 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2332 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2333
2334 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2335 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2336
2337 /*
2338 * With nested-guests, we may have extended the guest/host mask here (since we
2339 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means the
2340 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2341 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2342 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2343 */
2344 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2345 uint64_t u64GuestCr4 = pCtx->cr4;
2346 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2347 ? pCtx->cr4
2348 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2349 Assert(!RT_HI_U32(u64GuestCr4));
2350
2351#ifndef IN_NEM_DARWIN
2352 /*
2353 * Setup VT-x's view of the guest CR4.
2354 *
2355 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2356 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2357 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2358 *
2359 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2360 */
2361 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2362 {
2363 Assert(pVM->hm.s.vmx.pRealModeTSS);
2364 Assert(PDMVmmDevHeapIsEnabled(pVM));
2365 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2366 }
2367#endif
2368
2369 if (VM_IS_VMX_NESTED_PAGING(pVM))
2370 {
2371 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2372 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2373 {
2374 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2375 u64GuestCr4 |= X86_CR4_PSE;
2376 /* Our identity mapping is a 32-bit page directory. */
2377 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2378 }
2379 /* else use guest CR4. */
2380 }
2381 else
2382 {
2383 Assert(!pVmxTransient->fIsNestedGuest);
2384
2385 /*
2386 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
2387 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2388 */
2389 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2390 {
2391 case PGMMODE_REAL: /* Real-mode. */
2392 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2393 case PGMMODE_32_BIT: /* 32-bit paging. */
2394 {
2395 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2396 break;
2397 }
2398
2399 case PGMMODE_PAE: /* PAE paging. */
2400 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2401 {
2402 u64GuestCr4 |= X86_CR4_PAE;
2403 break;
2404 }
2405
2406 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2407 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2408 {
2409#ifdef VBOX_WITH_64_BITS_GUESTS
2410 /* For our assumption in vmxHCShouldSwapEferMsr. */
2411 Assert(u64GuestCr4 & X86_CR4_PAE);
2412 break;
2413#endif
2414 }
2415 default:
2416 AssertFailed();
2417 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2418 }
2419 }
2420
2421 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2422 u64GuestCr4 |= fSetCr4;
2423 u64GuestCr4 &= fZapCr4;
2424
2425 Assert(!RT_HI_U32(u64GuestCr4));
2426 Assert(u64GuestCr4 & X86_CR4_VMXE);
2427
2428 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2429 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2430 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2431
2432#ifndef IN_NEM_DARWIN
2433 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2434 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2435 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2436 {
2437 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2438 hmR0VmxUpdateStartVmFunction(pVCpu);
2439 }
2440#endif
2441
2442 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2443
2444 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2445 }
2446 return rc;
2447}
2448
2449
2450#ifdef VBOX_STRICT
2451/**
2452 * Strict function to validate segment registers.
2453 *
2454 * @param pVCpu The cross context virtual CPU structure.
2455 * @param pVmcsInfo The VMCS info. object.
2456 *
2457 * @remarks Will import guest CR0 on strict builds during validation of
2458 * segments.
2459 */
2460static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2461{
2462 /*
2463 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2464 *
2465 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2466 * that vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2467 * unusable bit and doesn't change the guest-context value.
2468 */
2469 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2470 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2471 vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2472 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2473 && ( !CPUMIsGuestInRealModeEx(pCtx)
2474 && !CPUMIsGuestInV86ModeEx(pCtx)))
2475 {
2476 /* Protected mode checks */
2477 /* CS */
2478 Assert(pCtx->cs.Attr.n.u1Present);
2479 Assert(!(pCtx->cs.Attr.u & 0xf00));
2480 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2481 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2482 || !(pCtx->cs.Attr.n.u1Granularity));
2483 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2484 || (pCtx->cs.Attr.n.u1Granularity));
2485 /* CS cannot be loaded with NULL in protected mode. */
2486 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2487 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2488 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2489 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2490 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2491 else
2492 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2493 /* SS */
2494 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2495 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2496 if ( !(pCtx->cr0 & X86_CR0_PE)
2497 || pCtx->cs.Attr.n.u4Type == 3)
2498 {
2499 Assert(!pCtx->ss.Attr.n.u2Dpl);
2500 }
2501 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2502 {
2503 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2504 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2505 Assert(pCtx->ss.Attr.n.u1Present);
2506 Assert(!(pCtx->ss.Attr.u & 0xf00));
2507 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2508 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2509 || !(pCtx->ss.Attr.n.u1Granularity));
2510 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2511 || (pCtx->ss.Attr.n.u1Granularity));
2512 }
2513 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2514 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2515 {
2516 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2517 Assert(pCtx->ds.Attr.n.u1Present);
2518 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2519 Assert(!(pCtx->ds.Attr.u & 0xf00));
2520 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2521 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2522 || !(pCtx->ds.Attr.n.u1Granularity));
2523 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2524 || (pCtx->ds.Attr.n.u1Granularity));
2525 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2526 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2527 }
2528 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2529 {
2530 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2531 Assert(pCtx->es.Attr.n.u1Present);
2532 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2533 Assert(!(pCtx->es.Attr.u & 0xf00));
2534 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2535 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2536 || !(pCtx->es.Attr.n.u1Granularity));
2537 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2538 || (pCtx->es.Attr.n.u1Granularity));
2539 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2540 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2541 }
2542 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2543 {
2544 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2545 Assert(pCtx->fs.Attr.n.u1Present);
2546 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2547 Assert(!(pCtx->fs.Attr.u & 0xf00));
2548 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2549 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2550 || !(pCtx->fs.Attr.n.u1Granularity));
2551 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2552 || (pCtx->fs.Attr.n.u1Granularity));
2553 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2554 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2555 }
2556 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2557 {
2558 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2559 Assert(pCtx->gs.Attr.n.u1Present);
2560 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2561 Assert(!(pCtx->gs.Attr.u & 0xf00));
2562 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2563 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2564 || !(pCtx->gs.Attr.n.u1Granularity));
2565 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2566 || (pCtx->gs.Attr.n.u1Granularity));
2567 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2568 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2569 }
2570 /* 64-bit capable CPUs. */
2571 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2572 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2573 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2574 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2575 }
2576 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2577 || ( CPUMIsGuestInRealModeEx(pCtx)
2578 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2579 {
2580 /* Real and v86 mode checks. */
2581 /* vmxHCExportGuestSegReg() writes the modified attributes into the VMCS. We want what we're feeding to VT-x. */
2582 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2583#ifndef IN_NEM_DARWIN
2584 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2585 {
2586 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2587 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2588 }
2589 else
2590#endif
2591 {
2592 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2593 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2594 }
2595
2596 /* CS */
2597 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#RX64 sel=%#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2598 Assert(pCtx->cs.u32Limit == 0xffff);
2599 AssertMsg(u32CSAttr == 0xf3, ("cs=%#x %#x ", pCtx->cs.Sel, u32CSAttr));
2600 /* SS */
2601 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2602 Assert(pCtx->ss.u32Limit == 0xffff);
2603 Assert(u32SSAttr == 0xf3);
2604 /* DS */
2605 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2606 Assert(pCtx->ds.u32Limit == 0xffff);
2607 Assert(u32DSAttr == 0xf3);
2608 /* ES */
2609 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2610 Assert(pCtx->es.u32Limit == 0xffff);
2611 Assert(u32ESAttr == 0xf3);
2612 /* FS */
2613 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2614 Assert(pCtx->fs.u32Limit == 0xffff);
2615 Assert(u32FSAttr == 0xf3);
2616 /* GS */
2617 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2618 Assert(pCtx->gs.u32Limit == 0xffff);
2619 Assert(u32GSAttr == 0xf3);
2620 /* 64-bit capable CPUs. */
2621 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2622 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2623 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2624 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2625 }
2626}
2627#endif /* VBOX_STRICT */
2628
2629
2630/**
2631 * Exports a guest segment register into the guest-state area in the VMCS.
2632 *
2633 * @returns VBox status code.
2634 * @param pVCpu The cross context virtual CPU structure.
2635 * @param pVmcsInfo The VMCS info. object.
2636 * @param iSegReg The segment register number (X86_SREG_XXX).
2637 * @param pSelReg Pointer to the segment selector.
2638 *
2639 * @remarks No-long-jump zone!!!
2640 */
2641static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2642{
2643 Assert(iSegReg < X86_SREG_COUNT);
2644
2645 uint32_t u32Access = pSelReg->Attr.u;
2646#ifndef IN_NEM_DARWIN
2647 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2648#endif
2649 {
2650 /*
2651 * The way to differentiate between whether this is really a null selector or was just
2652 * a selector loaded with 0 in real-mode is using the segment attributes. A selector
2653 * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
2654 * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure
2655 * NULL selectors loaded in protected-mode have their attribute as 0.
2656 */
2657 if (u32Access)
2658 { }
2659 else
2660 u32Access = X86DESCATTR_UNUSABLE;
2661 }
2662#ifndef IN_NEM_DARWIN
2663 else
2664 {
2665 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2666 u32Access = 0xf3;
2667 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2668 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2669 RT_NOREF_PV(pVCpu);
2670 }
2671#else
2672 RT_NOREF(pVmcsInfo);
2673#endif
2674
2675 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2676 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2677 ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
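     /* Note: "%.2s" together with "ESCSSSDSFSGS" + iSegReg * 2 picks the two-letter register
        name (ES, CS, SS, DS, FS, GS) matching the X86_SREG_XXX ordering. */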
2678
2679 /*
2680 * Commit it to the VMCS.
2681 */
2682 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2683 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2684 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2685 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2686 return VINF_SUCCESS;
2687}
2688
2689
2690/**
2691 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2692 * area in the VMCS.
2693 *
2694 * @returns VBox status code.
2695 * @param pVCpu The cross context virtual CPU structure.
2696 * @param pVmxTransient The VMX-transient structure.
2697 *
2698 * @remarks Will import guest CR0 on strict builds during validation of
2699 * segments.
2700 * @remarks No-long-jump zone!!!
2701 */
2702static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2703{
2704 int rc = VERR_INTERNAL_ERROR_5;
2705#ifndef IN_NEM_DARWIN
2706 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2707#endif
2708 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2709 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2710#ifndef IN_NEM_DARWIN
2711 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2712#endif
2713
2714 /*
2715 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2716 */
2717 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2718 {
2719 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2720 {
2721 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2722#ifndef IN_NEM_DARWIN
2723 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2724 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2725#endif
2726 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2727 AssertRC(rc);
2728 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2729 }
2730
2731 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2732 {
2733 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2734#ifndef IN_NEM_DARWIN
2735 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2736 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2737#endif
2738 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2739 AssertRC(rc);
2740 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2741 }
2742
2743 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2744 {
2745 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2746#ifndef IN_NEM_DARWIN
2747 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2748 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2749#endif
2750 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2751 AssertRC(rc);
2752 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2753 }
2754
2755 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2756 {
2757 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2758#ifndef IN_NEM_DARWIN
2759 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2760 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2761#endif
2762 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2763 AssertRC(rc);
2764 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2765 }
2766
2767 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2768 {
2769 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2770#ifndef IN_NEM_DARWIN
2771 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2772 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2773#endif
2774 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2775 AssertRC(rc);
2776 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2777 }
2778
2779 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2780 {
2781 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2782#ifndef IN_NEM_DARWIN
2783 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2784 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2785#endif
2786 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2787 AssertRC(rc);
2788 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2789 }
2790
2791#ifdef VBOX_STRICT
2792 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2793#endif
2794 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2795 pCtx->cs.Attr.u));
2796 }
2797
2798 /*
2799 * Guest TR.
2800 */
2801 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2802 {
2803 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2804
2805 /*
2806 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2807 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2808 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is set up.
2809 */
2810 uint16_t u16Sel;
2811 uint32_t u32Limit;
2812 uint64_t u64Base;
2813 uint32_t u32AccessRights;
2814#ifndef IN_NEM_DARWIN
2815 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2816#endif
2817 {
2818 u16Sel = pCtx->tr.Sel;
2819 u32Limit = pCtx->tr.u32Limit;
2820 u64Base = pCtx->tr.u64Base;
2821 u32AccessRights = pCtx->tr.Attr.u;
2822 }
2823#ifndef IN_NEM_DARWIN
2824 else
2825 {
2826 Assert(!pVmxTransient->fIsNestedGuest);
2827 Assert(pVM->hm.s.vmx.pRealModeTSS);
2828 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2829
2830 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2831 RTGCPHYS GCPhys;
2832 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2833 AssertRCReturn(rc, rc);
2834
2835 X86DESCATTR DescAttr;
2836 DescAttr.u = 0;
2837 DescAttr.n.u1Present = 1;
2838 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2839
2840 u16Sel = 0;
2841 u32Limit = HM_VTX_TSS_SIZE;
2842 u64Base = GCPhys;
2843 u32AccessRights = DescAttr.u;
2844 }
2845#endif
2846
2847 /* Validate. */
2848 Assert(!(u16Sel & RT_BIT(2)));
2849 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2850 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2851 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2852 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2853 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2854 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2855 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2856 Assert( (u32Limit & 0xfff) == 0xfff
2857 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2858 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2859 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2860
2861 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2862 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2863 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2864 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2865
2866 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2867 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2868 }
2869
2870 /*
2871 * Guest GDTR.
2872 */
2873 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2874 {
2875 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2876
2877 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2878 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2879
2880 /* Validate. */
2881 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2882
2883 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2884 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2885 }
2886
2887 /*
2888 * Guest LDTR.
2889 */
2890 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2891 {
2892 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2893
2894 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
2895 uint32_t u32Access;
2896 if ( !pVmxTransient->fIsNestedGuest
2897 && !pCtx->ldtr.Attr.u)
2898 u32Access = X86DESCATTR_UNUSABLE;
2899 else
2900 u32Access = pCtx->ldtr.Attr.u;
2901
2902 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2903 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2904 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2905 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2906
2907 /* Validate. */
2908 if (!(u32Access & X86DESCATTR_UNUSABLE))
2909 {
2910 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2911 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2912 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2913 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2914 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2915 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2916 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2917 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2918 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2919 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2920 }
2921
2922 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2923 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2924 }
2925
2926 /*
2927 * Guest IDTR.
2928 */
2929 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2930 {
2931 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2932
2933 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2934 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2935
2936 /* Validate. */
2937 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2938
2939 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2940 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2941 }
2942
2943 return VINF_SUCCESS;
2944}
2945
2946
2947/**
2948 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2949 * VM-exit interruption info type.
2950 *
2951 * @returns The IEM exception flags.
2952 * @param uVector The event vector.
2953 * @param uVmxEventType The VMX event type.
2954 *
2955 * @remarks This function currently only constructs flags required for
2956 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g. error-code
2957 * and CR2 aspects of an exception are not included).
2958 */
2959static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2960{
2961 uint32_t fIemXcptFlags;
2962 switch (uVmxEventType)
2963 {
2964 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2965 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2966 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2967 break;
2968
2969 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2970 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2971 break;
2972
2973 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2974 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2975 break;
2976
2977 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2978 {
2979 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2980 if (uVector == X86_XCPT_BP)
2981 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
2982 else if (uVector == X86_XCPT_OF)
2983 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
2984 else
2985 {
2986 fIemXcptFlags = 0;
2987 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
2988 }
2989 break;
2990 }
2991
2992 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
2993 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2994 break;
2995
2996 default:
2997 fIemXcptFlags = 0;
2998 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
2999 break;
3000 }
3001 return fIemXcptFlags;
3002}
3003
3004
3005/**
3006 * Sets an event as a pending event to be injected into the guest.
3007 *
3008 * @param pVCpu The cross context virtual CPU structure.
3009 * @param u32IntInfo The VM-entry interruption-information field.
3010 * @param cbInstr The VM-entry instruction length in bytes (for
3011 * software interrupts, exceptions and privileged
3012 * software exceptions).
3013 * @param u32ErrCode The VM-entry exception error code.
3014 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3015 * page-fault.
3016 */
3017DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3018 RTGCUINTPTR GCPtrFaultAddress)
3019{
3020 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3021 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3022 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3023 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3024 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3025 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3026}
3027
3028
3029/**
3030 * Sets an external interrupt as pending-for-injection into the VM.
3031 *
3032 * @param pVCpu The cross context virtual CPU structure.
3033 * @param u8Interrupt The external interrupt vector.
3034 */
3035DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3036{
3037 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3038 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3039 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3040 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3041 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3042 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
3043}
3044
3045
3046/**
3047 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3048 *
3049 * @param pVCpu The cross context virtual CPU structure.
3050 */
3051DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3052{
3053 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3054 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3055 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3056 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3057 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3058 Log4Func(("NMI pending injection\n"));
3059}
3060
3061
3062/**
3063 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3064 *
3065 * @param pVCpu The cross context virtual CPU structure.
3066 */
3067DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3068{
3069 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3070 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3071 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3072 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
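    /* Note: the #DF error code is architecturally always zero, hence ERR_CODE_VALID
       is set above while u32ErrCode below stays 0. */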
3073 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3074}
3075
3076
3077/**
3078 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3079 *
3080 * @param pVCpu The cross context virtual CPU structure.
3081 */
3082DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3083{
3084 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3085 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3086 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3087 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3088 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3089}
3090
3091
3092/**
3093 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3094 *
3095 * @param pVCpu The cross context virtual CPU structure.
3096 */
3097DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3098{
3099 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3100 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3101 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3102 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3103 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3104}
3105
3106
3107#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3108/**
3109 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3110 *
3111 * @param pVCpu The cross context virtual CPU structure.
3112 * @param u32ErrCode The error code for the general-protection exception.
3113 */
3114DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3115{
3116 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3117 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3118 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3119 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3120 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3121}
3122
3123
3124/**
3125 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3126 *
3127 * @param pVCpu The cross context virtual CPU structure.
3128 * @param u32ErrCode The error code for the stack exception.
3129 */
3130DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3131{
3132 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3133 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3134 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3135 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3136 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3137}
3138#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3139
3140
3141/**
3142 * Fixes up attributes for the specified segment register.
3143 *
3144 * @param pVCpu The cross context virtual CPU structure.
3145 * @param pSelReg The segment register that needs fixing.
3146 * @param pszRegName The register name (for logging and assertions).
3147 */
3148static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3149{
3150 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3151
3152 /*
3153 * If VT-x marks the segment as unusable, most other bits remain undefined:
3154 * - For CS the L, D and G bits have meaning.
3155 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3156 * - For the remaining data segments no bits are defined.
3157 *
3158 * The present bit and the unusable bit have been observed to be set at the
3159 * same time (the selector was supposed to be invalid as we started executing
3160 * a V8086 interrupt in ring-0).
3161 *
3162 * What should be important for the rest of the VBox code, is that the P bit is
3163 * cleared. Some of the other VBox code recognizes the unusable bit, but
3164 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3165 * safe side here, we'll strip off P and other bits we don't care about. If
3166 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3167 *
3168 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3169 */
3170#ifdef VBOX_STRICT
3171 uint32_t const uAttr = pSelReg->Attr.u;
3172#endif
3173
3174 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3175 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3176 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3177
3178#ifdef VBOX_STRICT
3179# ifndef IN_NEM_DARWIN
3180 VMMRZCallRing3Disable(pVCpu);
3181# endif
3182 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3183# ifdef DEBUG_bird
3184 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3185 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3186 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3187# endif
3188# ifndef IN_NEM_DARWIN
3189 VMMRZCallRing3Enable(pVCpu);
3190# endif
3191 NOREF(uAttr);
3192#endif
3193 RT_NOREF2(pVCpu, pszRegName);
3194}
3195
3196
3197/**
3198 * Imports a guest segment register from the current VMCS into the guest-CPU
3199 * context.
3200 *
3201 * @param pVCpu The cross context virtual CPU structure.
3202 * @tparam a_iSegReg The segment register number (X86_SREG_XXX).
3203 *
3204 * @remarks Called with interrupts and/or preemption disabled.
3205 */
3206template<uint32_t const a_iSegReg>
3207DECLINLINE(void) vmxHCImportGuestSegReg(PVMCPUCC pVCpu)
3208{
3209 AssertCompile(a_iSegReg < X86_SREG_COUNT);
3210 /* Check that the macros we depend upon here and in the corresponding export function work: */
3211#define MY_SEG_VMCS_FIELD(a_FieldPrefix, a_FieldSuff) \
3212 ( a_iSegReg == X86_SREG_ES ? a_FieldPrefix ## ES ## a_FieldSuff \
3213 : a_iSegReg == X86_SREG_CS ? a_FieldPrefix ## CS ## a_FieldSuff \
3214 : a_iSegReg == X86_SREG_SS ? a_FieldPrefix ## SS ## a_FieldSuff \
3215 : a_iSegReg == X86_SREG_DS ? a_FieldPrefix ## DS ## a_FieldSuff \
3216 : a_iSegReg == X86_SREG_FS ? a_FieldPrefix ## FS ## a_FieldSuff \
3217 : a_iSegReg == X86_SREG_GS ? a_FieldPrefix ## GS ## a_FieldSuff : 0)
3218 AssertCompile(VMX_VMCS_GUEST_SEG_BASE(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS_GUEST_,_BASE));
3219 AssertCompile(VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS16_GUEST_,_SEL));
3220 AssertCompile(VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_LIMIT));
3221 AssertCompile(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_ACCESS_RIGHTS));
3222
3223 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[a_iSegReg];
3224
3225 uint16_t u16Sel;
3226 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg), &u16Sel); AssertRC(rc);
3227 pSelReg->Sel = u16Sel;
3228 pSelReg->ValidSel = u16Sel;
3229
3230 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3231 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(a_iSegReg), &pSelReg->u64Base); AssertRC(rc);
3232
3233 uint32_t u32Attr;
3234 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg), &u32Attr); AssertRC(rc);
3235 pSelReg->Attr.u = u32Attr;
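    /* Note: the "ES\0CS\0SS\0DS\0FS\0GS" literal below is a packed table of 3-byte
       register names, so adding a_iSegReg * 3 picks the matching NUL-terminated
       name without a separate lookup table. */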
3236 if (u32Attr & X86DESCATTR_UNUSABLE)
3237 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + a_iSegReg * 3);
3238
3239 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3240}
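/* This template is instantiated per segment register, e.g.
   vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu), as done by the guest-state import
   code further down. */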
3241
3242
3243/**
3244 * Imports the guest LDTR from the VMCS into the guest-CPU context.
3245 *
3246 * @param pVCpu The cross context virtual CPU structure.
3247 *
3248 * @remarks Called with interrupts and/or preemption disabled.
3249 */
3250DECL_FORCE_INLINE(void) vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3251{
3252 uint16_t u16Sel;
3253 uint64_t u64Base;
3254 uint32_t u32Limit, u32Attr;
3255 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3256 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3257 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3258 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3259
3260 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3261 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3262 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3263 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3264 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3265 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3266 if (u32Attr & X86DESCATTR_UNUSABLE)
3267 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3268}
3269
3270
3271/**
3272 * Imports the guest TR from the VMCS into the guest-CPU context.
3273 *
3274 * @param pVCpu The cross context virtual CPU structure.
3275 *
3276 * @remarks Called with interrupts and/or preemption disabled.
3277 */
3278DECL_FORCE_INLINE(void) vmxHCImportGuestTr(PVMCPUCC pVCpu)
3279{
3280 uint16_t u16Sel;
3281 uint64_t u64Base;
3282 uint32_t u32Limit, u32Attr;
3283 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3284 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3285 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3286 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3287
3288 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3289 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3290 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3291 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3292 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3293 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3294 /* TR is the only selector that can never be unusable. */
3295 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3296}
3297
3298
3299/**
3300 * Core: Imports the guest RIP from the VMCS into the guest-CPU context.
3301 *
3302 * @returns The RIP value.
3303 * @param pVCpu The cross context virtual CPU structure.
3304 *
3305 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3306 * @remarks Do -not- call this function directly!
3307 */
3308DECL_FORCE_INLINE(uint64_t) vmxHCImportGuestCoreRip(PVMCPUCC pVCpu)
3309{
3310 uint64_t u64Val;
3311 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3312 AssertRC(rc);
3313
3314 pVCpu->cpum.GstCtx.rip = u64Val;
3315
3316 return u64Val;
3317}
3318
3319
3320/**
3321 * Imports the guest RIP from the VMCS into the guest-CPU context.
3322 *
3323 * @param pVCpu The cross context virtual CPU structure.
3324 *
3325 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3326 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3327 * instead!!!
3328 */
3329DECL_FORCE_INLINE(void) vmxHCImportGuestRip(PVMCPUCC pVCpu)
3330{
3331 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
3332 {
3333 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3334 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3335 }
3336}
3337
3338
3339/**
3340 * Core: Imports the guest RFLAGS from the VMCS into the guest-CPU context.
3341 *
3342 * @param pVCpu The cross context virtual CPU structure.
3343 * @param pVmcsInfo The VMCS info. object.
3344 *
3345 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3346 * @remarks Do -not- call this function directly!
3347 */
3348DECL_FORCE_INLINE(void) vmxHCImportGuestCoreRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3349{
3350 uint64_t fRFlags;
3351 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &fRFlags);
3352 AssertRC(rc);
3353
3354 Assert((fRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
3355 Assert((fRFlags & ~(uint64_t)(X86_EFL_1 | X86_EFL_LIVE_MASK)) == 0);
3356
3357 pVCpu->cpum.GstCtx.rflags.u = fRFlags;
3358#ifndef IN_NEM_DARWIN
3359 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3360 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
3361 { /* most likely */ }
3362 else
3363 {
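        /* Real-on-V86 emulation: hide the VM flag from the guest context and restore
           the IOPL that was stashed when we entered real-on-V86 execution. */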
3364 pVCpu->cpum.GstCtx.eflags.Bits.u1VM = 0;
3365 pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3366 }
3367#else
3368 RT_NOREF(pVmcsInfo);
3369#endif
3370}
3371
3372
3373/**
3374 * Imports the guest RFLAGS from the VMCS into the guest-CPU context.
3375 *
3376 * @param pVCpu The cross context virtual CPU structure.
3377 * @param pVmcsInfo The VMCS info. object.
3378 *
3379 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3380 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3381 * instead!!!
3382 */
3383DECL_FORCE_INLINE(void) vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3384{
3385 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RFLAGS)
3386 {
3387 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3388 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3389 }
3390}
3391
3392
3393#ifndef IN_NEM_DARWIN
3394/**
3395 * Imports the guest TSX AUX and certain other MSRs from the VMCS into the guest-CPU
3396 * context.
3397 *
3398 * The other MSRs are in the VM-exit MSR-store.
3399 *
3400 * @returns VBox status code.
3401 * @param pVCpu The cross context virtual CPU structure.
3402 * @param pVmcsInfo The VMCS info. object.
3403 * @param fEFlags Saved EFLAGS for restoring the interrupt flag (in case of
3404 * unexpected errors). Ignored in NEM/darwin context.
3405 */
3406DECL_FORCE_INLINE(int) vmxHCImportGuestTscAuxAndOtherMsrs(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3407{
3408 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3409 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3410 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3411 Assert(pMsrs);
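    /* Sanity: the VM-exit MSR-store area must respect the CPU's advertised MSR count
       limit and fit within a single 4K page. */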
3412 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3413 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3414 for (uint32_t i = 0; i < cMsrs; i++)
3415 {
3416 uint32_t const idMsr = pMsrs[i].u32Msr;
3417 switch (idMsr)
3418 {
3419 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3420 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3421 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3422 default:
3423 {
3424 uint32_t idxLbrMsr;
3425 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3426 if (VM_IS_VMX_LBR(pVM))
3427 {
3428 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3429 {
3430 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3431 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3432 break;
3433 }
3434 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3435 {
3436                        Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3437 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3438 break;
3439 }
3440 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3441 {
3442 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3443 break;
3444 }
3445 /* Fallthru (no break) */
3446 }
3447 pVCpu->cpum.GstCtx.fExtrn = 0;
3448                VCPU_2_VMXSTATE(pVCpu).u32HMError = idMsr;
3449 ASMSetFlags(fEFlags);
3450 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3451 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3452 }
3453 }
3454 }
3455 return VINF_SUCCESS;
3456}
3457#endif /* !IN_NEM_DARWIN */
3458
3459
3460/**
3461 * Imports the guest CR0 from the VMCS into the guest-CPU context.
3462 *
3463 * @param pVCpu The cross context virtual CPU structure.
3464 * @param pVmcsInfo The VMCS info. object.
3465 */
3466DECL_FORCE_INLINE(void) vmxHCImportGuestCr0(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3467{
3468 uint64_t u64Cr0;
3469 uint64_t u64Shadow;
3470 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3471 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3472#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3473 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3474 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3475#else
3476 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
3477 {
3478 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3479 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3480 }
3481 else
3482 {
3483 /*
3484 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3485 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3486 * re-construct CR0. See @bugref{9180#c95} for details.
3487 */
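        /* Roughly: bits intercepted by the nested hypervisor are taken from the nested
           VMCS guest CR0, bits intercepted only by us from the read shadow, and the
           remaining bits from the hardware value read above. */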
3488 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3489 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3490 u64Cr0 = (u64Cr0 & ~(pVmcsInfoGst->u64Cr0Mask & pVmcsNstGst->u64Cr0Mask.u))
3491 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3492 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3493 Assert(u64Cr0 & X86_CR0_NE);
3494 }
3495#endif
3496
3497#ifndef IN_NEM_DARWIN
3498 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3499#endif
3500 CPUMSetGuestCR0(pVCpu, u64Cr0);
3501#ifndef IN_NEM_DARWIN
3502 VMMRZCallRing3Enable(pVCpu);
3503#endif
3504}
3505
3506
3507/**
3508 * Imports the guest CR3 from the VMCS into the guest-CPU context.
3509 *
3510 * @param pVCpu The cross context virtual CPU structure.
3511 */
3512DECL_FORCE_INLINE(void) vmxHCImportGuestCr3(PVMCPUCC pVCpu)
3513{
3514 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3515 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3516
3517 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3518 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3519 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3520 && CPUMIsGuestPagingEnabledEx(pCtx)))
3521 {
3522 uint64_t u64Cr3;
3523 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3524 if (pCtx->cr3 != u64Cr3)
3525 {
3526 pCtx->cr3 = u64Cr3;
3527 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3528 }
3529
3530 /*
3531 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3532 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3533 */
3534 if (CPUMIsGuestInPAEModeEx(pCtx))
3535 {
3536 X86PDPE aPaePdpes[4];
3537 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3538 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3539 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3540 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3541 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3542 {
3543 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3544 /* PGM now updates PAE PDPTEs while updating CR3. */
3545 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3546 }
3547 }
3548 }
3549}
3550
3551
3552/**
3553 * Imports the guest CR4 from the VMCS into the guest-CPU context.
3554 *
3555 * @param pVCpu The cross context virtual CPU structure.
3556 * @param pVmcsInfo The VMCS info. object.
3557 */
3558DECL_FORCE_INLINE(void) vmxHCImportGuestCr4(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3559{
3560 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3561 uint64_t u64Cr4;
3562 uint64_t u64Shadow;
3563 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3564 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3565#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3566 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3567 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3568#else
3569 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3570 {
3571 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3572 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3573 }
3574 else
3575 {
3576 /*
3577 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3578 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3579 * re-construct CR4. See @bugref{9180#c95} for details.
3580 */
3581 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3582 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3583 u64Cr4 = (u64Cr4 & ~(pVmcsInfo->u64Cr4Mask & pVmcsNstGst->u64Cr4Mask.u))
3584 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3585 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3586 Assert(u64Cr4 & X86_CR4_VMXE);
3587 }
3588#endif
3589 pCtx->cr4 = u64Cr4;
3590}
3591
3592
3593/**
3594 * Worker for vmxHCImportGuestIntrState that handles the case where any of the
3595 * relevant VMX_VMCS32_GUEST_INT_STATE bits are set.
3596 */
3597DECL_NO_INLINE(static,void) vmxHCImportGuestIntrStateSlow(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fGstIntState)
3598{
3599 /*
3600 * We must import RIP here to set our EM interrupt-inhibited state.
3601 * We also import RFLAGS as our code that evaluates pending interrupts
3602 * before VM-entry requires it.
3603 */
3604 vmxHCImportGuestRip(pVCpu);
3605 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3606
3607 CPUMUpdateInterruptShadowSsStiEx(&pVCpu->cpum.GstCtx,
3608 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
3609 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
3610 pVCpu->cpum.GstCtx.rip);
3611 CPUMUpdateInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx, RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
3612}
3613
3614
3615/**
3616 * Imports the guest interruptibility-state from the VMCS into the guest-CPU
3617 * context.
3618 *
3619 * @note May import RIP and RFLAGS if interrupt or NMI are blocked.
3620 *
3621 * @param pVCpu The cross context virtual CPU structure.
3622 * @param pVmcsInfo The VMCS info. object.
3623 *
3624 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3625 * do not log!
3626 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3627 * instead!!!
3628 */
3629DECLINLINE(void) vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3630{
3631 uint32_t u32Val;
3632 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
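    /* The CPU should never report STI and MOV-SS blocking at the same time. */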
3633 Assert((u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
3634 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
3635 if (!u32Val)
3636 {
3637 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
3638 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
3639 }
3640 else
3641 vmxHCImportGuestIntrStateSlow(pVCpu, pVmcsInfo, u32Val);
3642}
3643
3644
3645/**
3646 * Worker for VMXR0ImportStateOnDemand.
3647 *
3648 * @returns VBox status code.
3649 * @param pVCpu The cross context virtual CPU structure.
3650 * @param pVmcsInfo The VMCS info. object.
3651 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3652 */
3653static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3654{
3655 int rc = VINF_SUCCESS;
3656 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3657 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3658 uint32_t u32Val;
3659
3660 /*
3661 * Note! This is a hack to work around a mysterious BSOD observed with release builds
3662 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3663 * neither are other host platforms.
3664 *
3665 * Committing this temporarily as it prevents BSOD.
3666 *
3667 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3668 */
3669#ifdef RT_OS_WINDOWS
3670 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3671 return VERR_HM_IPE_1;
3672#endif
3673
3674 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3675
3676#ifndef IN_NEM_DARWIN
3677 /*
3678 * We disable interrupts to make the updating of the state and in particular
3679 * the fExtrn modification atomic wrt preemption hooks.
3680 */
3681 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3682#endif
3683
3684 fWhat &= pCtx->fExtrn;
3685 if (fWhat)
3686 {
3687 do
3688 {
3689 if (fWhat & CPUMCTX_EXTRN_RIP)
3690 vmxHCImportGuestRip(pVCpu);
3691
3692 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3693 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3694
3695 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3696 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3697 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3698
3699 if (fWhat & CPUMCTX_EXTRN_RSP)
3700 {
3701 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3702 AssertRC(rc);
3703 }
3704
3705 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3706 {
3707 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3708#ifndef IN_NEM_DARWIN
3709 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3710#else
3711 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3712#endif
3713 if (fWhat & CPUMCTX_EXTRN_CS)
3714 {
3715 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3716 vmxHCImportGuestRip(pVCpu); /** @todo WTF? */
3717 if (fRealOnV86Active)
3718 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3719 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3720 }
3721 if (fWhat & CPUMCTX_EXTRN_SS)
3722 {
3723 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3724 if (fRealOnV86Active)
3725 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3726 }
3727 if (fWhat & CPUMCTX_EXTRN_DS)
3728 {
3729 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3730 if (fRealOnV86Active)
3731 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3732 }
3733 if (fWhat & CPUMCTX_EXTRN_ES)
3734 {
3735 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3736 if (fRealOnV86Active)
3737 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3738 }
3739 if (fWhat & CPUMCTX_EXTRN_FS)
3740 {
3741 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3742 if (fRealOnV86Active)
3743 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3744 }
3745 if (fWhat & CPUMCTX_EXTRN_GS)
3746 {
3747 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3748 if (fRealOnV86Active)
3749 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3750 }
3751 }
3752
3753 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3754 {
3755 if (fWhat & CPUMCTX_EXTRN_LDTR)
3756 vmxHCImportGuestLdtr(pVCpu);
3757
3758 if (fWhat & CPUMCTX_EXTRN_GDTR)
3759 {
3760 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3761 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3762 pCtx->gdtr.cbGdt = u32Val;
3763 }
3764
3765 /* Guest IDTR. */
3766 if (fWhat & CPUMCTX_EXTRN_IDTR)
3767 {
3768 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3769 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3770 pCtx->idtr.cbIdt = u32Val;
3771 }
3772
3773 /* Guest TR. */
3774 if (fWhat & CPUMCTX_EXTRN_TR)
3775 {
3776#ifndef IN_NEM_DARWIN
3777 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3778 so we don't need to import it. */
3779 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3780#endif
3781 vmxHCImportGuestTr(pVCpu);
3782 }
3783 }
3784
3785 if (fWhat & CPUMCTX_EXTRN_DR7)
3786 {
3787#ifndef IN_NEM_DARWIN
3788 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3789#endif
3790 {
3791 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3792 AssertRC(rc);
3793 }
3794 }
3795
3796 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3797 {
3798 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3799 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3800 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3801 pCtx->SysEnter.cs = u32Val;
3802 }
3803
3804#ifndef IN_NEM_DARWIN
3805 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3806 {
3807 if ( pVM->hmr0.s.fAllow64BitGuests
3808 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3809 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3810 }
3811
3812 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3813 {
3814 if ( pVM->hmr0.s.fAllow64BitGuests
3815 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3816 {
3817 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3818 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3819 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3820 }
3821 }
3822
3823 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3824 {
3825 rc = vmxHCImportGuestTscAuxAndOtherMsrs(pVCpu, pVmcsInfo, fEFlags);
3826 AssertRCReturn(rc, rc);
3827 }
3828#else
3829 NOREF(pVM);
3830#endif
3831
3832 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3833 {
3834 if (fWhat & CPUMCTX_EXTRN_CR0)
3835 vmxHCImportGuestCr0(pVCpu, pVmcsInfo);
3836
3837 if (fWhat & CPUMCTX_EXTRN_CR4)
3838 vmxHCImportGuestCr4(pVCpu, pVmcsInfo);
3839
3840 if (fWhat & CPUMCTX_EXTRN_CR3)
3841 vmxHCImportGuestCr3(pVCpu);
3842 }
3843
3844#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3845 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3846 {
3847 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3848 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3849 {
3850 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3851 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3852 if (RT_SUCCESS(rc))
3853 { /* likely */ }
3854 else
3855 break;
3856 }
3857 }
3858#endif
3859 } while (0);
3860
3861 if (RT_SUCCESS(rc))
3862 {
3863 /* Update fExtrn. */
3864 pCtx->fExtrn &= ~fWhat;
3865
3866 /* If everything has been imported, clear the HM keeper bit. */
3867 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3868 {
3869#ifndef IN_NEM_DARWIN
3870 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3871#else
3872 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3873#endif
3874 Assert(!pCtx->fExtrn);
3875 }
3876 }
3877 }
3878#ifndef IN_NEM_DARWIN
3879 else
3880 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3881
3882 /*
3883 * Restore interrupts.
3884 */
3885 ASMSetFlags(fEFlags);
3886#endif
3887
3888 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3889
3890 if (RT_SUCCESS(rc))
3891 { /* likely */ }
3892 else
3893 return rc;
3894
3895 /*
3896 * Honor any pending CR3 updates.
3897 *
3898 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3899 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3900 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3901 *
3902 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3903 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3904 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3905 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3906 *
3907 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3908 *
3909 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3910 */
3911 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3912#ifndef IN_NEM_DARWIN
3913 && VMMRZCallRing3IsEnabled(pVCpu)
3914#endif
3915 )
3916 {
3917 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3918 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3919 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3920 }
3921
3922 return VINF_SUCCESS;
3923}
3924
3925
3926/**
3927 * Internal state fetcher, inner version where we fetch all of a_fWhat.
3928 *
3929 * @returns VBox status code.
3930 * @param pVCpu The cross context virtual CPU structure.
3931 * @param pVmcsInfo The VMCS info. object.
3932 * @param fEFlags Saved EFLAGS for restoring the interrupt flag. Ignored
3933 * in NEM/darwin context.
3934 * @tparam a_fWhat What to import, zero or more bits from
3935 * HMVMX_CPUMCTX_EXTRN_ALL.
3936 */
3937template<uint64_t const a_fWhat>
3938static int vmxHCImportGuestStateInner(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3939{
3940 Assert(a_fWhat != 0); /* No AssertCompile as the assertion probably kicks in before the compiler (clang) discards it. */
3941 AssertCompile(!(a_fWhat & ~HMVMX_CPUMCTX_EXTRN_ALL));
3942 Assert( (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == a_fWhat
3943 || (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == (a_fWhat & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
3944
3945    STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3946
3947 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3948
3949 /* RIP and RFLAGS may have been imported already by the post-exit code
3950 together with the CPUMCTX_EXTRN_INHIBIT_INT/NMI state, in which case the
3951 fetch below is skipped (fExtrn is checked first). */
3952 if ( (a_fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3953 && pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3954 {
3955 if (a_fWhat & CPUMCTX_EXTRN_RFLAGS)
3956 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3957
3958 if (a_fWhat & CPUMCTX_EXTRN_RIP)
3959 {
3960 if (!(a_fWhat & CPUMCTX_EXTRN_CS))
3961 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3962 else
3963 vmxHCImportGuestCoreRip(pVCpu);
3964 }
3965 }
3966
3967 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3968 if (a_fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3969 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3970
3971 if (a_fWhat & (CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TR))
3972 {
3973 if (a_fWhat & CPUMCTX_EXTRN_CS)
3974 {
3975 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3976 /** @todo try get rid of this carp, it smells and is probably never ever
3977 * used: */
3978 if ( !(a_fWhat & CPUMCTX_EXTRN_RIP)
3979 && (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP))
3980 {
3981 vmxHCImportGuestCoreRip(pVCpu);
3982 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3983 }
3984 EMHistoryUpdatePC(pVCpu, pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, true /* fFlattened */);
3985 }
3986 if (a_fWhat & CPUMCTX_EXTRN_SS)
3987 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3988 if (a_fWhat & CPUMCTX_EXTRN_DS)
3989 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3990 if (a_fWhat & CPUMCTX_EXTRN_ES)
3991 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3992 if (a_fWhat & CPUMCTX_EXTRN_FS)
3993 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3994 if (a_fWhat & CPUMCTX_EXTRN_GS)
3995 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3996
3997 /* Guest TR.
3998 Real-mode emulation using virtual-8086 mode has the fake TSS
3999 (pRealModeTSS) in TR, so we don't need to import it. */
4000#ifndef IN_NEM_DARWIN
4001 PVMXVMCSINFOSHARED const pVmcsInfoShared = pVmcsInfo->pShared;
4002 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
4003 if ((a_fWhat & CPUMCTX_EXTRN_TR) && !fRealOnV86Active)
4004#else
4005 if (a_fWhat & CPUMCTX_EXTRN_TR)
4006#endif
4007 vmxHCImportGuestTr(pVCpu);
4008
4009#ifndef IN_NEM_DARWIN /* NEM/Darwin: HV supports only unrestricted guest execution. */
4010 if (fRealOnV86Active)
4011 {
4012 if (a_fWhat & CPUMCTX_EXTRN_CS)
4013 pVCpu->cpum.GstCtx.cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
4014 if (a_fWhat & CPUMCTX_EXTRN_SS)
4015 pVCpu->cpum.GstCtx.ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
4016 if (a_fWhat & CPUMCTX_EXTRN_DS)
4017 pVCpu->cpum.GstCtx.ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
4018 if (a_fWhat & CPUMCTX_EXTRN_ES)
4019 pVCpu->cpum.GstCtx.es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
4020 if (a_fWhat & CPUMCTX_EXTRN_FS)
4021 pVCpu->cpum.GstCtx.fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
4022 if (a_fWhat & CPUMCTX_EXTRN_GS)
4023 pVCpu->cpum.GstCtx.gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
4024 }
4025#endif
4026 }
4027
4028 if (a_fWhat & CPUMCTX_EXTRN_RSP)
4029 {
4030 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pVCpu->cpum.GstCtx.rsp);
4031 AssertRC(rc);
4032 }
4033
4034 if (a_fWhat & CPUMCTX_EXTRN_LDTR)
4035 vmxHCImportGuestLdtr(pVCpu);
4036
4037 if (a_fWhat & CPUMCTX_EXTRN_GDTR)
4038 {
4039 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pVCpu->cpum.GstCtx.gdtr.pGdt); AssertRC(rc1);
4040 uint32_t u32Val;
4041 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc2);
4042 pVCpu->cpum.GstCtx.gdtr.cbGdt = (uint16_t)u32Val;
4043 }
4044
4045 /* Guest IDTR. */
4046 if (a_fWhat & CPUMCTX_EXTRN_IDTR)
4047 {
4048 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pVCpu->cpum.GstCtx.idtr.pIdt); AssertRC(rc1);
4049 uint32_t u32Val;
4050 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc2);
4051        pVCpu->cpum.GstCtx.idtr.cbIdt = (uint16_t)u32Val;
4052 }
4053
4054 if (a_fWhat & CPUMCTX_EXTRN_DR7)
4055 {
4056#ifndef IN_NEM_DARWIN
4057 if (!pVCpu->hmr0.s.fUsingHyperDR7)
4058#endif
4059 {
4060 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pVCpu->cpum.GstCtx.dr[7]);
4061 AssertRC(rc);
4062 }
4063 }
4064
4065 if (a_fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
4066 {
4067 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pVCpu->cpum.GstCtx.SysEnter.eip); AssertRC(rc1);
4068 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pVCpu->cpum.GstCtx.SysEnter.esp); AssertRC(rc2);
4069 uint32_t u32Val;
4070 int const rc3 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc3);
4071 pVCpu->cpum.GstCtx.SysEnter.cs = u32Val;
4072 }
4073
4074#ifndef IN_NEM_DARWIN
4075 if (a_fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
4076 {
4077 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4078 && pVM->hmr0.s.fAllow64BitGuests)
4079 pVCpu->cpum.GstCtx.msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
4080 }
4081
4082 if (a_fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
4083 {
4084 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4085 && pVM->hmr0.s.fAllow64BitGuests)
4086 {
4087 pVCpu->cpum.GstCtx.msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
4088 pVCpu->cpum.GstCtx.msrSTAR = ASMRdMsr(MSR_K6_STAR);
4089 pVCpu->cpum.GstCtx.msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
4090 }
4091 }
4092
4093 if (a_fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
4094 {
4095 int const rc1 = vmxHCImportGuestTscAuxAndOtherMsrs(pVCpu, pVmcsInfo, fEFlags);
4096 AssertRCReturn(rc1, rc1);
4097 }
4098#else
4099 NOREF(pVM);
4100#endif
4101
4102 if (a_fWhat & CPUMCTX_EXTRN_CR0)
4103 vmxHCImportGuestCr0(pVCpu, pVmcsInfo);
4104
4105 if (a_fWhat & CPUMCTX_EXTRN_CR4)
4106 vmxHCImportGuestCr4(pVCpu, pVmcsInfo);
4107
4108 if (a_fWhat & CPUMCTX_EXTRN_CR3)
4109 vmxHCImportGuestCr3(pVCpu);
4110
4111#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4112 if (a_fWhat & CPUMCTX_EXTRN_HWVIRT)
4113 {
4114 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4115 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4116 {
4117 Assert(CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx));
4118 int const rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
4119 AssertRCReturn(rc, rc);
4120 }
4121 }
4122#endif
4123
4124 /* Update fExtrn. */
4125 pVCpu->cpum.GstCtx.fExtrn &= ~a_fWhat;
4126
4127 /* If everything has been imported, clear the HM keeper bit. */
4128 if (!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
4129 {
4130#ifndef IN_NEM_DARWIN
4131 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
4132#else
4133 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
4134#endif
4135 Assert(!pVCpu->cpum.GstCtx.fExtrn);
4136 }
4137
4138 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
4139
4140 /*
4141 * Honor any pending CR3 updates.
4142 *
4143 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
4144 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
4145 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
4146 *
4147 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
4148 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
4149 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
4150 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
4151 *
4152 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
4153 *
4154 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
4155 */
4156#ifndef IN_NEM_DARWIN
4157 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4158 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu))
4159 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu) )
4160 return VINF_SUCCESS;
4161 ASMSetFlags(fEFlags);
4162#else
4163 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4164 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
4165 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) )
4166 return VINF_SUCCESS;
4167 RT_NOREF_PV(fEFlags);
4168#endif
4169
4170 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
4171 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
4172 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
4173 return VINF_SUCCESS;
4174}
4175
4176
4177/**
4178 * Internal state fetcher.
4179 *
4180 * @returns VBox status code.
4181 * @param pVCpu The cross context virtual CPU structure.
4182 * @param pVmcsInfo The VMCS info. object.
4183 * @param pszCaller For logging.
4184 * @tparam a_fWhat What needs to be imported, CPUMCTX_EXTRN_XXX.
4185 * @tparam a_fDoneLocal What's ASSUMED to have been retrieved locally
4186 * already. This is ORed together with @a a_fWhat when
4187 * calculating what needs fetching (just for safety).
4188 * @tparam a_fDonePostExit What's ASSUMED to have been retrieved by
4189 * hmR0VmxPostRunGuest()/nemR3DarwinHandleExitCommon()
4190 * already. This is ORed together with @a a_fWhat when
4191 * calculating what needs fetching (just for safety).
4192 */
4193template<uint64_t const a_fWhat,
4194 uint64_t const a_fDoneLocal = 0,
4195 uint64_t const a_fDonePostExit = 0
4196#ifndef IN_NEM_DARWIN
4197 | CPUMCTX_EXTRN_INHIBIT_INT
4198 | CPUMCTX_EXTRN_INHIBIT_NMI
4199# if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
4200 | HMVMX_CPUMCTX_EXTRN_ALL
4201# elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
4202 | CPUMCTX_EXTRN_RFLAGS
4203# endif
4204#else /* IN_NEM_DARWIN */
4205 | CPUMCTX_EXTRN_ALL /** @todo optimize */
4206#endif /* IN_NEM_DARWIN */
4207>
4208DECLINLINE(int) vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, const char *pszCaller)
4209{
4210 RT_NOREF_PV(pszCaller);
4211 if ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL)
4212 {
4213#ifndef IN_NEM_DARWIN
4214 /*
4215 * We disable interrupts to make the updating of the state and in particular
4216 * the fExtrn modification atomic wrt preemption hooks.
4217 */
4218 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
4219#else
4220 RTCCUINTREG const fEFlags = 0;
4221#endif
4222
4223 /*
4224 * We combine all three parameters and take the (probably) inlined optimized
4225 * code path for the new things specified in a_fWhat.
4226 *
4227 * As a tweak to deal with exits that have INHIBIT_INT/NMI active, causing
4228 * vmxHCImportGuestIntrState to automatically fetch both RIP & RFLAGS, we
4229 * also take the streamlined path when both of these are cleared in fExtrn
4230 * already. vmxHCImportGuestStateInner checks fExtrn before fetching. This
4231 * helps with MWAIT and HLT exits that always inhibit IRQs on many platforms.
4232 */
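        /* In other words: take the inlined path when the outstanding bits among
           everything requested are exactly the newly requested set, optionally minus
           RIP/RFLAGS which the interruptibility import may fetch on its own. */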
4233 uint64_t const fWhatToDo = pVCpu->cpum.GstCtx.fExtrn
4234 & ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL);
4235 if (RT_LIKELY( ( fWhatToDo == (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit))
4236 || fWhatToDo == ( a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)
4237 & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)) /* fetch with INHIBIT_INT/NMI */))
4238 && (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)) != 0 /* just in case */)
4239 {
4240 int const rc = vmxHCImportGuestStateInner< a_fWhat
4241 & HMVMX_CPUMCTX_EXTRN_ALL
4242 & ~(a_fDoneLocal | a_fDonePostExit)>(pVCpu, pVmcsInfo, fEFlags);
4243#ifndef IN_NEM_DARWIN
4244 ASMSetFlags(fEFlags);
4245#endif
4246 return rc;
4247 }
4248
4249#ifndef IN_NEM_DARWIN
4250 ASMSetFlags(fEFlags);
4251#endif
4252
4253 /*
4254 * We shouldn't normally get here, but it may happen when executing
4255 * in the debug run-loops. Typically, everything should already have
4256 * been fetched then. Otherwise call the fallback state import function.
4257 */
4258 if (fWhatToDo == 0)
4259 { /* hope the cause was the debug loop or something similar */ }
4260 else
4261 {
4262 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestStateFallback);
4263 Log11Func(("a_fWhat=%#RX64/%#RX64/%#RX64 fExtrn=%#RX64 => %#RX64 - Taking inefficient code path from %s!\n",
4264 a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL, a_fDoneLocal & HMVMX_CPUMCTX_EXTRN_ALL,
4265 a_fDonePostExit & HMVMX_CPUMCTX_EXTRN_ALL, pVCpu->cpum.GstCtx.fExtrn, fWhatToDo, pszCaller));
4266 return vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, a_fWhat | a_fDoneLocal | a_fDonePostExit);
4267 }
4268 }
4269 return VINF_SUCCESS;
4270}
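/* Illustrative usage (hypothetical caller, not taken from this file): a VM-exit
   handler that has already read RIP on its own could do
       vmxHCImportGuestState<CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0,
                             CPUMCTX_EXTRN_RIP>(pVCpu, pVmcsInfo, __FUNCTION__);
   so RIP is excluded from the VMREADs while RFLAGS and CR0 are fetched if they are
   still marked external in fExtrn. */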
4271
4272
4273/**
4274 * Check per-VM and per-VCPU force flag actions that require us to go back to
4275 * ring-3 for one reason or another.
4276 *
4277 * @returns Strict VBox status code (i.e. informational status codes too)
4278 * @retval VINF_SUCCESS if we don't have any actions that require going back to
4279 * ring-3.
4280 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
4281 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
4282 * interrupts)
4283 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
4284 * all EMTs to be in ring-3.
4285 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
4286 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
4287 * to the EM loop.
4288 *
4289 * @param pVCpu The cross context virtual CPU structure.
4290 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
4291 * @param fStepping Whether we are single-stepping the guest using the
4292 * hypervisor debugger.
4293 *
4294 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
4295 * is no longer in VMX non-root mode.
4296 */
4297static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
4298{
4299#ifndef IN_NEM_DARWIN
4300 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4301#endif
4302
4303 /*
4304 * Update pending interrupts into the APIC's IRR.
4305 */
4306 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
4307 APICUpdatePendingInterrupts(pVCpu);
4308
4309 /*
4310 * Anything pending? Should be more likely than not if we're doing a good job.
4311 */
4312 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4313 if ( !fStepping
4314 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
4315 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
4316 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
4317 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4318 return VINF_SUCCESS;
4319
4320    /* Pending PGM CR3 sync. */
4321    if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
4322 {
4323 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4324 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
4325 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
4326 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
4327 if (rcStrict != VINF_SUCCESS)
4328 {
4329 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
4330 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
4331 return rcStrict;
4332 }
4333 }
4334
4335 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
4336 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
4337 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4338 {
4339 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
4340 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
4341 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d (fVM=%#RX64 fCpu=%#RX64)\n",
4342 rc, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
4343 return rc;
4344 }
4345
4346 /* Pending VM request packets, such as hardware interrupts. */
4347 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
4348 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
4349 {
4350 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
4351 Log4Func(("Pending VM request forcing us back to ring-3\n"));
4352 return VINF_EM_PENDING_REQUEST;
4353 }
4354
4355 /* Pending PGM pool flushes. */
4356 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
4357 {
4358 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
4359 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
4360 return VINF_PGM_POOL_FLUSH_PENDING;
4361 }
4362
4363 /* Pending DMA requests. */
4364 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
4365 {
4366 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
4367 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
4368 return VINF_EM_RAW_TO_R3;
4369 }
4370
4371#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4372 /*
4373 * Pending nested-guest events.
4374 *
4375 * Please note that the priority of these events is specified and important.
4376 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4377 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
4378 *
4379 * Interrupt-window and NMI-window VM-exits for the nested-guest need not be
4380 * handled here. They'll be handled by the hardware while executing the nested-guest
4381 * or by us when we inject events that are not part of the nested-guest's VM-entry.
4382 */
4383 if (fIsNestedGuest)
4384 {
4385 /* Pending nested-guest APIC-write (may or may not cause a VM-exit). */
4386 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4387 {
4388 Log4Func(("Pending nested-guest APIC-write\n"));
4389 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
4390 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4391 if ( rcStrict == VINF_SUCCESS
4392 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4393 return rcStrict;
4394 }
4395
4396 /* Pending nested-guest monitor-trap flag (MTF). */
4397 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
4398 {
4399 Log4Func(("Pending nested-guest MTF\n"));
4400 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
4401 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4402 return rcStrict;
4403 }
4404
4405 /* Pending nested-guest VMX-preemption timer expired. */
4406 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
4407 {
4408 Log4Func(("Pending nested-guest preempt timer\n"));
4409 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
4410 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4411 return rcStrict;
4412 }
4413 }
4414#else
4415 NOREF(fIsNestedGuest);
4416#endif
4417
4418 return VINF_SUCCESS;
4419}
4420
4421
4422/**
4423 * Converts any TRPM trap into a pending HM event. This is typically used when
4424 * entering from ring-3 (not longjmp returns).
4425 *
4426 * @param pVCpu The cross context virtual CPU structure.
4427 */
4428static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
4429{
4430 Assert(TRPMHasTrap(pVCpu));
4431 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4432
4433 uint8_t uVector;
4434 TRPMEVENT enmTrpmEvent;
4435 uint32_t uErrCode;
4436 RTGCUINTPTR GCPtrFaultAddress;
4437 uint8_t cbInstr;
4438 bool fIcebp;
4439
4440 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4441 AssertRC(rc);
4442
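    /* Build the event in the VMX interruption-information format: vector in bits 7:0, event type in
       bits 10:8 and the valid bit in bit 31.  The IDT-vectoring and VM-entry interruption-information
       fields share this layout for these bits, e.g. a hardware #UD encodes as 0x80000306. */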
4443 uint32_t u32IntInfo;
4444 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4445 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
4446
4447 rc = TRPMResetTrap(pVCpu);
4448 AssertRC(rc);
4449 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4450 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4451
4452 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4453}
4454
4455
4456/**
4457 * Converts the pending HM event into a TRPM trap.
4458 *
4459 * @param pVCpu The cross context virtual CPU structure.
4460 */
4461static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4462{
4463 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4464
4465 /* If a trap was already pending, we did something wrong! */
4466 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4467
4468 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4469 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4470 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4471
4472 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4473
4474 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4475 AssertRC(rc);
4476
4477 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4478 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4479
4480 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4481 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4482 else
4483 {
4484 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4485 switch (uVectorType)
4486 {
4487 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4488 TRPMSetTrapDueToIcebp(pVCpu);
4489 RT_FALL_THRU();
4490 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4491 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4492 {
4493 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4494 || ( uVector == X86_XCPT_BP /* INT3 */
4495 || uVector == X86_XCPT_OF /* INTO */
4496 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4497 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4498 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4499 break;
4500 }
4501 }
4502 }
4503
4504 /* We're now done converting the pending event. */
4505 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4506}
4507
4508
4509/**
4510 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4511 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4512 *
4513 * @param pVCpu The cross context virtual CPU structure.
4514 * @param pVmcsInfo The VMCS info. object.
4515 */
4516static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4517{
4518 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4519 {
4520 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4521 {
4522 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4523 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4524 AssertRC(rc);
4525 }
4526 Log4Func(("Enabled interrupt-window exiting\n"));
4527 } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4528}
4529
4530
4531/**
4532 * Clears the interrupt-window exiting control in the VMCS.
4533 *
4534 * @param pVCpu The cross context virtual CPU structure.
4535 * @param pVmcsInfo The VMCS info. object.
4536 */
4537DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4538{
4539 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4540 {
4541 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4542 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4543 AssertRC(rc);
4544 Log4Func(("Disabled interrupt-window exiting\n"));
4545 }
4546}
4547
4548
4549/**
4550 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4551 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4552 *
4553 * @param pVCpu The cross context virtual CPU structure.
4554 * @param pVmcsInfo The VMCS info. object.
4555 */
4556static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4557{
4558 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4559 {
4560 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4561 {
4562 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4563 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4564 AssertRC(rc);
4565 Log4Func(("Enabled NMI-window exiting\n"));
4566 }
4567 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4568}
4569
4570
4571/**
4572 * Clears the NMI-window exiting control in the VMCS.
4573 *
4574 * @param pVCpu The cross context virtual CPU structure.
4575 * @param pVmcsInfo The VMCS info. object.
4576 */
4577DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4578{
4579 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4580 {
4581 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4582 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4583 AssertRC(rc);
4584 Log4Func(("Disabled NMI-window exiting\n"));
4585 }
4586}
4587
4588
4589/**
4590 * Injects an event into the guest upon VM-entry by updating the relevant fields
4591 * in the VM-entry area in the VMCS.
4592 *
4593 * @returns Strict VBox status code (i.e. informational status codes too).
4594 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4595 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4596 *
4597 * @param pVCpu The cross context virtual CPU structure.
4598 * @param pVmcsInfo The VMCS info object.
4599 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
4600 * @param pEvent The event being injected.
4601 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4602 * will be updated if necessary. This cannot be NULL.
4603 * @param fStepping Whether we're single-stepping guest execution and should
4604 * return VINF_EM_DBG_STEPPED if the event is injected
4605 * directly (registers modified by us, not by hardware on
4606 * VM-entry).
4607 */
4608static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4609 bool fStepping, uint32_t *pfIntrState)
4610{
4611 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4612 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4613 Assert(pfIntrState);
4614
4615#ifdef IN_NEM_DARWIN
4616 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4617#endif
4618
4619 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4620 uint32_t u32IntInfo = pEvent->u64IntInfo;
4621 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4622 uint32_t const cbInstr = pEvent->cbInstr;
4623 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4624 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4625 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4626
4627#ifdef VBOX_STRICT
4628 /*
4629 * Validate the error-code-valid bit for hardware exceptions.
4630 * No error codes for exceptions in real-mode.
4631 *
4632 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4633 */
4634 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4635 && !CPUMIsGuestInRealModeEx(pCtx))
4636 {
4637 switch (uVector)
4638 {
4639 case X86_XCPT_PF:
4640 case X86_XCPT_DF:
4641 case X86_XCPT_TS:
4642 case X86_XCPT_NP:
4643 case X86_XCPT_SS:
4644 case X86_XCPT_GP:
4645 case X86_XCPT_AC:
4646 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4647 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4648 RT_FALL_THRU();
4649 default:
4650 break;
4651 }
4652 }
4653
4654 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4655 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4656 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4657#endif
4658
4659 RT_NOREF(uVector);
4660 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4661 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4662 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4663 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4664 {
4665 Assert(uVector <= X86_XCPT_LAST);
4666 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4667 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4668 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4669 }
4670 else
4671 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4672
4673 /*
4674 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4675 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4676 * interrupt handler in the (real-mode) guest.
4677 *
4678 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4679 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4680 */
4681 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4682 {
4683#ifndef IN_NEM_DARWIN
4684 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4685#endif
4686 {
4687 /*
4688 * For CPUs with unrestricted guest execution enabled and with the guest
4689 * in real-mode, we must not set the deliver-error-code bit.
4690 *
4691 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4692 */
4693 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4694 }
4695#ifndef IN_NEM_DARWIN
4696 else
4697 {
4698 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4699 Assert(PDMVmmDevHeapIsEnabled(pVM));
4700 Assert(pVM->hm.s.vmx.pRealModeTSS);
4701 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4702
4703 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4704 int rc2 = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4705 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4706 AssertRCReturn(rc2, rc2);
4707
4708 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4709 size_t const cbIdtEntry = sizeof(X86IDTR16);
4710 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4711 {
4712 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4713 if (uVector == X86_XCPT_DF)
4714 return VINF_EM_RESET;
4715
4716 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4717 No error codes for exceptions in real-mode. */
4718 if (uVector == X86_XCPT_GP)
4719 {
4720 static HMEVENT const s_EventXcptDf
4721 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4722 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4723 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4724 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4725 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptDf, fStepping, pfIntrState);
4726 }
4727
4728 /*
4729 * If we're injecting an event with no valid IDT entry, inject a #GP.
4730 * No error codes for exceptions in real-mode.
4731 *
4732 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4733 */
4734 static HMEVENT const s_EventXcptGp
4735 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4736 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4737 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4738 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4739 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptGp, fStepping, pfIntrState);
4740 }
4741
4742 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4743 uint16_t uGuestIp = pCtx->ip;
4744 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4745 {
4746 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4747 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
4748 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4749 }
4750 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4751 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4752
4753 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
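            /* A real-mode IVT entry is 4 bytes: a 16-bit handler offset followed by a 16-bit code
               segment selector, which is the layout X86IDTR16 describes. */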
4754 X86IDTR16 IdtEntry;
4755 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4756 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4757 AssertRCReturn(rc2, rc2);
4758
4759 /* Construct the stack frame for the interrupt/exception handler. */
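            /* Real-mode interrupt dispatch pushes FLAGS, then CS, then IP onto the guest stack; the
               pushes below replicate that order. */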
4760 VBOXSTRICTRC rcStrict;
4761 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, (uint16_t)pCtx->eflags.u);
4762 if (rcStrict == VINF_SUCCESS)
4763 {
4764 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4765 if (rcStrict == VINF_SUCCESS)
4766 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4767 }
4768
4769 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4770 if (rcStrict == VINF_SUCCESS)
4771 {
4772 pCtx->eflags.u &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4773 pCtx->rip = IdtEntry.offSel;
4774 pCtx->cs.Sel = IdtEntry.uSel;
4775 pCtx->cs.ValidSel = IdtEntry.uSel;
4776 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
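                /* Real-mode segment base = selector * 16; since cbIdtEntry is 4, the shift above is
                   equivalent to << 4. */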
4777 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4778 && uVector == X86_XCPT_PF)
4779 pCtx->cr2 = GCPtrFault;
4780
4781 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4782 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4783 | HM_CHANGED_GUEST_RSP);
4784
4785 /*
4786 * If we delivered a hardware exception (other than an NMI) and if there was
4787 * block-by-STI in effect, we should clear it.
4788 */
4789 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4790 {
4791 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4792 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4793 Log4Func(("Clearing inhibition due to STI\n"));
4794 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4795 }
4796
4797 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4798 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4799
4800 /*
4801 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4802 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4803 */
4804 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4805
4806 /*
4807 * If we eventually support nested-guest execution without unrestricted guest execution,
4808 * we should set fInterceptEvents here.
4809 */
4810 Assert(!fIsNestedGuest);
4811
4812 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4813 if (fStepping)
4814 rcStrict = VINF_EM_DBG_STEPPED;
4815 }
4816 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4817 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4818 return rcStrict;
4819 }
4820#else
4821 RT_NOREF(pVmcsInfo);
4822#endif
4823 }
4824
4825 /*
4826 * Validate.
4827 */
4828 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4829 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4830
4831 /*
4832 * Inject the event into the VMCS.
4833 */
4834 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4835 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4836 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4837 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4838 AssertRC(rc);
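    /* Note: the VM-entry instruction length written above is only consumed by the CPU for software
       interrupts and software exceptions (including privileged software exceptions); for other event
       types it is ignored, so writing it unconditionally is harmless. */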
4839
4840 /*
4841 * Update guest CR2 if this is a page-fault.
4842 */
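    /* Hardware does not load CR2 as part of event injection; for page-faults we must place the
       faulting address into the guest's CR2 ourselves before VM-entry. */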
4843 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4844 pCtx->cr2 = GCPtrFault;
4845
4846 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4847 return VINF_SUCCESS;
4848}
4849
4850
4851/**
4852 * Evaluates the event to be delivered to the guest and sets it as the pending
4853 * event.
4854 *
4855 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4856 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4857 * NOT restore these force-flags.
4858 *
4859 * @returns Strict VBox status code (i.e. informational status codes too).
4860 * @param pVCpu The cross context virtual CPU structure.
4861 * @param pVmcsInfo The VMCS information structure.
4863 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4864 */
4865static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t *pfIntrState)
4866{
4867 Assert(pfIntrState);
4868 Assert(!TRPMHasTrap(pVCpu));
4869
4870 /*
4871 * Compute/update guest-interruptibility state related FFs.
4872 * The FFs will be used below while evaluating events to be injected.
4873 */
4874 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4875
4876 /*
4877 * Evaluate if a new event needs to be injected.
4878 * An event that's already pending has already undergone all the necessary checks.
4879 */
4880 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4881 && !CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
4882 {
4883 /** @todo SMI. SMIs take priority over NMIs. */
4884
4885 /*
4886 * NMIs.
4887 * NMIs take priority over external interrupts.
4888 */
4889 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4890 {
4891 if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
4892 {
4893 /* Finally, inject the NMI and we're done. */
4894 vmxHCSetPendingXcptNmi(pVCpu);
4895 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4896 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4897 return VINF_SUCCESS;
4898 }
4899
4900 /*
4901 * Set up NMI-window exiting and also clear any interrupt-window exiting that might
4902 * still be active. This can happen if we got VM-exits that were higher priority
4903 * than an interrupt-window VM-exit.
4904 */
4905 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4906 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
4907 }
4908 else
4909 Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT));
4910
4911 /*
4912 * External interrupts (PIC/APIC).
4913 */
4914 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4915 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4916 {
4917 Assert(!DBGFIsStepping(pVCpu));
4918 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4919 AssertRC(rc);
4920
4921 if (pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF)
4922 {
4923 /*
4924 * Once PDMGetInterrupt() returns an interrupt we -must- deliver it.
4925 * We cannot re-request the interrupt from the controller again.
4926 */
4927 uint8_t u8Interrupt;
4928 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4929 if (RT_SUCCESS(rc))
4930 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
4931 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4932 {
4933 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
4934 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
4935 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
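                    /* The TPR threshold is compared against bits 7:4 of the TPR, so arming it with the
                       masked vector's priority class (vector >> 4, e.g. vector 0x53 -> class 5) causes a
                       TPR-below-threshold VM-exit once the guest lowers its TPR enough for this
                       interrupt to become deliverable. */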
4936 /*
4937 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
4938 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
4939 * need to re-set this force-flag here.
4940 */
4941 }
4942 else
4943 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
4944
4945 /* We must clear interrupt-window exiting for the same reason mentioned above for NMIs. */
4946 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
4947 return VINF_SUCCESS;
4948 }
4949
4950 /* Set up interrupt-window exiting. */
4951 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4952 Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT));
4953 }
4954 else
4955 {
4956 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
4957 Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT));
4958 }
4959 }
4960 else
4961 {
4962 /*
4963 * An event is being injected or we are in an interrupt shadow.
4964 * If another event is pending currently, instruct VT-x to cause a VM-exit as
4965 * soon as the guest is ready to accept it.
4966 */
4967 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4968 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4969 else
4970 {
4971 Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT));
4972 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4973 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4974 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4975 else
4976 Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT));
4977 }
4978 }
4979
4980 return VINF_SUCCESS;
4981}
4982
4983
4984#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4985/**
4986 * Evaluates the event to be delivered to the nested-guest and sets it as the
4987 * pending event.
4988 *
4989 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4990 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4991 * NOT restore these force-flags.
4992 *
4993 * @returns Strict VBox status code (i.e. informational status codes too).
4994 * @param pVCpu The cross context virtual CPU structure.
4995 * @param pVmcsInfo The VMCS information structure.
4996 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4997 *
4998 * @remarks The guest must be in VMX non-root mode.
4999 */
5000static VBOXSTRICTRC vmxHCEvaluatePendingEventNested(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t *pfIntrState)
5001{
5002 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5003
5004 Assert(pfIntrState);
5005 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
5006 Assert(!TRPMHasTrap(pVCpu));
5007
5008 /*
5009 * Compute/update guest-interruptibility state related FFs.
5010 * The FFs will be used below while evaluating events to be injected.
5011 */
5012 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
5013
5014 /*
5015 * If we are injecting an event, we must not set up any interrupt/NMI-window
5016 * exiting or we would get into an infinite VM-exit loop. An event that's
5017 * already pending has already undergone all the necessary checks.
5018 */
5019 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5020 return VINF_SUCCESS;
5021
5022 /*
5023 * An event injected by VMLAUNCH/VMRESUME instruction emulation should've been
5024 * made pending (TRPM to HM event) and would be handled above if we resumed
5025 * execution in HM. If somehow we fell back to emulation after the
5026 * VMLAUNCH/VMRESUME instruction, it would have been handled in iemRaiseXcptOrInt
5027 * (calling iemVmxVmexitEvent). Thus, if we get here the nested-hypervisor's VMX
5028 * intercepts should be active and any events pending here have been generated
5029 * while executing the guest in VMX non-root mode after virtual VM-entry completed.
5030 */
5031 Assert(CPUMIsGuestVmxInterceptEvents(pCtx));
5032
5033 /*
5034 * Interrupt shadows can also block NMIs. If we are in an interrupt shadow there's
5035 * nothing more to do here.
5036 *
5037 * See Intel spec. 24.4.2 "Guest Non-Register State".
5038 * See Intel spec. 25.4.1 "Event Blocking".
5039 */
5040 if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
5041 { /* likely */ }
5042 else
5043 return VINF_SUCCESS;
5044
5045 /** @todo SMI. SMIs take priority over NMIs. */
5046
5047 /*
5048 * NMIs.
5049 * NMIs take priority over external interrupts.
5050 *
5051 * NMI blocking is in effect after delivering an NMI until the execution of IRET.
5052 * Only when there isn't any NMI blocking can an NMI-window VM-exit or delivery of an NMI happen.
5053 */
5054 if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
5055 {
5056 /*
5057 * Nested-guest NMI-window exiting.
5058 * The NMI-window exit must happen regardless of whether an NMI is pending
5059 * provided virtual-NMI blocking is not in effect.
5060 *
5061 * See Intel spec. 25.2 "Other Causes Of VM Exits".
5062 */
5063 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
5064 && !CPUMIsGuestVmxVirtNmiBlocking(pCtx))
5065 {
5066 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
5067 return IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
5068 }
5069
5070 /*
5071 * For a nested-guest, the FF always indicates the outer guest's ability to
5072 * receive an NMI while the guest-interruptibility state bit depends on whether
5073 * the nested-hypervisor is using virtual-NMIs.
5074 *
5075 * It is very important that we also clear the force-flag if we are causing
5076 * an NMI VM-exit as it is the responsibility of the nested-hypervisor to deal
5077 * with re-injecting or discarding the NMI. This fixes the bug that showed up
5078 * with SMP Windows Server 2008 R2 with Hyper-V enabled, see @bugref{10318#c19}.
5079 */
5080 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI))
5081 {
5082 if (CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
5083 return IEMExecVmxVmexitXcptNmi(pVCpu);
5084 vmxHCSetPendingXcptNmi(pVCpu);
5085 return VINF_SUCCESS;
5086 }
5087 }
5088
5089 /*
5090 * Nested-guest interrupt-window exiting.
5091 *
5092 * We must cause the interrupt-window exit regardless of whether an interrupt is pending
5093 * provided virtual interrupts are enabled.
5094 *
5095 * See Intel spec. 25.2 "Other Causes Of VM Exits".
5096 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5097 */
5098 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
5099 && CPUMIsGuestVmxVirtIntrEnabled(pCtx))
5100 {
5101 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
5102 return IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
5103 }
5104
5105 /*
5106 * External interrupts (PIC/APIC).
5107 *
5108 * When "External interrupt exiting" is set the VM-exit happens regardless of RFLAGS.IF.
5109 * When it isn't set, RFLAGS.IF controls delivery of the interrupt as always.
5110 * This fixes a nasty SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued
5111 * by other VM-exits (like a preemption timer), see @bugref{9562#c18}.
5112 *
5113 * See Intel spec. 25.4.1 "Event Blocking".
5114 */
5115 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
5116 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
5117 && CPUMIsGuestVmxPhysIntrEnabled(pCtx))
5118 {
5119 Assert(!DBGFIsStepping(pVCpu));
5120
5121 /* Nested-guest external interrupt VM-exit. */
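        /* Without "acknowledge interrupt on exit" the VM-exit does not consume the vector: the
           interrupt stays pending in the PIC/APIC, hence fIntPending=true and no vector is fetched. */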
5122 if ( CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT)
5123 && !CPUMIsGuestVmxExitCtlsSet(pCtx, VMX_EXIT_CTLS_ACK_EXT_INT))
5124 {
5125 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
5126 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
5127 return rcStrict;
5128 }
5129
5130 /*
5131 * Fetch the external interrupt from the interrupt controller.
5132 * Once PDMGetInterrupt() returns an interrupt we -must- deliver it or pass it to
5133 * the nested-hypervisor. We cannot re-request the interrupt from the controller again.
5134 */
5135 uint8_t u8Interrupt;
5136 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
5137 if (RT_SUCCESS(rc))
5138 {
5139 /* Nested-guest external interrupt VM-exit when the "acknowledge interrupt on exit" is enabled. */
5140 if (CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
5141 {
5142 Assert(CPUMIsGuestVmxExitCtlsSet(pCtx, VMX_EXIT_CTLS_ACK_EXT_INT));
5143 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
5144 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
5145 return rcStrict;
5146 }
5147 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
5148 return VINF_SUCCESS;
5149 }
5150 }
5151 return VINF_SUCCESS;
5152}
5153#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5154
5155
5156/**
5157 * Injects any pending events into the guest if the guest is in a state to
5158 * receive them.
5159 *
5160 * @returns Strict VBox status code (i.e. informational status codes too).
5161 * @param pVCpu The cross context virtual CPU structure.
5162 * @param pVmcsInfo The VMCS information structure.
5163 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
5164 * @param fIntrState The VT-x guest-interruptibility state.
5165 * @param fStepping Whether we are single-stepping the guest using the
5166 * hypervisor debugger and should return
5167 * VINF_EM_DBG_STEPPED if the event was dispatched
5168 * directly.
5169 */
5170static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
5171 uint32_t fIntrState, bool fStepping)
5172{
5173 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
5174#ifndef IN_NEM_DARWIN
5175 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5176#endif
5177
5178#ifdef VBOX_STRICT
5179 /*
5180 * Verify guest-interruptibility state.
5181 *
5182 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
5183 * since injecting an event may modify the interruptibility state and we must thus always
5184 * use fIntrState.
5185 */
5186 {
5187 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
5188 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
5189 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
5190 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
5191 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
5192 Assert(!TRPMHasTrap(pVCpu));
5193 NOREF(fBlockMovSS); NOREF(fBlockSti);
5194 }
5195#endif
5196
5197 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5198 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5199 {
5200 /*
5201 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
5202 * pending even while injecting an event and in this case, we want a VM-exit as soon as
5203 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
5204 *
5205 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5206 */
5207 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
5208#ifdef VBOX_STRICT
5209 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5210 {
5211 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
5212 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5213 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5214 }
5215 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5216 {
5217 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
5218 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5219 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5220 }
5221#endif
5222 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5223 uIntType));
5224
5225 /*
5226 * Inject the event and get any changes to the guest-interruptibility state.
5227 *
5228 * The guest-interruptibility state may need to be updated if we inject the event
5229 * into the guest IDT ourselves (for real-on-v86 guests when injecting software interrupts).
5230 */
5231 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
5232 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
5233
5234 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5235 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
5236 else
5237 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
5238 }
5239
5240 /*
5241 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
5242 * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
5243 */
5244 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5245 && !fIsNestedGuest)
5246 {
5247 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
5248
5249 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5250 {
5251 /*
5252 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
5253 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
5254 */
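            /* BS is bit 14 of the guest pending-debug-exceptions field; when set, the CPU delivers
               the single-step #DB once the STI/MOV-SS shadow has passed. */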
5255 Assert(!DBGFIsStepping(pVCpu));
5256 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_TF);
5257 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
5258 fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
5259 AssertRC(rc);
5260 }
5261 else
5262 {
5263 /*
5264 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
5265 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
5266 * we take care of this case in vmxHCExportSharedDebugState and also the case if
5267 * we use MTF, so just make sure it's called before executing guest-code.
5268 */
5269 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
5270 }
5271 }
5272 /* else: for nested-guests this is currently handled while merging controls. */
5273
5274 /*
5275 * Finally, update the guest-interruptibility state.
5276 *
5277 * This is required for the real-on-v86 software interrupt injection, for
5278 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
5279 */
5280 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
5281 AssertRC(rc);
5282
5283 /*
5284 * There's no need to clear the VM-entry interruption-information field here if we're not
5285 * injecting anything. VT-x clears the valid bit on every VM-exit.
5286 *
5287 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
5288 */
5289
5290 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
5291 return rcStrict;
5292}
5293
5294
5295/**
5296 * Tries to determine what part of the guest-state VT-x has deemed as invalid
5297 * and update error record fields accordingly.
5298 *
5299 * @returns VMX_IGS_* error codes.
5300 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
5301 * wrong with the guest state.
5302 *
5303 * @param pVCpu The cross context virtual CPU structure.
5304 * @param pVmcsInfo The VMCS info. object.
5305 *
5306 * @remarks This function assumes our cache of the VMCS controls
5307 * are valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
5308 */
5309static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
5310{
5311#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
5312#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { uError = (err); break; } else do { } while (0)
5313
5314 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5315 uint32_t uError = VMX_IGS_ERROR;
5316 uint32_t u32IntrState = 0;
5317#ifndef IN_NEM_DARWIN
5318 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5319 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
5320#else
5321 bool const fUnrestrictedGuest = true;
5322#endif
5323 do
5324 {
5325 int rc;
5326
5327 /*
5328 * Guest-interruptibility state.
5329 *
5330 * Read this first so that even when a check that does not itself require the
5331 * guest-interruptibility state fails, u32IntrState still holds the correct
5332 * VMCS value, avoiding further confusion.
5333 */
5334 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
5335 AssertRC(rc);
5336
5337 uint32_t u32Val;
5338 uint64_t u64Val;
5339
5340 /*
5341 * CR0.
5342 */
5343 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5344 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
5345 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
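        /* IA32_VMX_CR0_FIXED0 gives the CR0 bits that must be 1 and IA32_VMX_CR0_FIXED1 the bits that
           may be 1, so the AND above yields the must-be-one mask and the OR the may-be-one mask; for
           consistent MSR values this is equivalent to using the MSRs directly (presumably the AND/OR
           form is just defensive).  The same scheme is used for CR4 below. */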
5346 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
5347 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
5348 if (fUnrestrictedGuest)
5349 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
5350
5351 uint64_t u64GuestCr0;
5352 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
5353 AssertRC(rc);
5354 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
5355 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
5356 if ( !fUnrestrictedGuest
5357 && (u64GuestCr0 & X86_CR0_PG)
5358 && !(u64GuestCr0 & X86_CR0_PE))
5359 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
5360
5361 /*
5362 * CR4.
5363 */
5364 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5365 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
5366 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
5367
5368 uint64_t u64GuestCr4;
5369 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
5370 AssertRC(rc);
5371 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
5372 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
5373
5374 /*
5375 * IA32_DEBUGCTL MSR.
5376 */
5377 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
5378 AssertRC(rc);
5379 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5380 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
5381 {
5382 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
5383 }
5384 uint64_t u64DebugCtlMsr = u64Val;
5385
5386#ifdef VBOX_STRICT
5387 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
5388 AssertRC(rc);
5389 Assert(u32Val == pVmcsInfo->u32EntryCtls);
5390#endif
5391 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5392
5393 /*
5394 * RIP and RFLAGS.
5395 */
5396 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
5397 AssertRC(rc);
5398 /* pCtx->rip can be different from the one in the VMCS (e.g. after running guest code and taking VM-exits that don't update it). */
5399 if ( !fLongModeGuest
5400 || !pCtx->cs.Attr.n.u1Long)
5401 {
5402 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
5403 }
5404 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
5405 * must be identical if the "IA-32e mode guest" VM-entry
5406 * control is 1 and CS.L is 1. No check applies if the
5407 * CPU supports 64 linear-address bits. */
5408
5409 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
5410 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
5411 AssertRC(rc);
5412 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
5413 VMX_IGS_RFLAGS_RESERVED);
5414 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
5415 uint32_t const u32Eflags = u64Val;
5416
5417 if ( fLongModeGuest
5418 || ( fUnrestrictedGuest
5419 && !(u64GuestCr0 & X86_CR0_PE)))
5420 {
5421 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
5422 }
5423
5424 uint32_t u32EntryInfo;
5425 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
5426 AssertRC(rc);
5427 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5428 {
5429 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
5430 }
5431
5432 /*
5433 * 64-bit checks.
5434 */
5435 if (fLongModeGuest)
5436 {
5437 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
5438 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
5439 }
5440
5441 if ( !fLongModeGuest
5442 && (u64GuestCr4 & X86_CR4_PCIDE))
5443 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
5444
5445 /** @todo CR3 field must be such that bits 63:52 and bits in the range
5446 * 51:32 beyond the processor's physical-address width are 0. */
5447
5448 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5449 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
5450 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
5451
5452#ifndef IN_NEM_DARWIN
5453 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
5454 AssertRC(rc);
5455 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
5456
5457 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
5458 AssertRC(rc);
5459 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
5460#endif
5461
5462 /*
5463 * PERF_GLOBAL MSR.
5464 */
5465 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
5466 {
5467 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
5468 AssertRC(rc);
5469 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
5470 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
5471 }
5472
5473 /*
5474 * PAT MSR.
5475 */
5476 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5477 {
5478 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
5479 AssertRC(rc);
5480 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xf8f8f8f8f8f8f8f8)), VMX_IGS_PAT_MSR_RESERVED); /* Bits 7:3 of each PAT entry are reserved (MBZ). */
5481 for (unsigned i = 0; i < 8; i++)
5482 {
5483 uint8_t u8Val = (u64Val & 0xff);
5484 if ( u8Val != 0 /* UC */
5485 && u8Val != 1 /* WC */
5486 && u8Val != 4 /* WT */
5487 && u8Val != 5 /* WP */
5488 && u8Val != 6 /* WB */
5489 && u8Val != 7 /* UC- */)
5490 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
5491 u64Val >>= 8;
5492 }
5493 }
5494
5495 /*
5496 * EFER MSR.
5497 */
5498 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5499 {
5500 Assert(g_fHmVmxSupportsVmcsEfer);
5501 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
5502 AssertRC(rc);
5503 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
5504 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
5505 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
5506 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
5507 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
5508 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
5509 * iemVmxVmentryCheckGuestState(). */
5510 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5511 || !(u64GuestCr0 & X86_CR0_PG)
5512 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
5513 VMX_IGS_EFER_LMA_LME_MISMATCH);
5514 }
5515
5516 /*
5517 * Segment registers.
5518 */
5519 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5520 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
5521 if (!(u32Eflags & X86_EFL_VM))
5522 {
5523 /* CS */
5524 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
5525 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
5526 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
5527 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
5528 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5529 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
5530 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5531 /* CS cannot be loaded with NULL in protected mode. */
5532 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
5533 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
5534 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
5535 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
5536 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
5537 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
5538 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
5539 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
5540 else
5541 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
5542
5543 /* SS */
5544 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5545 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
5546 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
5547 if ( !(pCtx->cr0 & X86_CR0_PE)
5548 || pCtx->cs.Attr.n.u4Type == 3)
5549 {
5550 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
5551 }
5552
5553 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
5554 {
5555 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
5556 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
5557 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
5558 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
5559 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
5560 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5561 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
5562 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5563 }
5564
5565 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
5566 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
5567 {
5568 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
5569 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
5570 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5571 || pCtx->ds.Attr.n.u4Type > 11
5572 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5573 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
5574 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
5575 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
5576 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5577 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
5578 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5579 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5580 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
5581 }
5582 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
5583 {
5584 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
5585 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
5586 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5587 || pCtx->es.Attr.n.u4Type > 11
5588 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5589 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
5590 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5591 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5592 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5593 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5594 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5595 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5596 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5597 }
5598 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5599 {
5600 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5601 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5602 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5603 || pCtx->fs.Attr.n.u4Type > 11
5604 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5605 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5606 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5607 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5608 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5609 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5610 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5611 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5612 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5613 }
5614 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5615 {
5616 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5617 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5618 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5619 || pCtx->gs.Attr.n.u4Type > 11
5620 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5621 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5622 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5623 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5624 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5625 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5626 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5627 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5628 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5629 }
5630 /* 64-bit capable CPUs. */
5631 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5632 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5633 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5634 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5635 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5636 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5637 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5638 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5639 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5640 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5641 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5642 }
5643 else
5644 {
5645 /* V86 mode checks. */
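            /* With RFLAGS.VM set, VT-x requires all segment access rights to be exactly 0xf3 (present,
               DPL=3, accessed read/write data); see Intel spec. "Checks on Guest Segment Registers". */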
5646 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5647 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5648 {
5649 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5650 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5651 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5652 }
5653 else
5654 {
5655 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5656 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5657 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5658 }
5659
5660 /* CS */
5661 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5662 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5663 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5664 /* SS */
5665 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5666 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5667 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5668 /* DS */
5669 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5670 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5671 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5672 /* ES */
5673 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5674 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5675 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5676 /* FS */
5677 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5678 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5679 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5680 /* GS */
5681 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5682 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5683 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5684 /* 64-bit capable CPUs. */
5685 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5686 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5687 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5688 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5689 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5690 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5691 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5692 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5693 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5694 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5695 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5696 }
5697
5698 /*
5699 * TR.
5700 */
5701 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5702 /* 64-bit capable CPUs. */
5703 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5704 if (fLongModeGuest)
5705 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5706 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5707 else
5708 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5709 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5710 VMX_IGS_TR_ATTR_TYPE_INVALID);
5711 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5712 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5713 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5714 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5715 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5716 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5717 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5718 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5719
5720 /*
5721 * GDTR and IDTR (64-bit capable checks).
5722 */
5723 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5724 AssertRC(rc);
5725 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5726
5727 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5728 AssertRC(rc);
5729 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5730
5731 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5732 AssertRC(rc);
5733 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5734
5735 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5736 AssertRC(rc);
5737 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5738
5739 /*
5740 * Guest Non-Register State.
5741 */
5742 /* Activity State. */
5743 uint32_t u32ActivityState;
5744 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5745 AssertRC(rc);
5746 HMVMX_CHECK_BREAK( !u32ActivityState
5747 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5748 VMX_IGS_ACTIVITY_STATE_INVALID);
5749 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5750 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5751
5752 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5753 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5754 {
5755 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5756 }
5757
5758 /** @todo Activity state and injecting interrupts. Left as a todo since we
5759 * currently don't use any activity state other than ACTIVE. */
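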
5760
5761 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5762 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5763
5764 /* Guest interruptibility-state. */
5765 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5766 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5767 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5768 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5769 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5770 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5771 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5772 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5773 {
5774 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5775 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5776 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5777 }
5778 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5779 {
5780 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5781 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5782 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5783 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5784 }
5785 /** @todo Assumes the processor is not in SMM. */
5786 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5787 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5788 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5789 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5790 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5791 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5792 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5793 {
5794 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5795 }
5796
5797 /* Pending debug exceptions. */
5798 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5799 AssertRC(rc);
5800 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5801 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5802 u32Val = u64Val; /* For pending debug exceptions checks below. */
5803
5804 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5805 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5806 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5807 {
5808 if ( (u32Eflags & X86_EFL_TF)
5809 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5810 {
5811 /* Bit 14 is PendingDebug.BS. */
5812 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5813 }
5814 if ( !(u32Eflags & X86_EFL_TF)
5815 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5816 {
5817 /* Bit 14 is PendingDebug.BS. */
5818 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5819 }
5820 }
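
            /* The two checks above implement a single consistency rule; an equivalent
               formulation for the guarded case (STI/MOV-SS blocking or HLT) is the
               following sketch, kept here for illustration only and not compiled: */
#if 0
            bool const fBsMustBeSet = (u32Eflags & X86_EFL_TF)
                                   && !(u64DebugCtlMsr & RT_BIT_64(1)); /* IA32_DEBUGCTL.BTF */
            Assert(RT_BOOL(u32Val & RT_BIT(14) /* PendingDebug.BS */) == fBsMustBeSet);
#endif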
5821
5822#ifndef IN_NEM_DARWIN
5823 /* VMCS link pointer. */
5824 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5825 AssertRC(rc);
5826 if (u64Val != UINT64_C(0xffffffffffffffff))
5827 {
5828 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5829 /** @todo Bits beyond the processor's physical-address width MBZ. */
5830 /** @todo SMM checks. */
5831 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5832 Assert(pVmcsInfo->pvShadowVmcs);
5833 VMXVMCSREVID VmcsRevId;
5834 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5835 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5836 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5837 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5838 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5839 }
5840
5841 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5842 * not using nested paging? */
5843 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5844 && !fLongModeGuest
5845 && CPUMIsGuestInPAEModeEx(pCtx))
5846 {
5847 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5848 AssertRC(rc);
5849 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5850
5851 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5852 AssertRC(rc);
5853 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5854
5855 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5856 AssertRC(rc);
5857 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5858
5859 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5860 AssertRC(rc);
5861 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5862 }
5863#endif
5864
5865 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5866 if (uError == VMX_IGS_ERROR)
5867 uError = VMX_IGS_REASON_NOT_FOUND;
5868 } while (0);
5869
5870 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5871 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5872 return uError;
5873
5874#undef HMVMX_ERROR_BREAK
5875#undef HMVMX_CHECK_BREAK
5876}
5877
5878
5879#ifndef HMVMX_USE_FUNCTION_TABLE
5880/**
5881 * Handles a guest VM-exit from hardware-assisted VMX execution.
5882 *
5883 * @returns Strict VBox status code (i.e. informational status codes too).
5884 * @param pVCpu The cross context virtual CPU structure.
5885 * @param pVmxTransient The VMX-transient structure.
5886 */
5887DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5888{
5889#ifdef DEBUG_ramshankar
5890# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5891 do { \
5892 if (a_fSave != 0) \
5893 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5894 VBOXSTRICTRC rcStrict = a_CallExpr; \
5895 if (a_fSave != 0) \
5896 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5897 return rcStrict; \
5898 } while (0)
5899#else
5900# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5901#endif
5902 uint32_t const uExitReason = pVmxTransient->uExitReason;
5903 switch (uExitReason)
5904 {
5905 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5906 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5907 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5908 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5909 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5910 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5911 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5912 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5913 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5914 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5915 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5916 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5917 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5918 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5919 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5920 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5921 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5922 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5923 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5924 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5925 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5926 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5927 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5928 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5929 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5930 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5931 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5932 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5933 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5934 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5935#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5936 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5937 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5938 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5939 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5940 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5941 case VMX_EXIT_VMRESUME: VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5942 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5943 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5944 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5945 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5946#else
5947 case VMX_EXIT_VMCLEAR:
5948 case VMX_EXIT_VMLAUNCH:
5949 case VMX_EXIT_VMPTRLD:
5950 case VMX_EXIT_VMPTRST:
5951 case VMX_EXIT_VMREAD:
5952 case VMX_EXIT_VMRESUME:
5953 case VMX_EXIT_VMWRITE:
5954 case VMX_EXIT_VMXOFF:
5955 case VMX_EXIT_VMXON:
5956 case VMX_EXIT_INVVPID:
5957 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5958#endif
5959#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5960 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5961#else
5962 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5963#endif
5964
5965 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5966 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5967 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5968
5969 case VMX_EXIT_INIT_SIGNAL:
5970 case VMX_EXIT_SIPI:
5971 case VMX_EXIT_IO_SMI:
5972 case VMX_EXIT_SMI:
5973 case VMX_EXIT_ERR_MSR_LOAD:
5974 case VMX_EXIT_ERR_MACHINE_CHECK:
5975 case VMX_EXIT_PML_FULL:
5976 case VMX_EXIT_VIRTUALIZED_EOI:
5977 case VMX_EXIT_GDTR_IDTR_ACCESS:
5978 case VMX_EXIT_LDTR_TR_ACCESS:
5979 case VMX_EXIT_APIC_WRITE:
5980 case VMX_EXIT_RDRAND:
5981 case VMX_EXIT_RSM:
5982 case VMX_EXIT_VMFUNC:
5983 case VMX_EXIT_ENCLS:
5984 case VMX_EXIT_RDSEED:
5985 case VMX_EXIT_XSAVES:
5986 case VMX_EXIT_XRSTORS:
5987 case VMX_EXIT_UMWAIT:
5988 case VMX_EXIT_TPAUSE:
5989 case VMX_EXIT_LOADIWKEY:
5990 default:
5991 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5992 }
5993#undef VMEXIT_CALL_RET
5994}
5995#endif /* !HMVMX_USE_FUNCTION_TABLE */
5996
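/* A minimal sketch of how this switch-based dispatcher is driven when
 * HMVMX_USE_FUNCTION_TABLE is not defined (the surrounding variable names are
 * assumptions for illustration; the real callers live in the ring-0/NEM run loops):
 */
#if 0
    /* ... a VM-exit has occurred and pVmxTransient->uExitReason has been read from the VMCS ... */
    VBOXSTRICTRC rcStrict = vmxHCHandleExit(pVCpu, pVmxTransient);
    if (rcStrict == VINF_SUCCESS)
    { /* continue executing the guest */ }
    else
    { /* informational and error status codes are propagated back to the run loop */ }
#endif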
5997
5998#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5999/**
6000 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
6001 *
6002 * @returns Strict VBox status code (i.e. informational status codes too).
6003 * @param pVCpu The cross context virtual CPU structure.
6004 * @param pVmxTransient The VMX-transient structure.
6005 */
6006DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6007{
6008#ifdef DEBUG_ramshankar
6009# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
6010 do { \
6011 if (a_fSave != 0) \
6012 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
6013 VBOXSTRICTRC rcStrict = a_CallExpr; \
6014 return rcStrict; \
6015 } while (0)
6016#else
6017# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
6018#endif
6019
6020 uint32_t const uExitReason = pVmxTransient->uExitReason;
6021 switch (uExitReason)
6022 {
6023# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6024 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient));
6025 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolationNested(pVCpu, pVmxTransient));
6026# else
6027 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
6028 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
6029# endif
6030 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient));
6031 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstrNested(pVCpu, pVmxTransient));
6032 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHltNested(pVCpu, pVmxTransient));
6033
6034 /*
6035 * We shouldn't direct host physical interrupts to the nested-guest.
6036 */
6037 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
6038
6039 /*
6040 * Instructions that cause VM-exits unconditionally, or whose VM-exit
6041 * condition is controlled solely by the nested hypervisor (meaning if the
6042 * VM-exit happens, it's guaranteed to be a nested-guest VM-exit).
6043 *
6044 * - Provides VM-exit instruction length ONLY.
6045 */
6046 case VMX_EXIT_CPUID: /* Unconditional. */
6047 case VMX_EXIT_VMCALL:
6048 case VMX_EXIT_GETSEC:
6049 case VMX_EXIT_INVD:
6050 case VMX_EXIT_XSETBV:
6051 case VMX_EXIT_VMLAUNCH:
6052 case VMX_EXIT_VMRESUME:
6053 case VMX_EXIT_VMXOFF:
6054 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
6055 case VMX_EXIT_VMFUNC:
6056 VMEXIT_CALL_RET(0, vmxHCExitInstrNested(pVCpu, pVmxTransient));
6057
6058 /*
6059 * Instructions that cause VM-exits unconditionally, or whose VM-exit
6060 * condition is controlled solely by the nested hypervisor (meaning if the
6061 * VM-exit happens, it's guaranteed to be a nested-guest VM-exit).
6062 *
6063 * - Provides VM-exit instruction length.
6064 * - Provides VM-exit information.
6065 * - Optionally provides Exit qualification.
6066 *
6067 * Since Exit qualification is 0 for all VM-exits where it is not
6068 * applicable, reading and passing it to the guest should produce
6069 * defined behavior.
6070 *
6071 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
6072 */
6073 case VMX_EXIT_INVEPT: /* Unconditional. */
6074 case VMX_EXIT_INVVPID:
6075 case VMX_EXIT_VMCLEAR:
6076 case VMX_EXIT_VMPTRLD:
6077 case VMX_EXIT_VMPTRST:
6078 case VMX_EXIT_VMXON:
6079 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
6080 case VMX_EXIT_LDTR_TR_ACCESS:
6081 case VMX_EXIT_RDRAND:
6082 case VMX_EXIT_RDSEED:
6083 case VMX_EXIT_XSAVES:
6084 case VMX_EXIT_XRSTORS:
6085 case VMX_EXIT_UMWAIT:
6086 case VMX_EXIT_TPAUSE:
6087 VMEXIT_CALL_RET(0, vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient));
6088
6089 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtscNested(pVCpu, pVmxTransient));
6090 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscpNested(pVCpu, pVmxTransient));
6091 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsrNested(pVCpu, pVmxTransient));
6092 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsrNested(pVCpu, pVmxTransient));
6093 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpgNested(pVCpu, pVmxTransient));
6094 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcidNested(pVCpu, pVmxTransient));
6095 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient));
6096 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvdNested(pVCpu, pVmxTransient));
6097 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtfNested(pVCpu, pVmxTransient));
6098 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccessNested(pVCpu, pVmxTransient));
6099 case VMX_EXIT_APIC_WRITE: VMEXIT_CALL_RET(0, vmxHCExitApicWriteNested(pVCpu, pVmxTransient));
6100 case VMX_EXIT_VIRTUALIZED_EOI: VMEXIT_CALL_RET(0, vmxHCExitVirtEoiNested(pVCpu, pVmxTransient));
6101 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRxNested(pVCpu, pVmxTransient));
6102 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindowNested(pVCpu, pVmxTransient));
6103 case VMX_EXIT_NMI_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitNmiWindowNested(pVCpu, pVmxTransient));
6104 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient));
6105 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwaitNested(pVCpu, pVmxTransient));
6106 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitorNested(pVCpu, pVmxTransient));
6107 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPauseNested(pVCpu, pVmxTransient));
6108
6109 case VMX_EXIT_PREEMPT_TIMER:
6110 {
6111 /** @todo NSTVMX: Preempt timer. */
6112 VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
6113 }
6114
6115 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRxNested(pVCpu, pVmxTransient));
6116 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmcNested(pVCpu, pVmxTransient));
6117
6118 case VMX_EXIT_VMREAD:
6119 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient));
6120
6121 case VMX_EXIT_TRIPLE_FAULT: VMEXIT_CALL_RET(0, vmxHCExitTripleFaultNested(pVCpu, pVmxTransient));
6122 case VMX_EXIT_ERR_INVALID_GUEST_STATE: VMEXIT_CALL_RET(0, vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient));
6123
6124 case VMX_EXIT_INIT_SIGNAL:
6125 case VMX_EXIT_SIPI:
6126 case VMX_EXIT_IO_SMI:
6127 case VMX_EXIT_SMI:
6128 case VMX_EXIT_ERR_MSR_LOAD:
6129 case VMX_EXIT_ERR_MACHINE_CHECK:
6130 case VMX_EXIT_PML_FULL:
6131 case VMX_EXIT_RSM:
6132 default:
6133 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
6134 }
6135#undef VMEXIT_CALL_RET
6136}
6137#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6138
6139
6140/** @name VM-exit helpers.
6141 * @{
6142 */
6143/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6144/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6145/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6146
6147/** Macro for VM-exits called unexpectedly. */
6148#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
6149 do { \
6150 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
6151 return VERR_VMX_UNEXPECTED_EXIT; \
6152 } while (0)
6153
6154#ifdef VBOX_STRICT
6155# ifndef IN_NEM_DARWIN
6156/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
6157# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
6158 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
6159
6160# define HMVMX_ASSERT_PREEMPT_CPUID() \
6161 do { \
6162 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
6163 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
6164 } while (0)
6165
6166# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6167 do { \
6168 AssertPtr((a_pVCpu)); \
6169 AssertPtr((a_pVmxTransient)); \
6170 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6171 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6172 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6173 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6174 Assert((a_pVmxTransient)->pVmcsInfo); \
6175 Assert(ASMIntAreEnabled()); \
6176 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6177 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
6178 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6179 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6180 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
6181 HMVMX_ASSERT_PREEMPT_CPUID(); \
6182 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6183 } while (0)
6184# else
6185# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
6186# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
6187# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6188 do { \
6189 AssertPtr((a_pVCpu)); \
6190 AssertPtr((a_pVmxTransient)); \
6191 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6192 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6193 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6194 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6195 Assert((a_pVmxTransient)->pVmcsInfo); \
6196 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6197 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6198 } while (0)
6199# endif
6200
6201# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6202 do { \
6203 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
6204 Assert((a_pVmxTransient)->fIsNestedGuest); \
6205 } while (0)
6206
6207# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6208 do { \
6209 Log4Func(("\n")); \
6210 } while (0)
6211#else
6212# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6213 do { \
6214 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6215 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
6216 } while (0)
6217
6218# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6219 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
6220
6221# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
6222#endif
6223
6224#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6225/** Macro that does the necessary privilege checks and intercepted VM-exits for
6226 * guests that attempted to execute a VMX instruction. */
6227# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
6228 do \
6229 { \
6230 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
6231 if (rcStrictTmp == VINF_SUCCESS) \
6232 { /* likely */ } \
6233 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6234 { \
6235 Assert((a_pVCpu)->hm.s.Event.fPending); \
6236 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
6237 return VINF_SUCCESS; \
6238 } \
6239 else \
6240 { \
6241 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
6242 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
6243 } \
6244 } while (0)
6245
6246/** Macro that decodes a memory operand for an VM-exit caused by an instruction. */
6247# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
6248 do \
6249 { \
6250 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
6251 (a_pGCPtrEffAddr)); \
6252 if (rcStrictTmp == VINF_SUCCESS) \
6253 { /* likely */ } \
6254 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6255 { \
6256 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
6257 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
6258 NOREF(uXcptTmp); \
6259 return VINF_SUCCESS; \
6260 } \
6261 else \
6262 { \
6263 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
6264 return rcStrictTmp; \
6265 } \
6266 } while (0)
6267#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6268
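/* A condensed sketch of how the two macros above are combined by the
 * VMX-instruction VM-exit handlers when VBOX_WITH_NESTED_HWVIRT_VMX is defined.
 * The handler name, its tail, and the assumption that ExitInstrInfo/uExitQual
 * have already been read into the transient structure are illustrative only:
 */
#if 0
static VBOXSTRICTRC vmxHCExitSomeVmxInstrSketch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    /* Raise #UD (or reflect a pending exception) if the guest may not execute VMX instructions. */
    HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);

    /* Decode the instruction's memory operand from the VM-exit instruction information
       and the Exit qualification (displacement). */
    RTGCPTR GCPtrOperand;
    HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual,
                             VMXMEMACCESS_READ, &GCPtrOperand);

    /* The real handlers then hand the decoded operand to IEM for full emulation of the instruction. */
    return VINF_SUCCESS;
}
#endif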
6269
6270/**
6271 * Advances the guest RIP by the specified number of bytes.
6272 *
6273 * @param pVCpu The cross context virtual CPU structure.
6274 * @param cbInstr Number of bytes to advance the RIP by.
6275 *
6276 * @remarks No-long-jump zone!!!
6277 */
6278DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
6279{
6280 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
6281
6282 /*
6283 * Advance RIP.
6284 *
6285 * The upper 32 bits are only set when in 64-bit mode, so we have to detect
6286 * when the addition causes a "carry" into the upper half and check whether
6287 * we're in 64-bit mode and can go on with it or whether we should zap the top
6288 * half. (Note! The 8086, 80186 and 80286 emulation is done exclusively in
6289 * IEM, so we don't need to bother with pre-386 16-bit wraparound.)
6290 *
6291 * See PC wrap around tests in bs3-cpu-weird-1.
6292 */
6293 uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
6294 uint64_t const uRipNext = uRipPrev + cbInstr;
6295 if (RT_LIKELY( !((uRipNext ^ uRipPrev) & RT_BIT_64(32))
6296 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx)))
6297 pVCpu->cpum.GstCtx.rip = uRipNext;
6298 else
6299 pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
6300
6301 /*
6302 * Clear RF and interrupt shadowing.
6303 */
6304 if (RT_LIKELY(!(pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF))))
6305 pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_INHIBIT_SHADOW;
6306 else
6307 {
6308 if ((pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF)) == X86_EFL_TF)
6309 {
6310 /** @todo \#DB - single step. */
6311 }
6312 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
6313 }
6314 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
6315
6316 /* Mark both RIP and RFLAGS as updated. */
6317 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6318}
6319
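/* Quick illustration of the bit-32 carry test used by vmxHCAdvanceGuestRipBy()
 * above (standalone sketch, not part of the template): a 32-bit guest at EIP
 * 0xfffffffe executing a 4-byte instruction must wrap to 0x00000002, while a
 * 64-bit guest keeps the full 64-bit sum.
 */
#if 0
static uint64_t vmxAdvanceRipSketch(uint64_t uRipPrev, uint32_t cbInstr, bool fIn64BitCode)
{
    uint64_t const uRipNext = uRipPrev + cbInstr;
    if (   !((uRipNext ^ uRipPrev) & RT_BIT_64(32)) /* no carry into the upper half */
        || fIn64BitCode)
        return uRipNext;
    return (uint32_t)uRipNext;                      /* zap the top half outside 64-bit code */
}
/* Example: vmxAdvanceRipSketch(UINT64_C(0xfffffffe), 4, false) == UINT64_C(0x00000002). */
#endif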
6320
6321/**
6322 * Advances the guest RIP after reading it from the VMCS.
6323 *
6324 * @returns VBox status code, no informational status codes.
6325 * @param pVCpu The cross context virtual CPU structure.
6326 * @param pVmxTransient The VMX-transient structure.
6327 *
6328 * @remarks No-long-jump zone!!!
6329 */
6330static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6331{
6332 vmxHCReadToTransientSlow<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
6333 /** @todo consider template here after checking callers. */
6334 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
6335 AssertRCReturn(rc, rc);
6336
6337 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
6338 return VINF_SUCCESS;
6339}
6340
6341
6342/**
6343 * Handle a condition that occurred while delivering an event through the guest or
6344 * nested-guest IDT.
6345 *
6346 * @returns Strict VBox status code (i.e. informational status codes too).
6347 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6348 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
6349 * to continue execution of the guest which will deliver the \#DF.
6350 * @retval VINF_EM_RESET if we detected a triple-fault condition.
6351 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
6352 *
6353 * @param pVCpu The cross context virtual CPU structure.
6354 * @param pVmxTransient The VMX-transient structure.
6355 *
6356 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6357 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
6358 * is due to an EPT violation, PML full or SPP-related event.
6359 *
6360 * @remarks No-long-jump zone!!!
6361 */
6362static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6363{
6364 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6365 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6366 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6367 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6368 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6369 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
6370
6371 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
6372 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6373 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
6374 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
6375 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
6376 {
6377 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
6378 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
6379
6380 /*
6381 * If the event was a software interrupt (generated with INT n) or a software exception
6382 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
6383 * can handle the VM-exit and continue guest execution which will re-execute the
6384 * instruction rather than re-injecting the exception, as that can cause premature
6385 * trips to ring-3 before injection and involve TRPM which currently has no way of
6386 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
6387 * the problem).
6388 */
6389 IEMXCPTRAISE enmRaise;
6390 IEMXCPTRAISEINFO fRaiseInfo;
6391 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6392 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6393 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6394 {
6395 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
6396 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6397 }
6398 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
6399 {
6400 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
6401 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
6402 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
6403
6404 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
6405 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
6406
6407 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
6408
6409 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
6410 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
6411 {
6412 pVmxTransient->fVectoringPF = true;
6413 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6414 }
6415 }
6416 else
6417 {
6418 /*
6419 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
6420 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
6421 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
6422 */
6423 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6424 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6425 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
6426 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6427 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6428 }
6429
6430 /*
6431 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
6432 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
6433 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
6434 * subsequent VM-entry would fail, see @bugref{7445}.
6435 *
6436 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
6437 */
6438 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6439 && enmRaise == IEMXCPTRAISE_PREV_EVENT
6440 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6441 && CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6442 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6443
6444 switch (enmRaise)
6445 {
6446 case IEMXCPTRAISE_CURRENT_XCPT:
6447 {
6448 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
6449 Assert(rcStrict == VINF_SUCCESS);
6450 break;
6451 }
6452
6453 case IEMXCPTRAISE_PREV_EVENT:
6454 {
6455 uint32_t u32ErrCode;
6456 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
6457 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
6458 else
6459 u32ErrCode = 0;
6460
6461 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
6462 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
6463 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */, u32ErrCode,
6464 pVCpu->cpum.GstCtx.cr2);
6465
6466 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6467 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
6468 Assert(rcStrict == VINF_SUCCESS);
6469 break;
6470 }
6471
6472 case IEMXCPTRAISE_REEXEC_INSTR:
6473 Assert(rcStrict == VINF_SUCCESS);
6474 break;
6475
6476 case IEMXCPTRAISE_DOUBLE_FAULT:
6477 {
6478 /*
6479 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
6480 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
6481 */
6482 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
6483 {
6484 pVmxTransient->fVectoringDoublePF = true;
6485 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6486 pVCpu->cpum.GstCtx.cr2));
6487 rcStrict = VINF_SUCCESS;
6488 }
6489 else
6490 {
6491 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
6492 vmxHCSetPendingXcptDF(pVCpu);
6493 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6494 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6495 rcStrict = VINF_HM_DOUBLE_FAULT;
6496 }
6497 break;
6498 }
6499
6500 case IEMXCPTRAISE_TRIPLE_FAULT:
6501 {
6502 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
6503 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6504 rcStrict = VINF_EM_RESET;
6505 break;
6506 }
6507
6508 case IEMXCPTRAISE_CPU_HANG:
6509 {
6510 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
6511 rcStrict = VERR_EM_GUEST_CPU_HANG;
6512 break;
6513 }
6514
6515 default:
6516 {
6517 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
6518 rcStrict = VERR_VMX_IPE_2;
6519 break;
6520 }
6521 }
6522 }
6523 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6524 && !CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6525 {
6526 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
6527 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
6528 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
6529 {
6530 /*
6531 * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
6532 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6533 * that virtual NMIs remain blocked until the IRET execution is completed.
6534 *
6535 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
6536 */
6537 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6538 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6539 }
6540 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6541 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6542 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6543 {
6544 /*
6545 * Execution of IRET caused an EPT violation, page-modification log-full event or
6546 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
6547 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6548 * that virtual NMIs remain blocked until the IRET execution is completed.
6549 *
6550 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
6551 */
6552 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
6553 {
6554 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6555 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6556 }
6557 }
6558 }
6559
6560 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
6561 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
6562 return rcStrict;
6563}
6564
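/* For reference, a much-simplified sketch of the recursion rules that
 * IEMEvaluateRecursiveXcpt() applies in the function above (per the IA-32
 * exception class table; NMIs, #DF escalation to triple fault and the
 * ICEBP/#DB special cases are intentionally left out of this illustration):
 */
#if 0
typedef enum VMXXCPTCLASSSKETCH { kXcptClass_Benign, kXcptClass_Contributory, kXcptClass_PageFault } VMXXCPTCLASSSKETCH;

static VMXXCPTCLASSSKETCH vmxClassifyXcptSketch(uint8_t uVector)
{
    switch (uVector)
    {
        case X86_XCPT_DE: case X86_XCPT_TS: case X86_XCPT_NP:
        case X86_XCPT_SS: case X86_XCPT_GP:
            return kXcptClass_Contributory;
        case X86_XCPT_PF:
            return kXcptClass_PageFault;
        default:
            return kXcptClass_Benign;
    }
}

/* True when raising uCurVector while delivering uPrevVector must be escalated to a #DF. */
static bool vmxIsDoubleFaultSketch(uint8_t uPrevVector, uint8_t uCurVector)
{
    VMXXCPTCLASSSKETCH const enmPrev = vmxClassifyXcptSketch(uPrevVector);
    VMXXCPTCLASSSKETCH const enmCur  = vmxClassifyXcptSketch(uCurVector);
    return    (enmPrev == kXcptClass_Contributory && enmCur == kXcptClass_Contributory)
           || (enmPrev == kXcptClass_PageFault    && enmCur != kXcptClass_Benign);
}
#endif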
6565
6566#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6567/**
6568 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
6569 * guest attempting to execute a VMX instruction.
6570 *
6571 * @returns Strict VBox status code (i.e. informational status codes too).
6572 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6573 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
6574 *
6575 * @param pVCpu The cross context virtual CPU structure.
6576 * @param uExitReason The VM-exit reason.
6577 *
6578 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
6579 * @remarks No-long-jump zone!!!
6580 */
6581static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
6582{
6583 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
6584 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
6585
6586 /*
6587 * The physical CPU would have already checked the CPU mode/code segment.
6588 * We shall just assert here for paranoia.
6589 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
6590 */
6591 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6592 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
6593 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
6594
6595 if (uExitReason == VMX_EXIT_VMXON)
6596 {
6597 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6598
6599 /*
6600 * We check CR4.VMXE because it is required to be always set while in VMX operation
6601 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
6602 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
6603 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
6604 */
6605 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
6606 {
6607 Log4Func(("CR4.VMXE is not set -> #UD\n"));
6608 vmxHCSetPendingXcptUD(pVCpu);
6609 return VINF_HM_PENDING_XCPT;
6610 }
6611 }
6612 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
6613 {
6614 /*
6615 * The guest has not entered VMX operation but attempted to execute a VMX instruction
6616 * (other than VMXON), we need to raise a #UD.
6617 */
6618 Log4Func(("Not in VMX root mode -> #UD\n"));
6619 vmxHCSetPendingXcptUD(pVCpu);
6620 return VINF_HM_PENDING_XCPT;
6621 }
6622
6623 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
6624 return VINF_SUCCESS;
6625}
6626
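/* Condensed restatement of the #UD decision implemented above (sketch, illustration only): */
#if 0
static bool vmxMustRaiseUdForVmxInstrSketch(PVMCPUCC pVCpu, uint32_t uExitReason)
{
    if (uExitReason == VMX_EXIT_VMXON)
        return !CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx);    /* VMXON requires CR4.VMXE. */
    return !CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx);     /* All other VMX instructions require VMX root operation. */
}
#endif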
6627
6628/**
6629 * Decodes the memory operand of an instruction that caused a VM-exit.
6630 *
6631 * The Exit qualification field provides the displacement field for memory
6632 * operand instructions, if any.
6633 *
6634 * @returns Strict VBox status code (i.e. informational status codes too).
6635 * @retval VINF_SUCCESS if the operand was successfully decoded.
6636 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
6637 * operand.
6638 * @param pVCpu The cross context virtual CPU structure.
6639 * @param uExitInstrInfo The VM-exit instruction information field.
6640 * @param enmMemAccess The memory operand's access type (read or write).
6641 * @param GCPtrDisp The instruction displacement field, if any. For
6642 * RIP-relative addressing pass RIP + displacement here.
6643 * @param pGCPtrMem Where to store the effective destination memory address.
6644 *
6645 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6646 * virtual-8086 mode hence skips those checks while verifying if the
6647 * segment is valid.
6648 */
6649static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6650 PRTGCPTR pGCPtrMem)
6651{
6652 Assert(pGCPtrMem);
6653 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6654 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6655 | CPUMCTX_EXTRN_CR0);
6656
6657 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6658 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6659 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6660
6661 VMXEXITINSTRINFO ExitInstrInfo;
6662 ExitInstrInfo.u = uExitInstrInfo;
6663 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6664 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6665 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6666 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6667 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6668 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6669 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6670 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6671 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6672
6673 /*
6674 * Validate instruction information.
6675 * This shouldn't happen on real hardware but useful while testing our nested hardware-virtualization code.
6676 */
6677 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6678 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6679 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6680 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6681 AssertLogRelMsgReturn(fIsMemOperand,
6682 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6683
6684 /*
6685 * Compute the complete effective address.
6686 *
6687 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6688 * See AMD spec. 4.5.2 "Segment Registers".
6689 */
6690 RTGCPTR GCPtrMem = GCPtrDisp;
6691 if (fBaseRegValid)
6692 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6693 if (fIdxRegValid)
6694 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6695
6696 RTGCPTR const GCPtrOff = GCPtrMem;
6697 if ( !fIsLongMode
6698 || iSegReg >= X86_SREG_FS)
6699 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6700 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
6701
6702 /*
6703 * Validate effective address.
6704 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6705 */
6706 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6707 Assert(cbAccess > 0);
6708 if (fIsLongMode)
6709 {
6710 if (X86_IS_CANONICAL(GCPtrMem))
6711 {
6712 *pGCPtrMem = GCPtrMem;
6713 return VINF_SUCCESS;
6714 }
6715
6716 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6717 * "Data Limit Checks in 64-bit Mode". */
6718 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6719 vmxHCSetPendingXcptGP(pVCpu, 0);
6720 return VINF_HM_PENDING_XCPT;
6721 }
6722
6723 /*
6724 * This is a watered down version of iemMemApplySegment().
6725 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6726 * and segment CPL/DPL checks are skipped.
6727 */
6728 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6729 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6730 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6731
6732 /* Check if the segment is present and usable. */
6733 if ( pSel->Attr.n.u1Present
6734 && !pSel->Attr.n.u1Unusable)
6735 {
6736 Assert(pSel->Attr.n.u1DescType);
6737 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6738 {
6739 /* Check permissions for the data segment. */
6740 if ( enmMemAccess == VMXMEMACCESS_WRITE
6741 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6742 {
6743 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6744 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6745 return VINF_HM_PENDING_XCPT;
6746 }
6747
6748 /* Check limits if it's a normal data segment. */
6749 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6750 {
6751 if ( GCPtrFirst32 > pSel->u32Limit
6752 || GCPtrLast32 > pSel->u32Limit)
6753 {
6754 Log4Func(("Data segment limit exceeded. "
6755 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6756 GCPtrLast32, pSel->u32Limit));
6757 if (iSegReg == X86_SREG_SS)
6758 vmxHCSetPendingXcptSS(pVCpu, 0);
6759 else
6760 vmxHCSetPendingXcptGP(pVCpu, 0);
6761 return VINF_HM_PENDING_XCPT;
6762 }
6763 }
6764 else
6765 {
6766 /* Check limits if it's an expand-down data segment.
6767 Note! The upper boundary is defined by the B bit, not the G bit! */
6768 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6769 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6770 {
6771 Log4Func(("Expand-down data segment limit exceeded. "
6772 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6773 GCPtrLast32, pSel->u32Limit));
6774 if (iSegReg == X86_SREG_SS)
6775 vmxHCSetPendingXcptSS(pVCpu, 0);
6776 else
6777 vmxHCSetPendingXcptGP(pVCpu, 0);
6778 return VINF_HM_PENDING_XCPT;
6779 }
6780 }
6781 }
6782 else
6783 {
6784 /* Check permissions for the code segment. */
6785 if ( enmMemAccess == VMXMEMACCESS_WRITE
6786 || ( enmMemAccess == VMXMEMACCESS_READ
6787 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6788 {
6789 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6790 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6791 vmxHCSetPendingXcptGP(pVCpu, 0);
6792 return VINF_HM_PENDING_XCPT;
6793 }
6794
6795 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6796 if ( GCPtrFirst32 > pSel->u32Limit
6797 || GCPtrLast32 > pSel->u32Limit)
6798 {
6799 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6800 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6801 if (iSegReg == X86_SREG_SS)
6802 vmxHCSetPendingXcptSS(pVCpu, 0);
6803 else
6804 vmxHCSetPendingXcptGP(pVCpu, 0);
6805 return VINF_HM_PENDING_XCPT;
6806 }
6807 }
6808 }
6809 else
6810 {
6811 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6812 vmxHCSetPendingXcptGP(pVCpu, 0);
6813 return VINF_HM_PENDING_XCPT;
6814 }
6815
6816 *pGCPtrMem = GCPtrMem;
6817 return VINF_SUCCESS;
6818}
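
/* Worked example (illustration only) of the effective-address computation above
 * for a 32-bit guest executing, say, VMPTRLD [ebx+esi*4+0x10] with
 * EBX=0x00100000, ESI=0x00000020 and DS.base=0:
 *   displacement (from Exit qualification)     0x00000010
 *   + base register EBX                        0x00100000
 *   + index register ESI (0x20) << scale (2)   0x00000080
 *   + DS base (added, not in long mode)        0x00000000
 *   masked with s_auAddrSizeMasks[1]           0xffffffff
 *   => GCPtrMem                                0x00100090
 */
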
6819#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6820
6821
6822/**
6823 * VM-exit helper for LMSW.
6824 */
6825static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6826{
6827 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6828 AssertRCReturn(rc, rc);
6829
6830 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6831 AssertMsg( rcStrict == VINF_SUCCESS
6832 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6833
6834 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6835 if (rcStrict == VINF_IEM_RAISED_XCPT)
6836 {
6837 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6838 rcStrict = VINF_SUCCESS;
6839 }
6840
6841 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6842 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6843 return rcStrict;
6844}
6845
6846
6847/**
6848 * VM-exit helper for CLTS.
6849 */
6850static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6851{
6852 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6853 AssertRCReturn(rc, rc);
6854
6855 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6856 AssertMsg( rcStrict == VINF_SUCCESS
6857 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6858
6859 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6860 if (rcStrict == VINF_IEM_RAISED_XCPT)
6861 {
6862 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6863 rcStrict = VINF_SUCCESS;
6864 }
6865
6866 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6867 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6868 return rcStrict;
6869}
6870
6871
6872/**
6873 * VM-exit helper for MOV from CRx (CRx read).
6874 */
6875static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6876{
6877 Assert(iCrReg < 16);
6878 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6879
6880 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6881 AssertRCReturn(rc, rc);
6882
6883 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6884 AssertMsg( rcStrict == VINF_SUCCESS
6885 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6886
6887 if (iGReg == X86_GREG_xSP)
6888 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6889 else
6890 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6891#ifdef VBOX_WITH_STATISTICS
6892 switch (iCrReg)
6893 {
6894 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6895 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6896 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6897 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6898 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6899 }
6900#endif
6901 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6902 return rcStrict;
6903}
6904
6905
6906/**
6907 * VM-exit helper for MOV to CRx (CRx write).
6908 */
6909static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6910{
6911 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6912
6913 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6914 AssertMsg( rcStrict == VINF_SUCCESS
6915 || rcStrict == VINF_IEM_RAISED_XCPT
6916 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6917
6918 switch (iCrReg)
6919 {
6920 case 0:
6921 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6922 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6923 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6924 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6925 break;
6926
6927 case 2:
6928 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6929 /* Nothing to do here; CR2 is not part of the VMCS. */
6930 break;
6931
6932 case 3:
6933 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6934 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6935 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6936 break;
6937
6938 case 4:
6939 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6940 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6941#ifndef IN_NEM_DARWIN
6942 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6943 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6944#else
6945 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6946#endif
6947 break;
6948
6949 case 8:
6950 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6951 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6952 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6953 break;
6954
6955 default:
6956 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6957 break;
6958 }
6959
6960 if (rcStrict == VINF_IEM_RAISED_XCPT)
6961 {
6962 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6963 rcStrict = VINF_SUCCESS;
6964 }
6965 return rcStrict;
6966}
6967
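/* Sketch of how a MOV-CRx VM-exit is decoded before the helpers above are
 * invoked (bit layout per the Intel SDM's "Exit Qualification for
 * Control-Register Accesses"; the surrounding variables are assumptions for
 * illustration -- the real decoding is done in vmxHCExitMovCRx()):
 */
#if 0
    uint64_t const uExitQual = pVmxTransient->uExitQual;
    uint8_t  const iCrReg    = uExitQual & 0xf;         /* Bits 3:0  - control register number. */
    uint8_t  const uAccess   = (uExitQual >> 4) & 0x3;  /* Bits 5:4  - 0=MOV to CR, 1=MOV from CR, 2=CLTS, 3=LMSW. */
    uint8_t  const iGReg     = (uExitQual >> 8) & 0xf;  /* Bits 11:8 - general-purpose register operand. */
    if (uAccess == 1 /* MOV from CR */)
        rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
    else if (uAccess == 0 /* MOV to CR */)
        rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
#endif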
6968
6969/**
6970 * VM-exit exception handler for \#PF (Page-fault exception).
6971 *
6972 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6973 */
6974static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6975{
6976 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6977 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
6978
6979#ifndef IN_NEM_DARWIN
6980 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6981 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6982 { /* likely */ }
6983 else
6984#endif
6985 {
6986#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6987 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6988#endif
6989 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6990 if (!pVmxTransient->fVectoringDoublePF)
6991 {
6992 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6993 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6994 }
6995 else
6996 {
6997 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6998 Assert(!pVmxTransient->fIsNestedGuest);
6999 vmxHCSetPendingXcptDF(pVCpu);
7000 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
7001 }
7002 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
7003 return VINF_SUCCESS;
7004 }
7005
7006 Assert(!pVmxTransient->fIsNestedGuest);
7007
7008 /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
7009 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
7010 if (pVmxTransient->fVectoringPF)
7011 {
7012 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7013 return VINF_EM_RAW_INJECT_TRPM_EVENT;
7014 }
7015
7016 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7017 AssertRCReturn(rc, rc);
7018
7019 Log4Func(("#PF: cs:rip=%#04x:%08RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pVCpu->cpum.GstCtx.cs.Sel,
7020 pVCpu->cpum.GstCtx.rip, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pVCpu->cpum.GstCtx.cr3));
7021
7022 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
7023 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, &pVCpu->cpum.GstCtx, (RTGCPTR)pVmxTransient->uExitQual);
7024
7025 Log4Func(("#PF: rc=%Rrc\n", rc));
7026 if (rc == VINF_SUCCESS)
7027 {
7028 /*
7029 * This is typically a shadow page table sync or an MMIO instruction. But we may have
7030 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
7031 */
7032 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7033 TRPMResetTrap(pVCpu);
7034 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
7035 return rc;
7036 }
7037
7038 if (rc == VINF_EM_RAW_GUEST_TRAP)
7039 {
7040 if (!pVmxTransient->fVectoringDoublePF)
7041 {
7042 /* It's a guest page fault and needs to be reflected to the guest. */
7043 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
7044 TRPMResetTrap(pVCpu);
7045 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
7046 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
7047 uGstErrorCode, pVmxTransient->uExitQual);
7048 }
7049 else
7050 {
7051 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
7052 TRPMResetTrap(pVCpu);
7053 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
7054 vmxHCSetPendingXcptDF(pVCpu);
7055 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
7056 }
7057
7058 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
7059 return VINF_SUCCESS;
7060 }
7061
7062 TRPMResetTrap(pVCpu);
7063 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
7064 return rc;
7065}
7066
7067
7068/**
7069 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
7070 *
7071 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7072 */
7073static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7074{
7075 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7076 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
7077
7078 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR0>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7079 AssertRCReturn(rc, rc);
7080
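    /* CR0.NE selects how x87 errors are reported: when it is clear the CPU uses the legacy
       (DOS-compatible) mechanism where the error is signalled externally via FERR# / IRQ 13
       instead of raising #MF directly, hence the conversion below. */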
7081 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
7082 {
7083 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
7084 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
7085
7086 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
7087      *        provides VM-exit instruction length. If this causes problems later,
7088 * disassemble the instruction like it's done on AMD-V. */
7089 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7090 AssertRCReturn(rc2, rc2);
7091 return rc;
7092 }
7093
7094 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
7095 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7096 return VINF_SUCCESS;
7097}
7098
7099
7100/**
7101 * VM-exit exception handler for \#BP (Breakpoint exception).
7102 *
7103 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7104 */
7105static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7106{
7107 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7108 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
7109
7110 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7111 AssertRCReturn(rc, rc);
7112
7113 VBOXSTRICTRC rcStrict;
7114 if (!pVmxTransient->fIsNestedGuest)
7115 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx);
7116 else
7117 rcStrict = VINF_EM_RAW_GUEST_TRAP;
7118
7119 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
7120 {
7121 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7122 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7123 rcStrict = VINF_SUCCESS;
7124 }
7125
7126 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
7127 return rcStrict;
7128}
7129
7130
7131/**
7132 * VM-exit exception handler for \#AC (Alignment-check exception).
7133 *
7134 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7135 */
7136static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7137{
7138 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7139
7140 /*
7141 * Detect #ACs caused by host having enabled split-lock detection.
7142 * Emulate such instructions.
7143 */
7144#define VMX_HC_EXIT_XCPT_AC_INITIAL_REGS (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS)
7145 int rc = vmxHCImportGuestState<VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7146 AssertRCReturn(rc, rc);
7147 /** @todo detect split lock in cpu feature? */
7148 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
7149 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7150 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
7151 || CPUMGetGuestCPL(pVCpu) != 3
7152 /* 3. When the EFLAGS.AC != 0 this can only be a split-lock case. */
7153 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
7154 {
7155 /*
7156 * Check for debug/trace events and import state accordingly.
7157 */
7158 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
7159 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7160 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
7161#ifndef IN_NEM_DARWIN
7162 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
7163#endif
7164 )
7165 {
7166 if (pVM->cCpus == 1)
7167 {
7168#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7169 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK,
7170 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7171#else
7172 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7173 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7174#endif
7175 AssertRCReturn(rc, rc);
7176 }
7177 }
7178 else
7179 {
7180 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7181 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7182 AssertRCReturn(rc, rc);
7183
7184 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
7185
7186 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
7187 {
7188 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
7189 if (rcStrict != VINF_SUCCESS)
7190 return rcStrict;
7191 }
7192 }
7193
7194 /*
7195 * Emulate the instruction.
7196 *
7197 * We have to ignore the LOCK prefix here as we must not retrigger the
7198 * detection on the host. This isn't all that satisfactory, though...
7199 */
7200 if (pVM->cCpus == 1)
7201 {
7202 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
7203 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7204
7205 /** @todo For SMP configs we should do a rendezvous here. */
7206 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
7207 if (rcStrict == VINF_SUCCESS)
7208#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7209 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
7210 HM_CHANGED_GUEST_RIP
7211 | HM_CHANGED_GUEST_RFLAGS
7212 | HM_CHANGED_GUEST_GPRS_MASK
7213 | HM_CHANGED_GUEST_CS
7214 | HM_CHANGED_GUEST_SS);
7215#else
7216 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7217#endif
7218 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7219 {
7220 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7221 rcStrict = VINF_SUCCESS;
7222 }
7223 return rcStrict;
7224 }
7225 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
7226 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7227 return VINF_EM_EMULATE_SPLIT_LOCK;
7228 }
7229
7230 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
7231 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7232 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
7233
7234 /* Re-inject it. We'll detect any nesting before getting here. */
7235 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7236 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7237 return VINF_SUCCESS;
7238}
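
/*
 * Purely illustrative sketch (not built): the kind of guest-side access that trips the
 * split-lock #AC handled above. The function name is made up and GCC/Clang builtins and
 * attributes are assumed. A LOCK'ed read-modify-write that straddles a cache-line boundary
 * raises #AC on hosts with split-lock detection enabled, regardless of guest CR0.AM,
 * EFLAGS.AC or CPL - which is exactly what the checks at the top of vmxHCExitXcptAC
 * single out.
 */
#if 0
static void exampleGuestSplitLockAccess(void)
{
    /* 128 bytes aligned to a cache line, so &s_abBuf[62] gives a 32-bit word that crosses
       the 64-byte boundary. */
    static uint8_t s_abBuf[128] __attribute__((aligned(64)));
    volatile uint32_t *pu32 = (volatile uint32_t *)&s_abBuf[62];

    /* Compiles to a LOCK-prefixed XADD across the split line -> #AC when the host has
       split-lock detection enabled. */
    __atomic_fetch_add(pu32, 1, __ATOMIC_SEQ_CST);
}
#endif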
7239
7240
7241/**
7242 * VM-exit exception handler for \#DB (Debug exception).
7243 *
7244 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7245 */
7246static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7247{
7248 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7249 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
7250
7251 /*
7252 * Get the DR6-like values from the Exit qualification and pass it to DBGF for processing.
7253 */
7254 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
7255
7256 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
7257 uint64_t const uDR6 = X86_DR6_INIT_VAL
7258 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
7259 | X86_DR6_BD | X86_DR6_BS));
7260 Log6Func(("uDR6=%#RX64 uExitQual=%#RX64\n", uDR6, pVmxTransient->uExitQual));
7261
7262 int rc;
7263 if (!pVmxTransient->fIsNestedGuest)
7264 {
7265 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx, uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7266
7267 /*
7268 * Prevents stepping twice over the same instruction when the guest is stepping using
7269 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
7270 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
7271 */
7272 if ( rc == VINF_EM_DBG_STEPPED
7273 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
7274 {
7275 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7276 rc = VINF_EM_RAW_GUEST_TRAP;
7277 }
7278 }
7279 else
7280 rc = VINF_EM_RAW_GUEST_TRAP;
7281 Log6Func(("rc=%Rrc\n", rc));
7282 if (rc == VINF_EM_RAW_GUEST_TRAP)
7283 {
7284 /*
7285 * The exception was for the guest. Update DR6, DR7.GD and
7286 * IA32_DEBUGCTL.LBR before forwarding it.
7287 * See Intel spec. 27.1 "Architectural State before a VM-Exit"
7288 * and @sdmv3{077,622,17.2.3,Debug Status Register (DR6)}.
7289 */
7290#ifndef IN_NEM_DARWIN
7291 VMMRZCallRing3Disable(pVCpu);
7292 HM_DISABLE_PREEMPT(pVCpu);
7293
7294 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
7295 pVCpu->cpum.GstCtx.dr[6] |= uDR6;
7296 if (CPUMIsGuestDebugStateActive(pVCpu))
7297 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
7298
7299 HM_RESTORE_PREEMPT();
7300 VMMRZCallRing3Enable(pVCpu);
7301#else
7302 /** @todo */
7303#endif
7304
7305 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7306 AssertRCReturn(rc, rc);
7307
7308 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
7309 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_GD;
7310
7311 /* Paranoia. */
7312 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
7313 pVCpu->cpum.GstCtx.dr[7] |= X86_DR7_RA1_MASK;
7314
7315 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pVCpu->cpum.GstCtx.dr[7]);
7316 AssertRC(rc);
7317
7318 /*
7319 * Raise #DB in the guest.
7320 *
7321 * It is important to reflect exactly what the VM-exit gave us (preserving the
7322 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
7323 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
7324 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
7325 *
7326      * Intel re-documented ICEBP/INT1 in May 2018; it was previously documented only as part of
7327      * the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7328 */
7329 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7330 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7331 return VINF_SUCCESS;
7332 }
7333
7334 /*
7335 * Not a guest trap, must be a hypervisor related debug event then.
7336 * Update DR6 in case someone is interested in it.
7337 */
7338 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
7339 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
7340 CPUMSetHyperDR6(pVCpu, uDR6);
7341
7342 return rc;
7343}
7344
7345
7346/**
7347 * Hacks its way around the lovely mesa driver's backdoor accesses.
7348 *
7349 * @sa hmR0SvmHandleMesaDrvGp.
7350 */
7351static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7352{
7353 LogFunc(("cs:rip=%#04x:%08RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
7354 RT_NOREF(pCtx);
7355
7356 /* For now we'll just skip the instruction. */
7357 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7358}
7359
7360
7361/**
7362  * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
7363 * backdoor logging w/o checking what it is running inside.
7364 *
7365 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
7366 * backdoor port and magic numbers loaded in registers.
7367 *
7368 * @returns true if it is, false if it isn't.
7369 * @sa hmR0SvmIsMesaDrvGp.
7370 */
7371DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7372{
7373 /* 0xed: IN eAX,dx */
7374 uint8_t abInstr[1];
7375 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
7376 return false;
7377
7378 /* Check that it is #GP(0). */
7379 if (pVmxTransient->uExitIntErrorCode != 0)
7380 return false;
7381
7382 /* Check magic and port. */
7383 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
7384 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
7385 if (pCtx->rax != UINT32_C(0x564d5868))
7386 return false;
7387 if (pCtx->dx != UINT32_C(0x5658))
7388 return false;
7389
7390 /* Flat ring-3 CS. */
7391 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
7392 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
7393 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
7394 if (pCtx->cs.Attr.n.u2Dpl != 3)
7395 return false;
7396 if (pCtx->cs.u64Base != 0)
7397 return false;
7398
7399 /* Check opcode. */
7400 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
7401 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
7402 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
7403 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
7404 if (RT_FAILURE(rc))
7405 return false;
7406 if (abInstr[0] != 0xed)
7407 return false;
7408
7409 return true;
7410}
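
/*
 * Purely illustrative sketch (not built): what the access recognized above looks like from
 * the guest side. The function name is made up and GCC/Clang inline assembly is assumed.
 * The mesa driver issues the VMware-style backdoor "IN EAX,DX" from flat ring-3 with the
 * magic value in EAX and the backdoor port in DX; lacking I/O permission at CPL 3 this
 * raises #GP(0), which is the pattern vmxHCIsMesaDrvGp() matches on.
 */
#if 0
static uint32_t exampleMesaBackdoorIn(uint32_t uCmd, uint32_t uArg)
{
    uint32_t uResult;
    __asm__ __volatile__("inl %%dx, %%eax"              /* Opcode 0xed, the byte checked above. */
                         : "=a" (uResult)
                         : "0" (UINT32_C(0x564d5868)),  /* Magic in EAX (matched against rax). */
                           "d" (UINT16_C(0x5658)),      /* Backdoor I/O port in DX. */
                           "c" (uCmd),
                           "b" (uArg));
    return uResult;
}
#endif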
7411
7412
7413/**
7414 * VM-exit exception handler for \#GP (General-protection exception).
7415 *
7416 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7417 */
7418static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7419{
7420 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7421 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
7422
7423 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7424 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7425#ifndef IN_NEM_DARWIN
7426 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
7427 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
7428 { /* likely */ }
7429 else
7430#endif
7431 {
7432#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7433# ifndef IN_NEM_DARWIN
7434 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7435# else
7436 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7437# endif
7438#endif
7439 /*
7440 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
7441 * executing a nested-guest, reflect #GP to the guest or nested-guest.
7442 */
7443 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7444 AssertRCReturn(rc, rc);
7445 Log4Func(("Gst: cs:rip=%#04x:%08RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
7446 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
7447
7448 if ( pVmxTransient->fIsNestedGuest
7449 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
7450 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
7451 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7452 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7453 else
7454 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
7455 return rc;
7456 }
7457
7458#ifndef IN_NEM_DARWIN
7459 Assert(CPUMIsGuestInRealModeEx(pCtx));
7460 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
7461 Assert(!pVmxTransient->fIsNestedGuest);
7462
7463 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7464 AssertRCReturn(rc, rc);
7465
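    /* Real-on-v86 without unrestricted guest execution: the guest's real-mode code actually runs
       in virtual-8086 mode, so instructions that are legal in real mode can fault with #GP here.
       Have IEM emulate the offending instruction rather than reflecting the fault. */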
7466 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
7467 if (rcStrict == VINF_SUCCESS)
7468 {
7469 if (!CPUMIsGuestInRealModeEx(pCtx))
7470 {
7471 /*
7472 * The guest is no longer in real-mode, check if we can continue executing the
7473 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
7474 */
7475 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
7476 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
7477 {
7478 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
7479 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7480 }
7481 else
7482 {
7483 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
7484 rcStrict = VINF_EM_RESCHEDULE;
7485 }
7486 }
7487 else
7488 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7489 }
7490 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7491 {
7492 rcStrict = VINF_SUCCESS;
7493 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7494 }
7495 return VBOXSTRICTRC_VAL(rcStrict);
7496#endif
7497}
7498
7499
7500/**
7501 * VM-exit exception handler for \#DE (Divide Error).
7502 *
7503 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7504 */
7505static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7506{
7507 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7508 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
7509
7510 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7511 AssertRCReturn(rc, rc);
7512
7513 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7514 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
7515 {
7516 uint8_t cbInstr = 0;
7517 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
7518 if (rc2 == VINF_SUCCESS)
7519 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
7520 else if (rc2 == VERR_NOT_FOUND)
7521 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
7522 else
7523 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
7524 }
7525 else
7526 rcStrict = VINF_SUCCESS; /* Do nothing. */
7527
7528 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
7529 if (RT_FAILURE(rcStrict))
7530 {
7531 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7532 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7533 rcStrict = VINF_SUCCESS;
7534 }
7535
7536 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
7537 return VBOXSTRICTRC_VAL(rcStrict);
7538}
7539
7540
7541/**
7542 * VM-exit exception handler wrapper for all other exceptions that are not handled
7543 * by a specific handler.
7544 *
7545 * This simply re-injects the exception back into the VM without any special
7546 * processing.
7547 *
7548 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7549 */
7550static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7551{
7552 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7553
7554#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7555# ifndef IN_NEM_DARWIN
7556 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7557 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
7558 ("uVector=%#x u32XcptBitmap=%#X32\n",
7559 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
7560 NOREF(pVmcsInfo);
7561# endif
7562#endif
7563
7564 /*
7565 * Re-inject the exception into the guest. This cannot be a double-fault condition which
7566 * would have been handled while checking exits due to event delivery.
7567 */
7568 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7569
7570#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7571 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7572 AssertRCReturn(rc, rc);
7573 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%08RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7574#endif
7575
7576#ifdef VBOX_WITH_STATISTICS
7577 switch (uVector)
7578 {
7579 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
7580 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
7581 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
7582 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7583 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
7584 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
7585 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7586 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
7587 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
7588 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
7589 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
7590 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
7591 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
7592 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
7593 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
7594 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
7595 default:
7596 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
7597 break;
7598 }
7599#endif
7600
7601    /* We should never call this function for a page-fault; otherwise we'd need to pass on the fault address below. */
7602 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
7603 NOREF(uVector);
7604
7605 /* Re-inject the original exception into the guest. */
7606 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7607 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7608 return VINF_SUCCESS;
7609}
7610
7611
7612/**
7613 * VM-exit exception handler for all exceptions (except NMIs!).
7614 *
7615 * @remarks This may be called for both guests and nested-guests. Take care to not
7616 * make assumptions and avoid doing anything that is not relevant when
7617 * executing a nested-guest (e.g., Mesa driver hacks).
7618 */
7619static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7620{
7621 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
7622
7623 /*
7624 * If this VM-exit occurred while delivering an event through the guest IDT, take
7625 * action based on the return code and additional hints (e.g. for page-faults)
7626 * that will be updated in the VMX transient structure.
7627 */
7628 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
7629 if (rcStrict == VINF_SUCCESS)
7630 {
7631 /*
7632 * If an exception caused a VM-exit due to delivery of an event, the original
7633 * event may have to be re-injected into the guest. We shall reinject it and
7634 * continue guest execution. However, page-fault is a complicated case and
7635 * needs additional processing done in vmxHCExitXcptPF().
7636 */
7637 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7638 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7639 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
7640 || uVector == X86_XCPT_PF)
7641 {
7642 switch (uVector)
7643 {
7644 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
7645 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
7646 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
7647 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
7648 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
7649 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
7650 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
7651 default:
7652 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
7653 }
7654 }
7655 /* else: inject pending event before resuming guest execution. */
7656 }
7657 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
7658 {
7659 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7660 rcStrict = VINF_SUCCESS;
7661 }
7662
7663 return rcStrict;
7664}
7665/** @} */
7666
7667
7668/** @name VM-exit handlers.
7669 * @{
7670 */
7671/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7672/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7673/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7674
7675/**
7676 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7677 */
7678HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7679{
7680 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7681 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7682
7683#ifndef IN_NEM_DARWIN
7684 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7685 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7686 return VINF_SUCCESS;
7687 return VINF_EM_RAW_INTERRUPT;
7688#else
7689 return VINF_SUCCESS;
7690#endif
7691}
7692
7693
7694/**
7695 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7696 * VM-exit.
7697 */
7698HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7699{
7700 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7701 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7702
7703 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
7704
7705 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7706 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7707 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7708
7709 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7710 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7711 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7712 NOREF(pVmcsInfo);
7713
7714 VBOXSTRICTRC rcStrict;
7715 switch (uExitIntType)
7716 {
7717#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7718 /*
7719 * Host physical NMIs:
7720 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7721 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7722 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7723 *
7724 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7725 * See Intel spec. 27.5.5 "Updating Non-Register State".
7726 */
7727 case VMX_EXIT_INT_INFO_TYPE_NMI:
7728 {
7729 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7730 break;
7731 }
7732#endif
7733
7734 /*
7735 * Privileged software exceptions (#DB from ICEBP),
7736 * Software exceptions (#BP and #OF),
7737 * Hardware exceptions:
7738 * Process the required exceptions and resume guest execution if possible.
7739 */
7740 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7741 Assert(uVector == X86_XCPT_DB);
7742 RT_FALL_THRU();
7743 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7744 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7745 RT_FALL_THRU();
7746 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7747 {
7748 NOREF(uVector);
7749 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
7750 | HMVMX_READ_EXIT_INSTR_LEN
7751 | HMVMX_READ_IDT_VECTORING_INFO
7752 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
7753 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7754 break;
7755 }
7756
7757 default:
7758 {
7759 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7760 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7761 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7762 break;
7763 }
7764 }
7765
7766 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7767 return rcStrict;
7768}
7769
7770
7771/**
7772 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7773 */
7774HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7775{
7776 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7777
7778    /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7779 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7780 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7781
7782 /* Evaluate and deliver pending events and resume guest execution. */
7783 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7784 return VINF_SUCCESS;
7785}
7786
7787
7788/**
7789 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7790 */
7791HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7792{
7793 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7794
7795 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7796 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7797 {
7798 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7799 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7800 }
7801
7802 Assert(!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx));
7803
7804 /*
7805 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7806 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7807 */
7808 uint32_t fIntrState;
7809 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7810 AssertRC(rc);
7811 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7812 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7813 {
7814 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
7815
7816 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7817 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7818 AssertRC(rc);
7819 }
7820
7821    /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7822 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7823
7824 /* Evaluate and deliver pending events and resume guest execution. */
7825 return VINF_SUCCESS;
7826}
7827
7828
7829/**
7830 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7831 */
7832HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7833{
7834 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7835 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7836}
7837
7838
7839/**
7840 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7841 */
7842HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7843{
7844 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7845 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7846}
7847
7848
7849/**
7850 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7851 */
7852HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7853{
7854 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7855
7856 /*
7857 * Get the state we need and update the exit history entry.
7858 */
7859 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7860 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7861 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7862 AssertRCReturn(rc, rc);
7863
7864 VBOXSTRICTRC rcStrict;
7865 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7866 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7867 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7868 if (!pExitRec)
7869 {
7870 /*
7871 * Regular CPUID instruction execution.
7872 */
7873 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7874 if (rcStrict == VINF_SUCCESS)
7875 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7876 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7877 {
7878 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7879 rcStrict = VINF_SUCCESS;
7880 }
7881 }
7882 else
7883 {
7884 /*
7885 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7886 */
7887 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7888 IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7889 AssertRCReturn(rc2, rc2);
7890
7891 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7892 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7893
7894 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7895 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7896
7897 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7898 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7899 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7900 }
7901 return rcStrict;
7902}
7903
7904
7905/**
7906 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7907 */
7908HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7909{
7910 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7911
7912 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7913 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7914 AssertRCReturn(rc, rc);
7915
7916 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7917 return VINF_EM_RAW_EMULATE_INSTR;
7918
7919 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7920 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7921}
7922
7923
7924/**
7925 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7926 */
7927HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7928{
7929 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7930
7931 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7932 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7933 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7934 AssertRCReturn(rc, rc);
7935
7936 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7937 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7938 {
7939 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7940 we must reset offsetting on VM-entry. See @bugref{6634}. */
7941 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7942 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7943 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7944 }
7945 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7946 {
7947 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7948 rcStrict = VINF_SUCCESS;
7949 }
7950 return rcStrict;
7951}
7952
7953
7954/**
7955 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7956 */
7957HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7958{
7959 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7960
7961 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7962 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7963 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX>(pVCpu, pVmcsInfo, __FUNCTION__);
7964 AssertRCReturn(rc, rc);
7965
7966 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7967 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7968 {
7969 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7970 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7971 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7972 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7973 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7974 }
7975 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7976 {
7977 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7978 rcStrict = VINF_SUCCESS;
7979 }
7980 return rcStrict;
7981}
7982
7983
7984/**
7985 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7986 */
7987HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7988{
7989 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7990
7991 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7992 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7993 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7994 AssertRCReturn(rc, rc);
7995
7996 VBOXSTRICTRC rcStrict = IEMExecDecodedRdpmc(pVCpu, pVmxTransient->cbExitInstr);
7997 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7998 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7999 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8000 {
8001 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8002 rcStrict = VINF_SUCCESS;
8003 }
8004 return rcStrict;
8005}
8006
8007
8008/**
8009 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
8010 */
8011HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8012{
8013 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8014
8015 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
8016 if (EMAreHypercallInstructionsEnabled(pVCpu))
8017 {
8018 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8019 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RIP
8020 | CPUMCTX_EXTRN_RFLAGS
8021 | CPUMCTX_EXTRN_CR0
8022 | CPUMCTX_EXTRN_SS
8023 | CPUMCTX_EXTRN_CS
8024 | CPUMCTX_EXTRN_EFER>(pVCpu, pVmcsInfo, __FUNCTION__);
8025 AssertRCReturn(rc, rc);
8026
8027 /* Perform the hypercall. */
8028 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
8029 if (rcStrict == VINF_SUCCESS)
8030 {
8031 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8032 AssertRCReturn(rc, rc);
8033 }
8034 else
8035 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
8036 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
8037 || RT_FAILURE(rcStrict));
8038
8039        /* If the hypercall changes anything other than the guest's general-purpose registers,
8040           we would need to reload the changed guest bits here before VM-entry. */
8041 }
8042 else
8043 Log4Func(("Hypercalls not enabled\n"));
8044
8045 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
8046 if (RT_FAILURE(rcStrict))
8047 {
8048 vmxHCSetPendingXcptUD(pVCpu);
8049 rcStrict = VINF_SUCCESS;
8050 }
8051
8052 return rcStrict;
8053}
8054
8055
8056/**
8057 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
8058 */
8059HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8060{
8061 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8062#ifndef IN_NEM_DARWIN
8063 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
8064#endif
8065
8066 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8067 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8068 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8069 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8070 AssertRCReturn(rc, rc);
8071
8072 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
8073
8074 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
8075 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8076 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8077 {
8078 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8079 rcStrict = VINF_SUCCESS;
8080 }
8081 else
8082 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
8083 VBOXSTRICTRC_VAL(rcStrict)));
8084 return rcStrict;
8085}
8086
8087
8088/**
8089 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
8090 */
8091HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8092{
8093 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8094
8095 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8096 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8097 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS>(pVCpu, pVmcsInfo, __FUNCTION__);
8098 AssertRCReturn(rc, rc);
8099
8100 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
8101 if (rcStrict == VINF_SUCCESS)
8102 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8103 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8104 {
8105 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8106 rcStrict = VINF_SUCCESS;
8107 }
8108
8109 return rcStrict;
8110}
8111
8112
8113/**
8114 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
8115 */
8116HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8117{
8118 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8119
8120 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8121 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8122 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8123 AssertRCReturn(rc, rc);
8124
8125 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
8126 if (RT_SUCCESS(rcStrict))
8127 {
8128 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8129 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
8130 rcStrict = VINF_SUCCESS;
8131 }
8132
8133 return rcStrict;
8134}
8135
8136
8137/**
8138 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
8139 * VM-exit.
8140 */
8141HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8142{
8143 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8144 return VINF_EM_RESET;
8145}
8146
8147
8148/**
8149 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
8150 */
8151HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8152{
8153 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8154
8155 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8156 AssertRCReturn(rc, rc);
8157
8158 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
8159 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
8160 rc = VINF_SUCCESS;
8161 else
8162 rc = VINF_EM_HALT;
8163
8164 if (rc != VINF_SUCCESS)
8165 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
8166 return rc;
8167}
8168
8169
8170#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8171/**
8172 * VM-exit handler for instructions that result in a \#UD exception delivered to
8173 * the guest.
8174 */
8175HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8176{
8177 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8178 vmxHCSetPendingXcptUD(pVCpu);
8179 return VINF_SUCCESS;
8180}
8181#endif
8182
8183
8184/**
8185 * VM-exit handler for expiry of the VMX-preemption timer.
8186 */
8187HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8188{
8189 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8190
8191 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
8192 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8193    Log12(("vmxHCExitPreemptTimer:\n"));
8194
8195    /* If there are any timer events pending, fall back to ring-3; otherwise resume guest execution. */
8196 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8197 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
8198 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
8199 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
8200}
8201
8202
8203/**
8204 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
8205 */
8206HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8207{
8208 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8209
8210 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8211 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8212 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
8213 AssertRCReturn(rc, rc);
8214
8215 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
8216 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
8217 : HM_CHANGED_RAISED_XCPT_MASK);
8218
8219#ifndef IN_NEM_DARWIN
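    /* Re-evaluate whether XCR0 needs swapping on VM-entry/exit: only required when the guest has
       enabled CR4.OSXSAVE and its XCR0 now differs from the host value. */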
8220 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8221 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
8222 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
8223 {
8224 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
8225 hmR0VmxUpdateStartVmFunction(pVCpu);
8226 }
8227#endif
8228
8229 return rcStrict;
8230}
8231
8232
8233/**
8234 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
8235 */
8236HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8237{
8238 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8239
8240    /** @todo Enable the new code after finding a reliable guest test-case. */
8241#if 1
8242 return VERR_EM_INTERPRETER;
8243#else
8244 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8245 | HMVMX_READ_EXIT_INSTR_INFO
8246 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8247 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8248 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8249 AssertRCReturn(rc, rc);
8250
8251 /* Paranoia. Ensure this has a memory operand. */
8252 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
8253
8254 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
8255 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
8256 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
8257 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
8258
8259 RTGCPTR GCPtrDesc;
8260 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
8261
8262 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
8263 GCPtrDesc, uType);
8264 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8265 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8266 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8267 {
8268 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8269 rcStrict = VINF_SUCCESS;
8270 }
8271 return rcStrict;
8272#endif
8273}
8274
8275
8276/**
8277 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
8278 * VM-exit.
8279 */
8280HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8281{
8282 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8283 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8284 AssertRCReturn(rc, rc);
8285
8286 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
8287 if (RT_FAILURE(rc))
8288 return rc;
8289
8290 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
8291 NOREF(uInvalidReason);
8292
8293#ifdef VBOX_STRICT
8294 uint32_t fIntrState;
8295 uint64_t u64Val;
8296 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_INFO
8297 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8298 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
8299
8300 Log4(("uInvalidReason %u\n", uInvalidReason));
8301 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
8302 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
8303 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
8304
8305 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
8306 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
8307 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
8308 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
8309 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
8310 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
8311 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
8312     Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
8313 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
8314 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
8315 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
8316 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
8317# ifndef IN_NEM_DARWIN
8318 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
8319 {
8320 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
8321 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
8322 }
8323
8324 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
8325# endif
8326#endif
8327
8328 return VERR_VMX_INVALID_GUEST_STATE;
8329}
8330
8331/**
8332 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
8333 */
8334HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8335{
8336 /*
8337 * Cumulative notes of all recognized but unexpected VM-exits.
8338 *
8339 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
8340 * nested-paging is used.
8341 *
8342      *    2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
8343 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
8344 * this function (and thereby stop VM execution) for handling such instructions.
8345 *
8346 *
8347 * VMX_EXIT_INIT_SIGNAL:
8348 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
8349 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these
8350      *    VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
8351      *
8352      *    See Intel spec. 33.14.1 "Default Treatment of SMI Delivery"
8353 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
8354 * See Intel spec. "23.8 Restrictions on VMX operation".
8355 *
8356 * VMX_EXIT_SIPI:
8357 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
8358 * activity state is used. We don't make use of it as our guests don't have direct
8359 * access to the host local APIC.
8360 *
8361 * See Intel spec. 25.3 "Other Causes of VM-exits".
8362 *
8363 * VMX_EXIT_IO_SMI:
8364 * VMX_EXIT_SMI:
8365 * This can only happen if we support dual-monitor treatment of SMI, which can be
8366 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
8367 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
8368 * VMX root mode or receive an SMI. If we get here, something funny is going on.
8369 *
8370 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
8371 * See Intel spec. 25.3 "Other Causes of VM-Exits"
8372 *
8373 * VMX_EXIT_ERR_MSR_LOAD:
8374      *    Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
8375      *    and typically indicate a bug in the hypervisor code. We thus cannot resume
8376 * execution.
8377 *
8378 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
8379 *
8380 * VMX_EXIT_ERR_MACHINE_CHECK:
8381      *    Machine-check exceptions indicate a fatal/unrecoverable hardware condition,
8382      *    including but not limited to system bus, ECC, parity, cache and TLB errors. An
8383      *    abort-class #MC exception is raised. We thus cannot assume a
8384 * reasonable chance of continuing any sort of execution and we bail.
8385 *
8386 * See Intel spec. 15.1 "Machine-check Architecture".
8387 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
8388 *
8389 * VMX_EXIT_PML_FULL:
8390 * VMX_EXIT_VIRTUALIZED_EOI:
8391 * VMX_EXIT_APIC_WRITE:
8392 * We do not currently support any of these features and thus they are all unexpected
8393 * VM-exits.
8394 *
8395 * VMX_EXIT_GDTR_IDTR_ACCESS:
8396 * VMX_EXIT_LDTR_TR_ACCESS:
8397 * VMX_EXIT_RDRAND:
8398 * VMX_EXIT_RSM:
8399 * VMX_EXIT_VMFUNC:
8400 * VMX_EXIT_ENCLS:
8401 * VMX_EXIT_RDSEED:
8402 * VMX_EXIT_XSAVES:
8403 * VMX_EXIT_XRSTORS:
8404 * VMX_EXIT_UMWAIT:
8405 * VMX_EXIT_TPAUSE:
8406 * VMX_EXIT_LOADIWKEY:
8407 * These VM-exits are -not- caused unconditionally by execution of the corresponding
8408      *    instruction. Any VM-exit for these instructions indicates a hardware problem,
8409 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
8410 *
8411 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
8412 */
8413 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8414 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
8415 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8416}
8417
8418
8419/**
8420 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
8421 */
8422HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8423{
8424 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8425
8426 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8427
8428 /** @todo Optimize this: We currently drag in the whole MSR state
8429 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8430 * MSRs required. That would require changes to IEM and possibly CPUM too.
8431 * (Should probably do it lazy fashion from CPUMAllMsrs.cpp). */
8432 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8433 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8434 int rc;
8435 switch (idMsr)
8436 {
8437 default:
8438 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8439 __FUNCTION__);
8440 AssertRCReturn(rc, rc);
8441 break;
8442 case MSR_K8_FS_BASE:
8443 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8444 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8445 AssertRCReturn(rc, rc);
8446 break;
8447 case MSR_K8_GS_BASE:
8448 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8449 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8450 AssertRCReturn(rc, rc);
8451 break;
8452 }
8453
8454 Log4Func(("ecx=%#RX32\n", idMsr));
8455
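    /* Sanity (strict builds): with MSR bitmaps active we should only see RDMSR VM-exits for MSRs
       we actually intercept, i.e. not for auto-load/store MSRs (except EFER) nor for lazy-restore
       MSRs whose reads we pass through. */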
8456#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8457 Assert(!pVmxTransient->fIsNestedGuest);
8458 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
8459 {
8460 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
8461 && idMsr != MSR_K6_EFER)
8462 {
8463 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
8464 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8465 }
8466 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8467 {
8468 Assert(pVmcsInfo->pvMsrBitmap);
8469 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8470 if (fMsrpm & VMXMSRPM_ALLOW_RD)
8471 {
8472 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
8473 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8474 }
8475 }
8476 }
8477#endif
8478
8479 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
8480 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
8481 if (rcStrict == VINF_SUCCESS)
8482 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8483 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8484 {
8485 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8486 rcStrict = VINF_SUCCESS;
8487 }
8488 else
8489 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
8490 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8491
8492 return rcStrict;
8493}
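
/*
 * Illustration only (hedged sketch, not VirtualBox code): the strict checks above rely on the
 * architectural MSR-bitmap layout from the Intel SDM -- a 4-KByte page split into four 1-KByte
 * read/write bitmaps for the low (0x0..0x1fff) and high (0xc0000000..0xc0001fff) MSR ranges,
 * where a set bit means the access VM-exits.  The helper below is a hypothetical stand-in for
 * CPUMGetVmxMsrPermission, is compiled out, and only shows how such a lookup could be done.
 */
#if 0 /* illustration only */
/** Returns true if RDMSR of @a idMsr would VM-exit according to @a pbMsrBitmap (4 KB page). */
static bool vmxSketchIsRdmsrIntercepted(uint8_t const *pbMsrBitmap, uint32_t idMsr)
{
    uint32_t offBitmap;
    if (idMsr <= UINT32_C(0x00001fff))
        offBitmap = 0x000;                              /* Read bitmap for low MSRs. */
    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
    {
        offBitmap = 0x400;                              /* Read bitmap for high MSRs. */
        idMsr    -= UINT32_C(0xc0000000);
    }
    else
        return true;                                    /* MSRs outside both ranges always VM-exit. */
    return RT_BOOL(pbMsrBitmap[offBitmap + (idMsr >> 3)] & RT_BIT(idMsr & 7));
}
#endif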
8494
8495
8496/**
8497 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
8498 */
8499HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8500{
8501 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8502
8503 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8504
8505 /*
8506     * The FS and GS base MSRs are not part of the all-MSRs mask.
8507     * Although we don't need to fetch the base itself, as it will be overwritten shortly,
8508     * loading the guest state would also load the entire segment register, including the
8509     * limit and attributes, and thus we need to import them here.
8510 */
8511    /** @todo Optimize this: We currently drag in the whole MSR state
8512     *        (CPUMCTX_EXTRN_ALL_MSRS) here. We should only fetch the MSRs that are
8513     *        actually required. That would require changes to IEM and possibly CPUM too.
8514     *        (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
8515 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8516 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8517 int rc;
8518 switch (idMsr)
8519 {
8520 default:
8521 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8522 __FUNCTION__);
8523 AssertRCReturn(rc, rc);
8524 break;
8525
8526 case MSR_K8_FS_BASE:
8527 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8528 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8529 AssertRCReturn(rc, rc);
8530 break;
8531 case MSR_K8_GS_BASE:
8532 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8533 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8534 AssertRCReturn(rc, rc);
8535 break;
8536 }
8537 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
8538
8539 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
8540 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
8541
8542 if (rcStrict == VINF_SUCCESS)
8543 {
8544 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8545
8546 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
8547 if ( idMsr == MSR_IA32_APICBASE
8548 || ( idMsr >= MSR_IA32_X2APIC_START
8549 && idMsr <= MSR_IA32_X2APIC_END))
8550 {
8551 /*
8552 * We've already saved the APIC related guest-state (TPR) in post-run phase.
8553 * When full APIC register virtualization is implemented we'll have to make
8554 * sure APIC state is saved from the VMCS before IEM changes it.
8555 */
8556 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
8557 }
8558 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
8559 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8560 else if (idMsr == MSR_K6_EFER)
8561 {
8562 /*
8563 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
8564 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
8565 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
8566 */
8567 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
8568 }
8569
8570 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
8571 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
8572 {
8573 switch (idMsr)
8574 {
8575 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
8576 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
8577 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
8578 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
8579 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
8580 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
8581 default:
8582 {
8583#ifndef IN_NEM_DARWIN
8584 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8585 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
8586 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8587 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
8588#else
8589 AssertMsgFailed(("TODO\n"));
8590#endif
8591 break;
8592 }
8593 }
8594 }
8595#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8596 else
8597 {
8598 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
8599 switch (idMsr)
8600 {
8601 case MSR_IA32_SYSENTER_CS:
8602 case MSR_IA32_SYSENTER_EIP:
8603 case MSR_IA32_SYSENTER_ESP:
8604 case MSR_K8_FS_BASE:
8605 case MSR_K8_GS_BASE:
8606 {
8607 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
8608 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8609 }
8610
8611 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
8612 default:
8613 {
8614 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8615 {
8616 /* EFER MSR writes are always intercepted. */
8617 if (idMsr != MSR_K6_EFER)
8618 {
8619 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
8620 idMsr));
8621 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8622 }
8623 }
8624
8625 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8626 {
8627 Assert(pVmcsInfo->pvMsrBitmap);
8628 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8629 if (fMsrpm & VMXMSRPM_ALLOW_WR)
8630 {
8631 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
8632 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8633 }
8634 }
8635 break;
8636 }
8637 }
8638 }
8639#endif /* VBOX_STRICT */
8640 }
8641 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8642 {
8643 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8644 rcStrict = VINF_SUCCESS;
8645 }
8646 else
8647 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
8648 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8649
8650 return rcStrict;
8651}
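
/*
 * Illustration only (hedged sketch): the x2APIC special-casing above keys off the architectural
 * MSR numbers -- IA32_APIC_BASE is MSR 0x1b and the x2APIC register space occupies MSRs 0x800
 * through 0x8ff.  The compiled-out snippet below restates that range check with literal
 * constants instead of the MSR_IA32_* symbols used by the handler; the function name is made
 * up for the example.
 */
#if 0 /* illustration only */
/** Returns true if a WRMSR to @a idMsr touches APIC state (xAPIC base or x2APIC registers). */
static bool vmxSketchIsApicMsr(uint32_t idMsr)
{
    return idMsr == UINT32_C(0x0000001b)                                      /* IA32_APIC_BASE */
        || (idMsr >= UINT32_C(0x00000800) && idMsr <= UINT32_C(0x000008ff));  /* x2APIC range.  */
}
#endif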
8652
8653
8654/**
8655 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8656 */
8657HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8658{
8659 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8660
8661    /** @todo The guest has likely hit a contended spinlock. We might want to
8662     *        poke or schedule a different guest VCPU. */
8663 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8664 if (RT_SUCCESS(rc))
8665 return VINF_EM_RAW_INTERRUPT;
8666
8667 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8668 return rc;
8669}
8670
8671
8672/**
8673 * VM-exit handler for when the TPR value is lowered below the specified
8674 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8675 */
8676HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8677{
8678 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8679 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8680
8681 /*
8682 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8683 * We'll re-evaluate pending interrupts and inject them before the next VM
8684 * entry so we can just continue execution here.
8685 */
8686 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8687 return VINF_SUCCESS;
8688}
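
/*
 * Illustration only (hedged sketch): with the TPR shadow in use, the CPU raises this VM-exit
 * when bits 7:4 of the virtual TPR (VTPR, byte 0x80 of the virtual-APIC page) drop below the
 * TPR-threshold VM-execution control (see Intel spec. "TPR Virtualization").  The compiled-out
 * snippet below restates that architectural comparison; it is not code used by the handler.
 */
#if 0 /* illustration only */
static bool vmxSketchTprBelowThreshold(uint8_t bVTpr, uint32_t uTprThreshold)
{
    return (uint32_t)(bVTpr >> 4) < (uTprThreshold & 0xf);   /* Compare the TPR class against the threshold. */
}
#endif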
8689
8690
8691/**
8692 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8693 * VM-exit.
8694 *
8695 * @retval VINF_SUCCESS when guest execution can continue.
8696 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8697 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8698 * incompatible guest state for VMX execution (real-on-v86 case).
8699 */
8700HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8701{
8702 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8703 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8704
8705 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8706 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8707 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8708
8709 VBOXSTRICTRC rcStrict;
8710 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8711 uint64_t const uExitQual = pVmxTransient->uExitQual;
8712 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
8713 switch (uAccessType)
8714 {
8715 /*
8716 * MOV to CRx.
8717 */
8718 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8719 {
8720 /*
8721 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8722 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
8723 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8724 * PAE PDPTEs as well.
8725 */
8726 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8727 AssertRCReturn(rc, rc);
8728
8729 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8730#ifndef IN_NEM_DARWIN
8731 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8732#endif
8733 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8734 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8735
8736 /*
8737             * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8738 * - When nested paging isn't used.
8739 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8740 * - We are executing in the VM debug loop.
8741 */
8742#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8743# ifndef IN_NEM_DARWIN
8744 Assert( iCrReg != 3
8745 || !VM_IS_VMX_NESTED_PAGING(pVM)
8746 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8747 || pVCpu->hmr0.s.fUsingDebugLoop);
8748# else
8749 Assert( iCrReg != 3
8750 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8751# endif
8752#endif
8753
8754 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8755 Assert( iCrReg != 8
8756 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8757
8758 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8759 AssertMsg( rcStrict == VINF_SUCCESS
8760 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8761
8762#ifndef IN_NEM_DARWIN
8763 /*
8764 * This is a kludge for handling switches back to real mode when we try to use
8765 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8766 * deal with special selector values, so we have to return to ring-3 and run
8767 * there till the selector values are V86 mode compatible.
8768 *
8769 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8770 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8771 * this function.
8772 */
8773 if ( iCrReg == 0
8774 && rcStrict == VINF_SUCCESS
8775 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8776 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8777 && (uOldCr0 & X86_CR0_PE)
8778 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8779 {
8780 /** @todo Check selectors rather than returning all the time. */
8781 Assert(!pVmxTransient->fIsNestedGuest);
8782 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8783 rcStrict = VINF_EM_RESCHEDULE_REM;
8784 }
8785#endif
8786
8787 break;
8788 }
8789
8790 /*
8791 * MOV from CRx.
8792 */
8793 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8794 {
8795 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8796 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8797
8798 /*
8799             * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8800 * - When nested paging isn't used.
8801 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8802 * - We are executing in the VM debug loop.
8803 */
8804#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8805# ifndef IN_NEM_DARWIN
8806 Assert( iCrReg != 3
8807 || !VM_IS_VMX_NESTED_PAGING(pVM)
8808 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8809 || pVCpu->hmr0.s.fLeaveDone);
8810# else
8811 Assert( iCrReg != 3
8812 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8813# endif
8814#endif
8815
8816 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8817 Assert( iCrReg != 8
8818 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8819
8820 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8821 break;
8822 }
8823
8824 /*
8825 * CLTS (Clear Task-Switch Flag in CR0).
8826 */
8827 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8828 {
8829 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8830 break;
8831 }
8832
8833 /*
8834 * LMSW (Load Machine-Status Word into CR0).
8835 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8836 */
8837 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8838 {
8839 RTGCPTR GCPtrEffDst;
8840 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8841 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8842 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8843 if (fMemOperand)
8844 {
8845 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
8846 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8847 }
8848 else
8849 GCPtrEffDst = NIL_RTGCPTR;
8850 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8851 break;
8852 }
8853
8854 default:
8855 {
8856 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8857 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8858 }
8859 }
8860
8861 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8862 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8863 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8864
8865 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8866 NOREF(pVM);
8867 return rcStrict;
8868}
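
/*
 * Illustration only (hedged sketch): the VMX_EXIT_QUAL_CRX_* accessors used above decode the
 * control-register-access exit qualification, whose layout is architectural (Intel SDM, "Exit
 * Qualification for Control-Register Accesses"): bits 3:0 give the CR number, bits 5:4 the
 * access type (0 = MOV to CR, 1 = MOV from CR, 2 = CLTS, 3 = LMSW), bit 6 the LMSW operand
 * type (memory vs. register), bits 11:8 the GPR and bits 31:16 the LMSW source data.  The
 * compiled-out decoder below is a hypothetical restatement of those accessors.
 */
#if 0 /* illustration only */
typedef struct VMXSKETCHCRXQUAL
{
    uint8_t  iCrReg;        /**< Bits 3:0   - control register number. */
    uint8_t  uAccessType;   /**< Bits 5:4   - 0=MOV to CR, 1=MOV from CR, 2=CLTS, 3=LMSW. */
    bool     fLmswMemOp;    /**< Bit 6      - LMSW operand is in memory. */
    uint8_t  iGReg;         /**< Bits 11:8  - general-purpose register. */
    uint16_t uLmswData;     /**< Bits 31:16 - LMSW source data. */
} VMXSKETCHCRXQUAL;

static VMXSKETCHCRXQUAL vmxSketchDecodeCrxQual(uint64_t uExitQual)
{
    VMXSKETCHCRXQUAL Decoded;
    Decoded.iCrReg      = (uint8_t) ( uExitQual        & 0xf);
    Decoded.uAccessType = (uint8_t) ((uExitQual >>  4) & 0x3);
    Decoded.fLmswMemOp  = RT_BOOL(   (uExitQual >>  6) & 0x1);
    Decoded.iGReg       = (uint8_t) ((uExitQual >>  8) & 0xf);
    Decoded.uLmswData   = (uint16_t)((uExitQual >> 16) & 0xffff);
    return Decoded;
}
#endif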
8869
8870
8871/**
8872 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8873 * VM-exit.
8874 */
8875HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8876{
8877 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8878 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8879
8880 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8881 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8882 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8883 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8884#define VMX_HC_EXIT_IO_INSTR_INITIAL_REGS (IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER)
8885    /* The EFER MSR is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8886 int rc = vmxHCImportGuestState<VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8887 AssertRCReturn(rc, rc);
8888
8889    /* See Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
8890 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8891 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8892 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8893 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8894 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8895 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8896 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8897
8898 /*
8899 * Update exit history to see if this exit can be optimized.
8900 */
8901 VBOXSTRICTRC rcStrict;
8902 PCEMEXITREC pExitRec = NULL;
8903 if ( !fGstStepping
8904 && !fDbgStepping)
8905 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8906 !fIOString
8907 ? !fIOWrite
8908 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8909 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8910 : !fIOWrite
8911 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8912 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8913 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8914 if (!pExitRec)
8915 {
8916 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8917 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
8918
8919 uint32_t const cbValue = s_aIOSizes[uIOSize];
8920 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8921 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8922 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8923 if (fIOString)
8924 {
8925 /*
8926 * INS/OUTS - I/O String instruction.
8927 *
8928 * Use instruction-information if available, otherwise fall back on
8929 * interpreting the instruction.
8930 */
8931 Log4Func(("cs:rip=%#04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8932 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8933 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8934 if (fInsOutsInfo)
8935 {
8936 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
8937 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8938 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8939 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8940 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8941 if (fIOWrite)
8942 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8943 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8944 else
8945 {
8946 /*
8947 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8948 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8949 * See Intel Instruction spec. for "INS".
8950 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8951 */
8952 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8953 }
8954 }
8955 else
8956 rcStrict = IEMExecOne(pVCpu);
8957
8958 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8959 fUpdateRipAlready = true;
8960 }
8961 else
8962 {
8963 /*
8964 * IN/OUT - I/O instruction.
8965 */
8966 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8967 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8968 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8969 if (fIOWrite)
8970 {
8971 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8972 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8973#ifndef IN_NEM_DARWIN
8974 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8975 && !pCtx->eflags.Bits.u1TF)
8976 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8977#endif
8978 }
8979 else
8980 {
8981 uint32_t u32Result = 0;
8982 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8983 if (IOM_SUCCESS(rcStrict))
8984 {
8985 /* Save result of I/O IN instr. in AL/AX/EAX. */
8986 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8987 }
8988#ifndef IN_NEM_DARWIN
8989 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8990 && !pCtx->eflags.Bits.u1TF)
8991 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8992#endif
8993 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8994 }
8995 }
8996
8997 if (IOM_SUCCESS(rcStrict))
8998 {
8999 if (!fUpdateRipAlready)
9000 {
9001 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
9002 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
9003 }
9004
9005 /*
9006             * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault
9007             * guru meditation while booting a Fedora 17 64-bit guest.
9008             *
9009             * See the Intel instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
9010 */
9011 if (fIOString)
9012 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
9013
9014 /*
9015 * If any I/O breakpoints are armed, we need to check if one triggered
9016 * and take appropriate action.
9017 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
9018 */
9019#if 1
9020 AssertCompile(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7);
9021#else
9022 AssertCompile(!(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7));
9023 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo);
9024 AssertRCReturn(rc, rc);
9025#endif
9026
9027 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
9028 * execution engines about whether hyper BPs and such are pending. */
9029 uint32_t const uDr7 = pCtx->dr[7];
9030 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
9031 && X86_DR7_ANY_RW_IO(uDr7)
9032 && (pCtx->cr4 & X86_CR4_DE))
9033 || DBGFBpIsHwIoArmed(pVM)))
9034 {
9035 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
9036
9037#ifndef IN_NEM_DARWIN
9038 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
9039 VMMRZCallRing3Disable(pVCpu);
9040 HM_DISABLE_PREEMPT(pVCpu);
9041
9042 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
9043
9044 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
9045 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
9046 {
9047 /* Raise #DB. */
9048 if (fIsGuestDbgActive)
9049 ASMSetDR6(pCtx->dr[6]);
9050 if (pCtx->dr[7] != uDr7)
9051 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
9052
9053 vmxHCSetPendingXcptDB(pVCpu);
9054 }
9055 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
9056 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
9057 else if ( rcStrict2 != VINF_SUCCESS
9058 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
9059 rcStrict = rcStrict2;
9060 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
9061
9062 HM_RESTORE_PREEMPT();
9063 VMMRZCallRing3Enable(pVCpu);
9064#else
9065 /** @todo */
9066#endif
9067 }
9068 }
9069
9070#ifdef VBOX_STRICT
9071 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
9072 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
9073 Assert(!fIOWrite);
9074 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
9075 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
9076 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
9077 Assert(fIOWrite);
9078 else
9079 {
9080# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
9081 * statuses, that the VMM device and some others may return. See
9082 * IOM_SUCCESS() for guidance. */
9083 AssertMsg( RT_FAILURE(rcStrict)
9084 || rcStrict == VINF_SUCCESS
9085 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
9086 || rcStrict == VINF_EM_DBG_BREAKPOINT
9087 || rcStrict == VINF_EM_RAW_GUEST_TRAP
9088 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9089# endif
9090 }
9091#endif
9092 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
9093 }
9094 else
9095 {
9096 /*
9097 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
9098 */
9099 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
9100 VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
9101 AssertRCReturn(rc2, rc2);
9102 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
9103 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
9104 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
9105 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9106 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
9107 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
9108
9109 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9110 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9111
9112 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9113 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9114 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9115 }
9116 return rcStrict;
9117}
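
/*
 * Illustration only (hedged sketch): the handler above pulls the port, size, direction and
 * string/REP flags out of the I/O exit qualification.  The layout is architectural (Intel SDM,
 * "Exit Qualification for I/O Instructions"): bits 2:0 encode the access size minus one (0, 1
 * or 3), bit 3 the direction (1 = IN), bit 4 the string-instruction flag, bit 5 the REP prefix,
 * bit 6 the operand encoding (DX vs. immediate) and bits 31:16 the port number.  The
 * compiled-out snippet below restates that decoding with plain shifts and masks.
 */
#if 0 /* illustration only */
static void vmxSketchDecodeIoQual(uint64_t uExitQual)
{
    uint32_t const cbAccess = (uint32_t)(uExitQual & 0x7) + 1;      /* 1, 2 or 4 bytes. */
    bool const     fIn      = RT_BOOL(uExitQual & RT_BIT_64(3));
    bool const     fString  = RT_BOOL(uExitQual & RT_BIT_64(4));
    bool const     fRep     = RT_BOOL(uExitQual & RT_BIT_64(5));
    bool const     fImmPort = RT_BOOL(uExitQual & RT_BIT_64(6));
    uint16_t const uPort    = (uint16_t)(uExitQual >> 16);
    RT_NOREF(cbAccess, fIn, fString, fRep, fImmPort, uPort);
}
#endif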
9118
9119
9120/**
9121 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
9122 * VM-exit.
9123 */
9124HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9125{
9126 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9127
9128    /* Check if this task-switch occurred while delivering an event through the guest IDT. */
9129 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9130 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
9131 {
9132 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_INFO>(pVCpu, pVmxTransient);
9133 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9134 {
9135 uint32_t uErrCode;
9136 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
9137 {
9138 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9139 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
9140 }
9141 else
9142 uErrCode = 0;
9143
9144 RTGCUINTPTR GCPtrFaultAddress;
9145 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
9146 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
9147 else
9148 GCPtrFaultAddress = 0;
9149
9150 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9151
9152 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
9153 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
9154
9155 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
9156 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
9157 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9158 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9159 }
9160 }
9161
9162 /* Fall back to the interpreter to emulate the task-switch. */
9163 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9164 return VERR_EM_INTERPRETER;
9165}
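
/*
 * Illustration only (hedged sketch): the IDT-vectoring information field consulted above uses
 * the architectural event-information layout (Intel SDM): bits 7:0 hold the vector, bits 10:8
 * the event type (0 = external interrupt, 2 = NMI, 3 = hardware exception, 4 = software
 * interrupt, 5 = privileged software exception, 6 = software exception), bit 11 the
 * error-code-valid flag and bit 31 the valid flag.  The compiled-out snippet below restates
 * that decoding; it is not the VMX_IDT_VECTORING_INFO_* macro set.
 */
#if 0 /* illustration only */
static void vmxSketchDecodeIdtVectoringInfo(uint32_t uIdtVectoringInfo)
{
    bool const    fValid      = RT_BOOL(uIdtVectoringInfo & RT_BIT_32(31));
    uint8_t const uVector     = (uint8_t)(uIdtVectoringInfo & 0xff);
    uint8_t const uType       = (uint8_t)((uIdtVectoringInfo >> 8) & 0x7);
    bool const    fErrCdValid = RT_BOOL(uIdtVectoringInfo & RT_BIT_32(11));
    RT_NOREF(fValid, uVector, uType, fErrCdValid);
}
#endif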
9166
9167
9168/**
9169 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
9170 */
9171HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9172{
9173 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9174
9175 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9176 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
9177 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9178 AssertRC(rc);
9179 return VINF_EM_DBG_STEPPED;
9180}
9181
9182
9183/**
9184 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
9185 */
9186HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9187{
9188 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9189 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
9190
9191 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9192 | HMVMX_READ_EXIT_INSTR_LEN
9193 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9194 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9195 | HMVMX_READ_IDT_VECTORING_INFO
9196 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9197
9198 /*
9199 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9200 */
9201 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9202 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9203 {
9204        /* For some crazy guests, if event delivery causes an APIC-access VM-exit, go to instruction emulation. */
9205 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
9206 {
9207 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9208 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9209 }
9210 }
9211 else
9212 {
9213 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9214 return rcStrict;
9215 }
9216
9217 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
9218 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9219 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9220 AssertRCReturn(rc, rc);
9221
9222    /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
9223 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
9224 switch (uAccessType)
9225 {
9226#ifndef IN_NEM_DARWIN
9227 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
9228 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
9229 {
9230 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
9231 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
9232 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
9233
9234 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
9235 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
9236 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
9237 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
9238 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
9239
9240 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
9241 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
9242 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9243 if ( rcStrict == VINF_SUCCESS
9244 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9245 || rcStrict == VERR_PAGE_NOT_PRESENT)
9246 {
9247 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9248 | HM_CHANGED_GUEST_APIC_TPR);
9249 rcStrict = VINF_SUCCESS;
9250 }
9251 break;
9252 }
9253#else
9254 /** @todo */
9255#endif
9256
9257 default:
9258 {
9259 Log4Func(("uAccessType=%#x\n", uAccessType));
9260 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
9261 break;
9262 }
9263 }
9264
9265 if (rcStrict != VINF_SUCCESS)
9266 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
9267 return rcStrict;
9268}
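
/*
 * Illustration only (hedged sketch): for linear APIC accesses the handler above rebuilds the
 * faulting guest-physical address from the APIC base and the exit qualification.  Per the
 * Intel SDM ("Exit Qualification for APIC-Access VM Exits"), bits 11:0 give the page offset of
 * the access and bits 15:12 the access type (0 = linear data read, 1 = linear data write,
 * 2 = linear instruction fetch, 3 = linear access during event delivery, with further values
 * for guest-physical accesses).  The compiled-out snippet below restates the address
 * computation used above with a made-up function name.
 */
#if 0 /* illustration only */
static uint64_t vmxSketchApicAccessGCPhys(uint64_t uApicBaseMsr, uint64_t uExitQual)
{
    uint64_t const GCPhysApicPage = uApicBaseMsr & ~(uint64_t)0xfff;    /* 4 KB aligned APIC page. */
    uint64_t const offAccess      = uExitQual & 0xfff;                  /* Bits 11:0 of the qualification. */
    return GCPhysApicPage + offAccess;
}
#endif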
9269
9270
9271/**
9272 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
9273 * VM-exit.
9274 */
9275HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9276{
9277 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9278 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9279
9280 /*
9281 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
9282 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
9283 * must emulate the MOV DRx access.
9284 */
9285 if (!pVmxTransient->fIsNestedGuest)
9286 {
9287 /* We should -not- get this VM-exit if the guest's debug registers were active. */
9288 if ( pVmxTransient->fWasGuestDebugStateActive
9289#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9290 && !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx
9291#endif
9292 )
9293 {
9294 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
9295 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
9296 }
9297
9298 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
9299 && !pVmxTransient->fWasHyperDebugStateActive)
9300 {
9301 Assert(!DBGFIsStepping(pVCpu));
9302 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
9303
9304 /* Whether we disable intercepting MOV DRx instructions and resume
9305 the current one, or emulate it and keep intercepting them is
9306 configurable. Though it usually comes down to whether there are
9307 any new DR6 & DR7 bits (RTM) we want to hide from the guest. */
9308#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9309 bool const fResumeInstruction = !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx;
9310#else
9311 bool const fResumeInstruction = true;
9312#endif
9313 if (fResumeInstruction)
9314 {
9315 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
9316 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9317 AssertRC(rc);
9318 }
9319
9320#ifndef IN_NEM_DARWIN
9321 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
9322 VMMRZCallRing3Disable(pVCpu);
9323 HM_DISABLE_PREEMPT(pVCpu);
9324
9325 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
9326 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
9327 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9328
9329 HM_RESTORE_PREEMPT();
9330 VMMRZCallRing3Enable(pVCpu);
9331#else
9332 CPUMR3NemActivateGuestDebugState(pVCpu);
9333 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9334 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
9335#endif
9336
9337 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
9338 if (fResumeInstruction)
9339 {
9340#ifdef VBOX_WITH_STATISTICS
9341 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9342 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9343 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9344 else
9345 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9346#endif
9347 return VINF_SUCCESS;
9348 }
9349 }
9350 }
9351
9352 /*
9353 * Import state. We must have DR7 loaded here as it's always consulted,
9354 * both for reading and writing. The other debug registers are never
9355 * exported as such.
9356 */
9357 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9358 int rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
9359 | CPUMCTX_EXTRN_GPRS_MASK
9360 | CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo, __FUNCTION__);
9361 AssertRCReturn(rc, rc);
9362
9363 uint8_t const iGReg = VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual);
9364 uint8_t const iDrReg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
9365 Log4Func(("cs:rip=%#04x:%08RX64 r%d %s dr%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iGReg,
9366 VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE ? "->" : "<-", iDrReg));
9367
9368 VBOXSTRICTRC rcStrict;
9369 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9370 {
9371 /*
9372 * Write DRx register.
9373 */
9374 rcStrict = IEMExecDecodedMovDRxWrite(pVCpu, pVmxTransient->cbExitInstr, iDrReg, iGReg);
9375 AssertMsg( rcStrict == VINF_SUCCESS
9376 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9377
9378 if (rcStrict == VINF_SUCCESS)
9379 {
9380 /** @todo r=bird: Not sure why we always flag DR7 as modified here, but I've
9381 * kept it for now to avoid breaking something non-obvious. */
9382 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9383 | HM_CHANGED_GUEST_DR7);
9384 /* Update the DR6 register if guest debug state is active, otherwise we'll
9385 trash it when calling CPUMR0DebugStateMaybeSaveGuestAndRestoreHost. */
9386 if (iDrReg == 6 && CPUMIsGuestDebugStateActive(pVCpu))
9387 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
9388 Log4Func(("r%d=%#RX64 => dr%d=%#RX64\n", iGReg, pVCpu->cpum.GstCtx.aGRegs[iGReg].u,
9389 iDrReg, pVCpu->cpum.GstCtx.dr[iDrReg]));
9390 }
9391 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9392 {
9393 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9394 rcStrict = VINF_SUCCESS;
9395 }
9396
9397 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9398 }
9399 else
9400 {
9401 /*
9402 * Read DRx register into a general purpose register.
9403 */
9404 rcStrict = IEMExecDecodedMovDRxRead(pVCpu, pVmxTransient->cbExitInstr, iGReg, iDrReg);
9405 AssertMsg( rcStrict == VINF_SUCCESS
9406 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9407
9408 if (rcStrict == VINF_SUCCESS)
9409 {
9410 if (iGReg == X86_GREG_xSP)
9411 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9412 | HM_CHANGED_GUEST_RSP);
9413 else
9414 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9415 }
9416 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9417 {
9418 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9419 rcStrict = VINF_SUCCESS;
9420 }
9421
9422 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9423 }
9424
9425 return rcStrict;
9426}
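
/*
 * Illustration only (hedged sketch): the VMX_EXIT_QUAL_DRX_* accessors used above decode the
 * debug-register-access exit qualification, which is architectural (Intel SDM, "Exit
 * Qualification for MOV DR"): bits 2:0 give the debug register number, bit 4 the direction
 * (0 = MOV to DR, 1 = MOV from DR) and bits 11:8 the general-purpose register.  The
 * compiled-out snippet below restates that decoding with plain shifts and masks.
 */
#if 0 /* illustration only */
static void vmxSketchDecodeDrxQual(uint64_t uExitQual)
{
    uint8_t const iDrReg   = (uint8_t)(uExitQual & 0x7);            /* Bits 2:0. */
    bool const    fDirRead = RT_BOOL(uExitQual & RT_BIT_64(4));     /* 1 = MOV from DRx to GPR. */
    uint8_t const iGReg    = (uint8_t)((uExitQual >> 8) & 0xf);     /* Bits 11:8. */
    RT_NOREF(iDrReg, fDirRead, iGReg);
}
#endif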
9427
9428
9429/**
9430 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
9431 * Conditional VM-exit.
9432 */
9433HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9434{
9435 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9436
9437#ifndef IN_NEM_DARWIN
9438 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9439
9440 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_LEN
9441 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9442 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9443 | HMVMX_READ_IDT_VECTORING_INFO
9444 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9445 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9446
9447 /*
9448 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9449 */
9450 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9451 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9452 {
9453 /*
9454 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
9455 * instruction emulation to inject the original event. Otherwise, injecting the original event
9456 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
9457 */
9458 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9459 { /* likely */ }
9460 else
9461 {
9462 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9463# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9464 /** @todo NSTVMX: Think about how this should be handled. */
9465 if (pVmxTransient->fIsNestedGuest)
9466 return VERR_VMX_IPE_3;
9467# endif
9468 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9469 }
9470 }
9471 else
9472 {
9473 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9474 return rcStrict;
9475 }
9476
9477 /*
9478 * Get sufficient state and update the exit history entry.
9479 */
9480 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9481 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9482 AssertRCReturn(rc, rc);
9483
9484 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9485 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
9486 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
9487 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
9488 if (!pExitRec)
9489 {
9490 /*
9491 * If we succeed, resume guest execution.
9492     * If we fail to interpret the instruction because we couldn't get the guest-physical address
9493     * of the page containing the instruction via the guest's page tables (we would invalidate the
9494     * guest page in the host TLB), resume execution anyway; the resulting guest page fault lets
9495     * the guest handle this weird case. See @bugref{6043}.
9496 */
9497 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9498/** @todo bird: We can probably just go straight to IOM here and assume that
9499 * it's MMIO, then fall back on PGM if that hunch didn't work out so
9500 * well. However, we need to address the aliasing workarounds that
9501 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
9502 *
9503 * Might also be interesting to see if we can get this done more or
9504 * less locklessly inside IOM. Need to consider the lookup table
9505 * updating and use a bit more carefully first (or do all updates via
9506 * rendezvous) */
9507 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, &pVCpu->cpum.GstCtx, GCPhys, UINT32_MAX);
9508 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
9509 if ( rcStrict == VINF_SUCCESS
9510 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9511 || rcStrict == VERR_PAGE_NOT_PRESENT)
9512 {
9513 /* Successfully handled MMIO operation. */
9514 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9515 | HM_CHANGED_GUEST_APIC_TPR);
9516 rcStrict = VINF_SUCCESS;
9517 }
9518 }
9519 else
9520 {
9521 /*
9522 * Frequent exit or something needing probing. Call EMHistoryExec.
9523 */
9524 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
9525 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
9526
9527 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9528 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9529
9530 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9531 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9532 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9533 }
9534 return rcStrict;
9535#else
9536 AssertFailed();
9537 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
9538#endif
9539}
9540
9541
9542/**
9543 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
9544 * VM-exit.
9545 */
9546HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9547{
9548 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9549#ifndef IN_NEM_DARWIN
9550 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9551
9552 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9553 | HMVMX_READ_EXIT_INSTR_LEN
9554 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9555 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9556 | HMVMX_READ_IDT_VECTORING_INFO
9557 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9558 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9559
9560 /*
9561 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9562 */
9563 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9564 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9565 {
9566 /*
9567 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
9568 * we shall resolve the nested #PF and re-inject the original event.
9569 */
9570 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9571 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
9572 }
9573 else
9574 {
9575 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9576 return rcStrict;
9577 }
9578
9579 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9580 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9581 AssertRCReturn(rc, rc);
9582
9583 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9584 uint64_t const uExitQual = pVmxTransient->uExitQual;
9585 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
9586
9587 RTGCUINT uErrorCode = 0;
9588 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
9589 uErrorCode |= X86_TRAP_PF_ID;
9590 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9591 uErrorCode |= X86_TRAP_PF_RW;
9592 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
9593 uErrorCode |= X86_TRAP_PF_P;
9594
9595 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9596 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%08RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
9597
9598 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9599
9600 /*
9601 * Handle the pagefault trap for the nested shadow table.
9602 */
9603 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
9604 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, pCtx, GCPhys);
9605 TRPMResetTrap(pVCpu);
9606
9607 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
9608 if ( rcStrict == VINF_SUCCESS
9609 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9610 || rcStrict == VERR_PAGE_NOT_PRESENT)
9611 {
9612 /* Successfully synced our nested page tables. */
9613 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
9614 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
9615 return VINF_SUCCESS;
9616 }
9617 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9618 return rcStrict;
9619
9620#else /* IN_NEM_DARWIN */
9621 PVM pVM = pVCpu->CTX_SUFF(pVM);
9622 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
9623 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9624 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9625 vmxHCImportGuestRip(pVCpu);
9626 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
9627
9628 /*
9629 * Ask PGM for information about the given GCPhys. We need to check if we're
9630 * out of sync first.
9631 */
9632 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE),
9633 false,
9634 false };
9635 PGMPHYSNEMPAGEINFO Info;
9636 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
9637 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
9638 if (RT_SUCCESS(rc))
9639 {
9640 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9641 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
9642 {
9643 if (State.fCanResume)
9644 {
9645 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
9646 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9647 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9648 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9649 State.fDidSomething ? "" : " no-change"));
9650 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
9651 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9652 return VINF_SUCCESS;
9653 }
9654 }
9655
9656 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
9657 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9658 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9659 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9660 State.fDidSomething ? "" : " no-change"));
9661 }
9662 else
9663 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
9664 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9665 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
9666
9667 /*
9668 * Emulate the memory access, either access handler or special memory.
9669 */
9670 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
9671 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9672 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
9673 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
9674 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9675
9676 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9677 AssertRCReturn(rc, rc);
9678
9679 VBOXSTRICTRC rcStrict;
9680 if (!pExitRec)
9681 rcStrict = IEMExecOne(pVCpu);
9682 else
9683 {
9684 /* Frequent access or probing. */
9685 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9686 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9687 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9688 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9689 }
9690
9691 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9692
9693 Log4Func(("EPT return rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9694 return rcStrict;
9695#endif /* IN_NEM_DARWIN */
9696}
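
/*
 * Illustration only (hedged sketch): the error code handed to PGM above is synthesized from the
 * EPT-violation exit qualification, whose low bits are architectural (Intel SDM, "Exit
 * Qualification for EPT Violations"): bit 0 = read access, bit 1 = write access, bit 2 =
 * instruction fetch, and bits 5:3 report whether the EPT entry permitted read/write/execute
 * (i.e. whether the translation was present at all).  The compiled-out snippet below restates
 * the same mapping to #PF-style error-code bits that the handler performs.
 */
#if 0 /* illustration only */
static uint32_t vmxSketchEptQualToPfErrCode(uint64_t uExitQual)
{
    uint32_t uErrCode = 0;
    if (uExitQual & RT_BIT_64(2))                                   /* Instruction fetch. */
        uErrCode |= X86_TRAP_PF_ID;
    if (uExitQual & RT_BIT_64(1))                                   /* Write access. */
        uErrCode |= X86_TRAP_PF_RW;
    if (uExitQual & (RT_BIT_64(3) | RT_BIT_64(4) | RT_BIT_64(5)))   /* EPT entry had some permission => present. */
        uErrCode |= X86_TRAP_PF_P;
    return uErrCode;
}
#endif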
9697
9698#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9699
9700/**
9701 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
9702 */
9703HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9704{
9705 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9706
9707 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9708 | HMVMX_READ_EXIT_INSTR_INFO
9709 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9710 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9711 | CPUMCTX_EXTRN_SREG_MASK
9712 | CPUMCTX_EXTRN_HWVIRT
9713 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9714 AssertRCReturn(rc, rc);
9715
9716 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9717
9718 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9719 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9720
9721 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
9722 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9723 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9724 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9725 {
9726 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9727 rcStrict = VINF_SUCCESS;
9728 }
9729 return rcStrict;
9730}
9731
9732
9733/**
9734 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9735 */
9736HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9737{
9738 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9739
9740 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
9741 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9742 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9743 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9744 AssertRCReturn(rc, rc);
9745
9746 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9747
9748 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9749 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9750 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9751 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9752 {
9753 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9754 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9755 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9756 }
9757 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9758 return rcStrict;
9759}
9760
9761
9762/**
9763 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9764 */
9765HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9766{
9767 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9768
9769 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9770 | HMVMX_READ_EXIT_INSTR_INFO
9771 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9772 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9773 | CPUMCTX_EXTRN_SREG_MASK
9774 | CPUMCTX_EXTRN_HWVIRT
9775 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9776 AssertRCReturn(rc, rc);
9777
9778 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9779
9780 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9781 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9782
9783 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9784 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9785 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9786 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9787 {
9788 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9789 rcStrict = VINF_SUCCESS;
9790 }
9791 return rcStrict;
9792}
9793
9794
9795/**
9796 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9797 */
9798HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9799{
9800 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9801
9802 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9803 | HMVMX_READ_EXIT_INSTR_INFO
9804 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9805 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9806 | CPUMCTX_EXTRN_SREG_MASK
9807 | CPUMCTX_EXTRN_HWVIRT
9808 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9809 AssertRCReturn(rc, rc);
9810
9811 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9812
9813 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9814 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9815
9816 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9817 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9818 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9819 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9820 {
9821 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9822 rcStrict = VINF_SUCCESS;
9823 }
9824 return rcStrict;
9825}
9826
9827
9828/**
9829 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9830 */
9831HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9832{
9833 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9834
9835 /*
9836 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9837     * thus might not need to import the shadow VMCS state. It's safer to do so anyway,
9838     * just in case code elsewhere dares to look at unsynced VMCS fields.
9839 */
9840 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9841 | HMVMX_READ_EXIT_INSTR_INFO
9842 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9843 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9844 | CPUMCTX_EXTRN_SREG_MASK
9845 | CPUMCTX_EXTRN_HWVIRT
9846 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9847 AssertRCReturn(rc, rc);
9848
9849 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9850
9851 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9852 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9853 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9854
9855 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9856 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9857 {
9858 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9859
9860# if 0 //ndef IN_NEM_DARWIN /** @todo this needs serious tuning still, slows down things enormously. */
9861 /* Try for exit optimization. This is done on the following instruction because
9862 it would be a waste of time to have to reinterpret the already decoded
9863 vmread instruction. */
9864 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_VMREAD));
9865 if (pExitRec)
9866 {
9867 /* Frequent access or probing. */
9868 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9869 AssertRCReturn(rc, rc);
9870
9871 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9872 Log4(("vmread/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9873 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9874 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9875 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9876 }
9877# endif
9878 }
9879 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9880 {
9881 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9882 rcStrict = VINF_SUCCESS;
9883 }
9884 return rcStrict;
9885}
9886
9887
9888/**
9889 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9890 */
9891HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9892{
9893 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9894
9895 /* For now, import the entire VMCS state as we would be switching VMCS on a successful VMRESUME;
9896 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9897 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9898 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9899 AssertRCReturn(rc, rc);
9900
9901 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9902
9903 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9904 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9905 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9906 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9907 {
9908 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9909 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9910 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9911 }
9912 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9913 return rcStrict;
9914}
9915
9916
9917/**
9918 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9919 */
9920HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9921{
9922 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9923
9924 /*
9925 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, our HM hook
9926 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and
9927 * flags re-loading the entire shadow VMCS, so we should save the entire shadow VMCS here.
9928 */
9929 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9930 | HMVMX_READ_EXIT_INSTR_INFO
9931 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9932 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9933 | CPUMCTX_EXTRN_SREG_MASK
9934 | CPUMCTX_EXTRN_HWVIRT
9935 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9936 AssertRCReturn(rc, rc);
9937
9938 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9939
9940 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9941 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9942 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9943
9944 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9945 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9946 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9947 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9948 {
9949 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9950 rcStrict = VINF_SUCCESS;
9951 }
9952 return rcStrict;
9953}
9954
9955
9956/**
9957 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9958 */
9959HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9960{
9961 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9962
9963 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9964 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_CR4
9965 | CPUMCTX_EXTRN_HWVIRT
9966 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9967 AssertRCReturn(rc, rc);
9968
9969 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9970
9971 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9972 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9973 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9974 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9975 {
9976 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9977 rcStrict = VINF_SUCCESS;
9978 }
9979 return rcStrict;
9980}
9981
9982
9983/**
9984 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9985 */
9986HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9987{
9988 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9989
9990 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9991 | HMVMX_READ_EXIT_INSTR_INFO
9992 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9993 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9994 | CPUMCTX_EXTRN_SREG_MASK
9995 | CPUMCTX_EXTRN_HWVIRT
9996 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9997 AssertRCReturn(rc, rc);
9998
9999 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
10000
10001 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10002 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
10003
10004 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
10005 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10006 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
10007 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10008 {
10009 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10010 rcStrict = VINF_SUCCESS;
10011 }
10012 return rcStrict;
10013}
10014
10015
10016/**
10017 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
10018 */
10019HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10020{
10021 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10022
10023 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10024 | HMVMX_READ_EXIT_INSTR_INFO
10025 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10026 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
10027 | CPUMCTX_EXTRN_SREG_MASK
10028 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10029 AssertRCReturn(rc, rc);
10030
10031 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
10032
10033 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10034 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
10035
10036 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
10037 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10038 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
10039 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10040 {
10041 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10042 rcStrict = VINF_SUCCESS;
10043 }
10044 return rcStrict;
10045}
10046
10047
10048# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10049/**
10050 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
10051 */
10052HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10053{
10054 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10055
10056 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10057 | HMVMX_READ_EXIT_INSTR_INFO
10058 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10059 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
10060 | CPUMCTX_EXTRN_SREG_MASK
10061 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10062 AssertRCReturn(rc, rc);
10063
10064 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
10065
10066 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10067 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
10068
10069 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
10070 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10071 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
10072 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10073 {
10074 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10075 rcStrict = VINF_SUCCESS;
10076 }
10077 return rcStrict;
10078}
10079# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10080#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10081/** @} */
10082
10083
10084#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10085/** @name Nested-guest VM-exit handlers.
10086 * @{
10087 */
10088/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10089/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10090/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10091
10092/**
10093 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
10094 * Conditional VM-exit.
10095 */
10096HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10097{
10098 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10099
10100 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
10101
10102 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
10103 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
10104 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
10105
10106 switch (uExitIntType)
10107 {
10108# ifndef IN_NEM_DARWIN
10109 /*
10110 * Physical NMIs:
10111 * We shouldn't direct host physical NMIs to the nested-guest. Dispatch it to the host.
10112 */
10113 case VMX_EXIT_INT_INFO_TYPE_NMI:
10114 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
10115# endif
10116
10117 /*
10118 * Hardware exceptions,
10119 * Software exceptions,
10120 * Privileged software exceptions:
10121 * Figure out if the exception must be delivered to the guest or the nested-guest.
10122 */
10123 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
10124 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
10125 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
10126 {
10127 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10128 | HMVMX_READ_EXIT_INSTR_LEN
10129 | HMVMX_READ_IDT_VECTORING_INFO
10130 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10131
10132 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10133 if (CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo), pVmxTransient->uExitIntErrorCode))
10134 {
10135 /* Exit qualification is required for debug and page-fault exceptions. */
10136 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10137
10138 /*
10139 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
10140 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
10141 * length. However, if delivery of a software interrupt, software exception or privileged
10142 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
10143 */
10144 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10145 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT(pVmxTransient->uExitIntInfo,
10146 pVmxTransient->uExitIntErrorCode,
10147 pVmxTransient->uIdtVectoringInfo,
10148 pVmxTransient->uIdtVectoringErrorCode);
10149#ifdef DEBUG_ramshankar
10150 vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
10151 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n",
10152 pVmxTransient->uExitIntInfo, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
10153 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
10154 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n",
10155 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
10156#endif
10157 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
10158 }
10159
10160 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
10161 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10162 return vmxHCExitXcpt(pVCpu, pVmxTransient);
10163 }
10164
10165 /*
10166 * Software interrupts:
10167 * VM-exits cannot be caused by software interrupts.
10168 *
10169 * External interrupts:
10170 * This should only happen when "acknowledge external interrupts on VM-exit"
10171 * control is set. However, we never set this when executing a guest or
10172 * nested-guest. For nested-guests it is emulated while injecting interrupts into
10173 * the guest.
10174 */
10175 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10176 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
10177 default:
10178 {
10179 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
10180 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
10181 }
10182 }
10183}
10184
10185
10186/**
10187 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
10188 * Unconditional VM-exit.
10189 */
10190HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10191{
10192 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10193 return IEMExecVmxVmexitTripleFault(pVCpu);
10194}
10195
10196
10197/**
10198 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
10199 */
10200HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10201{
10202 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10203
10204 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
10205 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10206 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
10207}
10208
10209
10210/**
10211 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
10212 */
10213HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10214{
10215 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10216
10217 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
10218 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10219 return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
10220}
10221
10222
10223/**
10224 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
10225 * Unconditional VM-exit.
10226 */
10227HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10228{
10229 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10230
10231 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10232 | HMVMX_READ_EXIT_INSTR_LEN
10233 | HMVMX_READ_IDT_VECTORING_INFO
10234 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10235
10236 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10237 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10238 pVmxTransient->uIdtVectoringErrorCode);
10239 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
10240}
10241
10242
10243/**
10244 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10245 */
10246HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10247{
10248 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10249
10250 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
10251 {
10252 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10253 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10254 }
10255 return vmxHCExitHlt(pVCpu, pVmxTransient);
10256}
10257
10258
10259/**
10260 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10261 */
10262HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10263{
10264 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10265
10266 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10267 {
10268 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10269 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10270 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10271 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10272 }
10273 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
10274}
10275
10276
10277/**
10278 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10279 */
10280HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10281{
10282 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10283
10284 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
10285 {
10286 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10287 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10288 }
10289 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
10290}
10291
10292
10293/**
10294 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
10295 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
10296 */
10297HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10298{
10299 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10300
10301 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
10302 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
10303
10304 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10305
10306 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
10307 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10308 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10309
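    /* Outside long mode the VMREAD/VMWRITE register operand is 32 bits wide, so only the
       lower 32 bits of the register specify the VMCS field encoding. */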
10310 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
10311 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
10312 u64VmcsField &= UINT64_C(0xffffffff);
10313
10314 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
10315 {
10316 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10317 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10318 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10319 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10320 }
10321
10322 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
10323 return vmxHCExitVmread(pVCpu, pVmxTransient);
10324 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
10325}
10326
10327
10328/**
10329 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10330 */
10331HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10332{
10333 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10334
10335 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10336 {
10337 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10338 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10339 }
10340
10341 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
10342}
10343
10344
10345/**
10346 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
10347 * Conditional VM-exit.
10348 */
10349HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10350{
10351 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10352
10353 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10354 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10355
10356 VBOXSTRICTRC rcStrict;
10357 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
10358 switch (uAccessType)
10359 {
10360 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
10361 {
10362 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10363 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10364 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10365 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10366
10367 bool fIntercept;
10368 switch (iCrReg)
10369 {
10370 case 0:
10371 case 4:
10372 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
10373 break;
10374
10375 case 3:
10376 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
10377 break;
10378
10379 case 8:
10380 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
10381 break;
10382
10383 default:
10384 fIntercept = false;
10385 break;
10386 }
10387 if (fIntercept)
10388 {
10389 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10390 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10391 }
10392 else
10393 {
10394 int const rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10395 AssertRCReturn(rc, rc);
10396 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10397 }
10398 break;
10399 }
10400
10401 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
10402 {
10403 /*
10404 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
10405 * CR2 reads do not cause a VM-exit.
10406 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
10407 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
10408 */
10409 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10410 if ( iCrReg == 3
10411 || iCrReg == 8)
10412 {
10413 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
10414 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
10415 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
10416 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
10417 {
10418 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10419 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10420 }
10421 else
10422 {
10423 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10424 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10425 }
10426 }
10427 else
10428 {
10429 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
10430 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
10431 }
10432 break;
10433 }
10434
10435 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
10436 {
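            /* CLTS causes a VM-exit only when the nested hypervisor owns CR0.TS (guest/host mask
               bit set) and the CR0 read shadow also has TS set; otherwise we handle CLTS ourselves. */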
10437 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
10438 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
10439 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
10440 if ( (uGstHostMask & X86_CR0_TS)
10441 && (uReadShadow & X86_CR0_TS))
10442 {
10443 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10444 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10445 }
10446 else
10447 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
10448 break;
10449 }
10450
10451 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
10452 {
10453 RTGCPTR GCPtrEffDst;
10454 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
10455 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
10456 if (fMemOperand)
10457 {
10458 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10459 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
10460 }
10461 else
10462 GCPtrEffDst = NIL_RTGCPTR;
10463
10464 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
10465 {
10466 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10467 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
10468 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10469 }
10470 else
10471 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
10472 break;
10473 }
10474
10475 default:
10476 {
10477 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
10478 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
10479 }
10480 }
10481
10482 if (rcStrict == VINF_IEM_RAISED_XCPT)
10483 {
10484 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10485 rcStrict = VINF_SUCCESS;
10486 }
10487 return rcStrict;
10488}
10489
10490
10491/**
10492 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
10493 * Conditional VM-exit.
10494 */
10495HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10496{
10497 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10498
10499 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
10500 {
10501 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10502 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10503 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10504 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10505 }
10506 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
10507}
10508
10509
10510/**
10511 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
10512 * Conditional VM-exit.
10513 */
10514HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10515{
10516 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10517
10518 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10519
10520 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
10521 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
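    /* The I/O size field encodes 0 = 1 byte, 1 = 2 bytes and 3 = 4 bytes; 2 is not a valid encoding. */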
10522 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
10523
10524 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
10525 uint8_t const cbAccess = s_aIOSizes[uIOSize];
10526 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
10527 {
10528 /*
10529 * IN/OUT instruction:
10530 * - Provides VM-exit instruction length.
10531 *
10532 * INS/OUTS instruction:
10533 * - Provides VM-exit instruction length.
10534 * - Provides Guest-linear address.
10535 * - Optionally provides VM-exit instruction info (depends on CPU feature).
10536 */
10537 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10538 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10539
10540 /* Make sure we don't use stale/uninitialized VMX-transient info below. */
10541 pVmxTransient->ExitInstrInfo.u = 0;
10542 pVmxTransient->uGuestLinearAddr = 0;
10543
10544 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
10545 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
10546 if (fIOString)
10547 {
10548 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10549 if (fVmxInsOutsInfo)
10550 {
10551 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
10552 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10553 }
10554 }
10555
10556 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_AND_LIN_ADDR_FROM_TRANSIENT(pVmxTransient);
10557 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10558 }
10559 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
10560}
10561
10562
10563/**
10564 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10565 */
10566HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10567{
10568 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10569
10570 uint32_t fMsrpm;
10571 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10572 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10573 else
10574 fMsrpm = VMXMSRPM_EXIT_RD;
10575
10576 if (fMsrpm & VMXMSRPM_EXIT_RD)
10577 {
10578 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10579 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10580 }
10581 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
10582}
10583
10584
10585/**
10586 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10587 */
10588HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10589{
10590 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10591
10592 uint32_t fMsrpm;
10593 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10594 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10595 else
10596 fMsrpm = VMXMSRPM_EXIT_WR;
10597
10598 if (fMsrpm & VMXMSRPM_EXIT_WR)
10599 {
10600 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10601 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10602 }
10603 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
10604}
10605
10606
10607/**
10608 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10609 */
10610HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10611{
10612 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10613
10614 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
10615 {
10616 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10617 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10618 }
10619 return vmxHCExitMwait(pVCpu, pVmxTransient);
10620}
10621
10622
10623/**
10624 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
10625 * VM-exit.
10626 */
10627HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10628{
10629 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10630
10631 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
10632 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10633 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10634 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10635}
10636
10637
10638/**
10639 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10640 */
10641HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10642{
10643 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10644
10645 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
10646 {
10647 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10648 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10649 }
10650 return vmxHCExitMonitor(pVCpu, pVmxTransient);
10651}
10652
10653
10654/**
10655 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10656 */
10657HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10658{
10659 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10660
10661 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
10662 * PAUSE when executing a nested-guest? If it does not, we would not need
10663 * to check for the intercepts here. Just call VM-exit... */
10664
10665 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
10666 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
10667 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10668 {
10669 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10670 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10671 }
10672 return vmxHCExitPause(pVCpu, pVmxTransient);
10673}
10674
10675
10676/**
10677 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10678 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10679 */
10680HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10681{
10682 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10683
10684 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10685 {
10686 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10687 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10688 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10689 }
10690 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10691}
10692
10693
10694/**
10695 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10696 * VM-exit.
10697 */
10698HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10699{
10700 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10701
10702 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10703 | HMVMX_READ_EXIT_INSTR_LEN
10704 | HMVMX_READ_IDT_VECTORING_INFO
10705 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10706
10707 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10708
10709 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10710 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10711
10712 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10713 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10714 pVmxTransient->uIdtVectoringErrorCode);
10715 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10716}
10717
10718
10719/**
10720 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10721 * Conditional VM-exit.
10722 */
10723HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10724{
10725 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10726
10727 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10728 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10729 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10730}
10731
10732
10733/**
10734 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10735 * Conditional VM-exit.
10736 */
10737HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10738{
10739 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10740
10741 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10742 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10743 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10744}
10745
10746
10747/**
10748 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10749 */
10750HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10751{
10752 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10753
10754 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10755 {
10756 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10757 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10758 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10759 }
10760 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10761}
10762
10763
10764/**
10765 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10766 */
10767HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10768{
10769 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10770
10771 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10772 {
10773 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10774 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10775 }
10776 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10777}
10778
10779
10780/**
10781 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10782 */
10783HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10784{
10785 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10786
10787 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10788 {
10789 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10790 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10791 | HMVMX_READ_EXIT_INSTR_INFO
10792 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10793 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10794 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10795 }
10796 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10797}
10798
10799
10800/**
10801 * Nested-guest VM-exit handler for invalid-guest state
10802 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10803 */
10804HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10805{
10806 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10807
10808 /*
10809 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10810 * So if it does happen, it indicates a bug, possibly in the hardware-assisted VMX code.
10811 * Handle it as if the outer guest were in an invalid guest state.
10812 *
10813 * When the fast path is implemented, this should be changed to cause the corresponding
10814 * nested-guest VM-exit.
10815 */
10816 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10817}
10818
10819
10820/**
10821 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10822 * and only provide the instruction length.
10823 *
10824 * Unconditional VM-exit.
10825 */
10826HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10827{
10828 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10829
10830#ifdef VBOX_STRICT
10831 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10832 switch (pVmxTransient->uExitReason)
10833 {
10834 case VMX_EXIT_ENCLS:
10835 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10836 break;
10837
10838 case VMX_EXIT_VMFUNC:
10839 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10840 break;
10841 }
10842#endif
10843
10844 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10845 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10846}
10847
10848
10849/**
10850 * Nested-guest VM-exit handler for instructions that provide instruction length as
10851 * well as more information.
10852 *
10853 * Unconditional VM-exit.
10854 */
10855HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10856{
10857 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10858
10859# ifdef VBOX_STRICT
10860 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10861 switch (pVmxTransient->uExitReason)
10862 {
10863 case VMX_EXIT_GDTR_IDTR_ACCESS:
10864 case VMX_EXIT_LDTR_TR_ACCESS:
10865 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10866 break;
10867
10868 case VMX_EXIT_RDRAND:
10869 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10870 break;
10871
10872 case VMX_EXIT_RDSEED:
10873 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10874 break;
10875
10876 case VMX_EXIT_XSAVES:
10877 case VMX_EXIT_XRSTORS:
10878 /** @todo NSTVMX: Verify XSS-bitmap. */
10879 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10880 break;
10881
10882 case VMX_EXIT_UMWAIT:
10883 case VMX_EXIT_TPAUSE:
10884 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10885 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10886 break;
10887
10888 case VMX_EXIT_LOADIWKEY:
10889 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10890 break;
10891 }
10892# endif
10893
10894 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10895 | HMVMX_READ_EXIT_INSTR_LEN
10896 | HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10897 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10898 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10899}
10900
10901# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10902
10903/**
10904 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10905 * Conditional VM-exit.
10906 */
10907HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10908{
10909 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10910 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10911
10912 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10913 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10914 {
10915 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10916 | HMVMX_READ_EXIT_INSTR_LEN
10917 | HMVMX_READ_EXIT_INTERRUPTION_INFO
10918 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10919 | HMVMX_READ_IDT_VECTORING_INFO
10920 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
10921 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10922 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10923 AssertRCReturn(rc, rc);
10924
10925 /*
10926 * If it's our VM-exit, we're responsible for re-injecting any event whose delivery
10927 * might have triggered this VM-exit. If we forward the problem to the inner VMM,
10928 * it becomes the inner VMM's responsibility to deal with it and we clear the recovered event.
10929 */
10930 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10931 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10932 { /*likely*/ }
10933 else
10934 {
10935 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
10936 return rcStrict;
10937 }
10938 uint32_t const fClearEventOnForward = VCPU_2_VMXSTATE(pVCpu).Event.fPending; /* paranoia. should not inject events below. */
10939
10940 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10941 uint64_t const uExitQual = pVmxTransient->uExitQual;
10942
10943 RTGCPTR GCPtrNestedFault;
10944 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10945 if (fIsLinearAddrValid)
10946 {
10947 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10948 GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
10949 }
10950 else
10951 GCPtrNestedFault = 0;
10952
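        /* Translate the EPT-violation exit qualification into x86 page-fault error-code bits for the
           nested paging handler: instruction fetch -> ID, write access -> RW, and any EPT read/write/
           execute permission bit set -> P (i.e. the translation was present). */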
10953 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10954 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10955 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10956 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10957 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10958
10959 PGMPTWALK Walk;
10960 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10961 rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, pCtx, GCPhysNestedFault,
10962 fIsLinearAddrValid, GCPtrNestedFault, &Walk);
10963 Log7Func(("PGM (uExitQual=%#RX64, %RGp, %RGv) -> %Rrc (fFailed=%d)\n",
10964 uExitQual, GCPhysNestedFault, GCPtrNestedFault, VBOXSTRICTRC_VAL(rcStrict), Walk.fFailed));
10965 if (RT_SUCCESS(rcStrict))
10966 {
10967 if (rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE)
10968 {
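                /* A pending MMIO write still needs to be committed (in ring-3); reschedule so that
                   IOM gets a chance to complete it. */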
10969 Assert(!fClearEventOnForward);
10970 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM));
10971 rcStrict = VINF_EM_RESCHEDULE_REM;
10972 }
10973 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
10974 return rcStrict;
10975 }
10976
10977 if (fClearEventOnForward)
10978 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
10979
10980 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10981 pVmxTransient->uIdtVectoringErrorCode);
10982 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10983 {
10984 VMXVEXITINFO const ExitInfo
10985 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_AND_GST_ADDRESSES(VMX_EXIT_EPT_VIOLATION,
10986 pVmxTransient->uExitQual,
10987 pVmxTransient->cbExitInstr,
10988 pVmxTransient->uGuestLinearAddr,
10989 pVmxTransient->uGuestPhysicalAddr);
10990 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10991 }
10992
10993 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10994 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10995 }
10996
10997 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10998}
10999
11000
11001/**
11002 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
11003 * Conditional VM-exit.
11004 */
11005HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11006{
11007 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11008 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
11009
11010 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11011 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
11012 {
11013 vmxHCReadToTransient<HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
11014 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
11015 AssertRCReturn(rc, rc);
11016
11017 PGMPTWALK Walk;
11018 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11019 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
11020 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, pCtx,
11021 GCPhysNestedFault, false /* fIsLinearAddrValid */,
11022 0 /* GCPtrNestedFault */, &Walk);
11023 if (RT_SUCCESS(rcStrict))
11024 {
11025 AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
11026 return rcStrict;
11027 }
11028
11029 AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
11030 vmxHCReadToTransient< HMVMX_READ_IDT_VECTORING_INFO
11031 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
11032
11033 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
11034 pVmxTransient->uIdtVectoringErrorCode);
11035 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
11036 }
11037
11038 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
11039}
11040
11041# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
11042
11043/** @} */
11044#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
11045
11046
11047/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
11048 * probes.
11049 *
11050 * The following few functions and associated structure contains the bloat
11051 * necessary for providing detailed debug events and dtrace probes as well as
11052 * reliable host side single stepping. This works on the principle of
11053 * "subclassing" the normal execution loop and workers. We replace the loop
11054 * method completely and override selected helpers to add necessary adjustments
11055 * to their core operation.
11056 *
11057 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
11058 * any performance for debug and analysis features.
11059 *
11060 * @{
11061 */
11062
11063/**
11064 * Transient per-VCPU debug state of the VMCS and related info that we save/restore
11065 * in the debug run loop.
11066 */
11067typedef struct VMXRUNDBGSTATE
11068{
11069 /** The RIP we started executing at. This is for detecting that we stepped. */
11070 uint64_t uRipStart;
11071 /** The CS we started executing with. */
11072 uint16_t uCsStart;
11073
11074 /** Whether we've actually modified the 1st execution control field. */
11075 bool fModifiedProcCtls : 1;
11076 /** Whether we've actually modified the 2nd execution control field. */
11077 bool fModifiedProcCtls2 : 1;
11078 /** Whether we've actually modified the exception bitmap. */
11079 bool fModifiedXcptBitmap : 1;
11080
11081 /** We desire the CR0 mask to be cleared. */
11082 bool fClearCr0Mask : 1;
11083 /** We desire the CR4 mask to be cleared. */
11084 bool fClearCr4Mask : 1;
11085 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
11086 uint32_t fCpe1Extra;
11087 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
11088 uint32_t fCpe1Unwanted;
11089 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
11090 uint32_t fCpe2Extra;
11091 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
11092 uint32_t bmXcptExtra;
11093 /** The sequence number of the Dtrace provider settings the state was
11094 * configured against. */
11095 uint32_t uDtraceSettingsSeqNo;
11096 /** VM-exits to check (one bit per VM-exit). */
11097 uint32_t bmExitsToCheck[3];
11098
11099 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
11100 uint32_t fProcCtlsInitial;
11101 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
11102 uint32_t fProcCtls2Initial;
11103 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
11104 uint32_t bmXcptInitial;
11105} VMXRUNDBGSTATE;
11106AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
11107typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
11108
11109
11110/**
11111 * Initializes the VMXRUNDBGSTATE structure.
11112 *
11113 * @param pVCpu The cross context virtual CPU structure of the
11114 * calling EMT.
11115 * @param pVmxTransient The VMX-transient structure.
11116 * @param pDbgState The debug state to initialize.
11117 */
11118static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11119{
11120 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
11121 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
11122
11123 pDbgState->fModifiedProcCtls = false;
11124 pDbgState->fModifiedProcCtls2 = false;
11125 pDbgState->fModifiedXcptBitmap = false;
11126 pDbgState->fClearCr0Mask = false;
11127 pDbgState->fClearCr4Mask = false;
11128 pDbgState->fCpe1Extra = 0;
11129 pDbgState->fCpe1Unwanted = 0;
11130 pDbgState->fCpe2Extra = 0;
11131 pDbgState->bmXcptExtra = 0;
11132 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
11133 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
11134 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
11135}
11136
11137
11138/**
11139 * Updates the VMCS fields with changes requested by @a pDbgState.
11140 *
11141 * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as
11142 * immediately before executing guest code, i.e. when interrupts are disabled.
11143 * We don't check status codes here as we cannot easily assert or return in the
11144 * latter case.
11145 *
11146 * @param pVCpu The cross context virtual CPU structure.
11147 * @param pVmxTransient The VMX-transient structure.
11148 * @param pDbgState The debug state.
11149 */
11150static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11151{
11152 /*
11153 * Ensure desired flags in VMCS control fields are set.
11154 * (Ignoring write failure here, as we're committed and it's just debug extras.)
11155 *
11156 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
11157 * there should be no stale data in pCtx at this point.
11158 */
11159 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11160 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
11161 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
11162 {
11163 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
11164 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
11165 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
11166 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
11167 pDbgState->fModifiedProcCtls = true;
11168 }
11169
11170 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
11171 {
11172 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
11173 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
11174 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
11175 pDbgState->fModifiedProcCtls2 = true;
11176 }
11177
11178 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
11179 {
11180 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
11181 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
11182 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
11183 pDbgState->fModifiedXcptBitmap = true;
11184 }
11185
11186 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
11187 {
11188 pVmcsInfo->u64Cr0Mask = 0;
11189 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
11190 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
11191 }
11192
11193 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
11194 {
11195 pVmcsInfo->u64Cr4Mask = 0;
11196 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
11197 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
11198 }
11199
11200 NOREF(pVCpu);
11201}
11202
11203
11204/**
11205 * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
11206 * re-entry next time around.
11207 *
11208 * @returns Strict VBox status code (i.e. informational status codes too).
11209 * @param pVCpu The cross context virtual CPU structure.
11210 * @param pVmxTransient The VMX-transient structure.
11211 * @param pDbgState The debug state.
11212 * @param rcStrict The return code from executing the guest using single
11213 * stepping.
11214 */
11215static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
11216 VBOXSTRICTRC rcStrict)
11217{
11218 /*
11219 * Restore VM-exit control settings as we may not reenter this function the
11220 * next time around.
11221 */
11222 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11223
11224 /* We reload the initial value and trigger whatever recalculations we can the
11225 next time around. From the looks of things, that's all that's required at the moment. */
11226 if (pDbgState->fModifiedProcCtls)
11227 {
11228 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
11229 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
11230 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
11231 AssertRC(rc2);
11232 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
11233 }
11234
11235 /* We're currently the only ones messing with this one, so just restore the
11236 cached value and reload the field. */
11237 if ( pDbgState->fModifiedProcCtls2
11238 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
11239 {
11240 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
11241 AssertRC(rc2);
11242 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
11243 }
11244
11245 /* If we've modified the exception bitmap, we restore it and trigger
11246 reloading and partial recalculation the next time around. */
11247 if (pDbgState->fModifiedXcptBitmap)
11248 {
11249 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pDbgState->bmXcptInitial);
11250 AssertRC(rc2);
11251 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
11252 }
11253
11254 return rcStrict;
11255}
11256
11257
11258/**
11259  * Configures which VM-exits to intercept for the current DBGF and DTrace settings.
11260 *
11261 * This updates @a pDbgState and the VMCS execution control fields to reflect
11262 * the necessary VM-exits demanded by DBGF and DTrace.
11263 *
11264 * @param pVCpu The cross context virtual CPU structure.
11265 * @param pVmxTransient The VMX-transient structure. May update
11266 * fUpdatedTscOffsettingAndPreemptTimer.
11267 * @param pDbgState The debug state.
11268 */
11269static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11270{
11271#ifndef IN_NEM_DARWIN
11272 /*
11273 * Take down the dtrace serial number so we can spot changes.
11274 */
11275 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
11276 ASMCompilerBarrier();
11277#endif
11278
11279 /*
11280 * We'll rebuild most of the middle block of data members (holding the
11281 * current settings) as we go along here, so start by clearing it all.
11282 */
11283 pDbgState->bmXcptExtra = 0;
11284 pDbgState->fCpe1Extra = 0;
11285 pDbgState->fCpe1Unwanted = 0;
11286 pDbgState->fCpe2Extra = 0;
11287 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
11288 pDbgState->bmExitsToCheck[i] = 0;
11289
11290 /*
11291 * Software interrupts (INT XXh) - no idea how to trigger these...
11292 */
11293 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11294 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
11295 || VBOXVMM_INT_SOFTWARE_ENABLED())
11296 {
11297 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11298 }
11299
11300 /*
11301 * INT3 breakpoints - triggered by #BP exceptions.
11302 */
11303 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
11304 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11305
11306 /*
11307 * Exception bitmap and XCPT events+probes.
11308 */
11309 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
11310 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
11311 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
11312
11313 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
11314 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
11315 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11316 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
11317 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
11318 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
11319 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
11320 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
11321 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
11322 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
11323 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
11324 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
11325 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
11326 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
11327 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
11328 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
11329 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
11330 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
11331
11332 if (pDbgState->bmXcptExtra)
11333 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11334
11335 /*
11336 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
11337 *
11338      * Note! This is the reverse of what vmxHCHandleExitDtraceEvents does.
11339      *       So, when adding/changing/removing entries, please don't forget to update it as well.
11340      *
11341      * Some of the macros pick up local variables to save horizontal space
11342      *       (being able to see it all in a table is the lesser evil here).
11343 */
11344#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
11345 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
11346 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
11347#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
11348 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11349 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11350 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11351 } else do { } while (0)
11352#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
11353 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11354 { \
11355 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
11356 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11357 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11358 } else do { } while (0)
11359#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
11360 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11361 { \
11362 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
11363 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11364 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11365 } else do { } while (0)
11366#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
11367 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11368 { \
11369 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
11370 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11371 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11372 } else do { } while (0)
11373
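/*
 * A minimal illustration of what the macros above expand to, taking
 * SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT) as the
 * example (the AssertCompile range check is elided here):
 *
 *     if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INSTR_HALT)
 *         || VBOXVMM_INSTR_HALT_ENABLED())
 *     {
 *         pDbgState->fCpe1Extra |= VMX_PROC_CTLS_HLT_EXIT;
 *         ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_HLT);
 *     }
 *
 * I.e. request the HLT-exiting execution control and remember that VMX_EXIT_HLT
 * VM-exits need to be run past the DBGF/dtrace dispatcher.
 */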
11374 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
11375 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
11376 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
11377 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
11378 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
11379
11380 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
11381 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
11382 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
11383 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
11384 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
11385 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
11386 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
11387 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
11388 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
11389 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
11390 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
11391 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
11392 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
11393 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
11394 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
11395 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
11396 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
11397 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
11398 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
11399 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
11400 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
11401 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
11402 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
11403 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
11404 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
11405 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
11406 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
11407 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
11408 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
11409 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
11410 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
11411 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
11412 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
11413 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
11414 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
11415 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
11416
11417 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
11418 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11419 {
11420 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo,
11421 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR);
11422 AssertRC(rc);
11423
11424#if 0 /** @todo fix me */
11425 pDbgState->fClearCr0Mask = true;
11426 pDbgState->fClearCr4Mask = true;
11427#endif
11428 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
11429 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
11430 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11431 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
11432 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
11433 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
11434 require clearing here and in the loop if we start using it. */
11435 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
11436 }
11437 else
11438 {
11439 if (pDbgState->fClearCr0Mask)
11440 {
11441 pDbgState->fClearCr0Mask = false;
11442 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
11443 }
11444 if (pDbgState->fClearCr4Mask)
11445 {
11446 pDbgState->fClearCr4Mask = false;
11447 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
11448 }
11449 }
11450 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
11451 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
11452
11453 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
11454 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
11455 {
11456 /** @todo later, need to fix handler as it assumes this won't usually happen. */
11457 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
11458 }
11459 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
11460 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
11461
11462 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
11463 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
11464 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
11465 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
11466 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
11467 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
11468 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
11469 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
11470#if 0 /** @todo too slow, fix handler. */
11471 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
11472#endif
11473 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
11474
11475 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
11476 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
11477 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
11478 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
11479 {
11480 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11481 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
11482 }
11483 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11484 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11485 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11486 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11487
11488 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
11489 || IS_EITHER_ENABLED(pVM, INSTR_STR)
11490 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
11491 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
11492 {
11493 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11494 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
11495 }
11496 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
11497 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
11498 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
11499 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
11500
11501 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
11502 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
11503 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
11504 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
11505 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
11506 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
11507 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
11508 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
11509 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
11510 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
11511 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
11512 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
11513 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
11514 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
11515 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
11516 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
11517 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
11518 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
11519 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
11520     SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
11521 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
11522 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
11523
11524#undef IS_EITHER_ENABLED
11525#undef SET_ONLY_XBM_IF_EITHER_EN
11526#undef SET_CPE1_XBM_IF_EITHER_EN
11527#undef SET_CPEU_XBM_IF_EITHER_EN
11528#undef SET_CPE2_XBM_IF_EITHER_EN
11529
11530 /*
11531 * Sanitize the control stuff.
11532 */
11533 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
11534 if (pDbgState->fCpe2Extra)
11535 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
11536 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
11537 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
11538#ifndef IN_NEM_DARWIN
11539 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11540 {
11541 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
11542 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11543 }
11544#else
11545 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11546 {
11547 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
11548 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11549 }
11550#endif
11551
11552 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
11553 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
11554 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
11555 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
11556}
11557
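/*
 * The bits set in pDbgState->bmExitsToCheck above are consumed by
 * vmxHCRunDebugHandleExit() further down; a minimal sketch of that consumer side:
 *
 *     if (   uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
 *         && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason))
 *         rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
 */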
11558
11559/**
11560 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
11561 * appropriate.
11562 *
11563 * The caller has checked the VM-exit against the
11564 * VMXRUNDBGSTATE::bmExitsToCheck bitmap. The caller has checked for NMIs
11565 * already, so we don't have to do that either.
11566 *
11567 * @returns Strict VBox status code (i.e. informational status codes too).
11568 * @param pVCpu The cross context virtual CPU structure.
11569 * @param pVmxTransient The VMX-transient structure.
11570 * @param uExitReason The VM-exit reason.
11571 *
11572 * @remarks The name of this function is displayed by dtrace, so keep it short
11573  *          and to the point. No longer than 33 chars, please.
11574 */
11575static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
11576{
11577 /*
11578 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
11579 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
11580 *
11581      * Note! This is the reverse operation of what vmxHCPreRunGuestDebugStateUpdate
11582      *       does. Additions/changes/removals must be made in both places, in the same order, please.
11583 *
11584 * Added/removed events must also be reflected in the next section
11585 * where we dispatch dtrace events.
11586 */
11587 bool fDtrace1 = false;
11588 bool fDtrace2 = false;
11589 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
11590 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
11591 uint32_t uEventArg = 0;
11592#define SET_EXIT(a_EventSubName) \
11593 do { \
11594 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11595 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11596 } while (0)
11597#define SET_BOTH(a_EventSubName) \
11598 do { \
11599 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
11600 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11601 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
11602 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11603 } while (0)
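/* For example, SET_BOTH(CPUID) expands to:
 *     enmEvent1 = DBGFEVENT_INSTR_CPUID;
 *     enmEvent2 = DBGFEVENT_EXIT_CPUID;
 *     fDtrace1  = VBOXVMM_INSTR_CPUID_ENABLED();
 *     fDtrace2  = VBOXVMM_EXIT_CPUID_ENABLED();
 * so both the instruction-level and the exit-level event/probe are considered for the
 * same VM-exit, with the instruction-level one taking priority when both fire. */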
11604 switch (uExitReason)
11605 {
11606 case VMX_EXIT_MTF:
11607 return vmxHCExitMtf(pVCpu, pVmxTransient);
11608
11609 case VMX_EXIT_XCPT_OR_NMI:
11610 {
11611 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11612 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
11613 {
11614 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11615 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
11616 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
11617 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
11618 {
11619 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
11620 {
11621 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE>(pVCpu, pVmxTransient);
11622 uEventArg = pVmxTransient->uExitIntErrorCode;
11623 }
11624 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
11625 switch (enmEvent1)
11626 {
11627 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
11628 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
11629 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
11630 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
11631 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
11632 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
11633 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
11634 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
11635 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
11636 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
11637 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
11638 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
11639 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
11640 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
11641 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
11642 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
11643 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
11644 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
11645 default: break;
11646 }
11647 }
11648 else
11649 AssertFailed();
11650 break;
11651
11652 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
11653 uEventArg = idxVector;
11654 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
11655 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
11656 break;
11657 }
11658 break;
11659 }
11660
11661 case VMX_EXIT_TRIPLE_FAULT:
11662 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11663 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11664 break;
11665 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11666 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11667 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11668 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11669 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11670
11671 /* Instruction specific VM-exits: */
11672 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11673 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11674 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11675 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11676 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11677 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11678 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11679 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11680 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11681 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11682 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11683 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11684 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11685 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11686 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11687 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11688 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11689 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11690 case VMX_EXIT_MOV_CRX:
11691 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11692 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11693 SET_BOTH(CRX_READ);
11694 else
11695 SET_BOTH(CRX_WRITE);
11696 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11697 break;
11698 case VMX_EXIT_MOV_DRX:
11699 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11700 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11701 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11702 SET_BOTH(DRX_READ);
11703 else
11704 SET_BOTH(DRX_WRITE);
11705 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11706 break;
11707 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11708 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11709 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11710 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11711 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11712 case VMX_EXIT_GDTR_IDTR_ACCESS:
11713 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11714 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11715 {
11716 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11717 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11718 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11719 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11720 }
11721 break;
11722
11723 case VMX_EXIT_LDTR_TR_ACCESS:
11724 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11725 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11726 {
11727 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11728 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11729 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11730 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11731 }
11732 break;
11733
11734 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11735 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11736 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11737 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11738 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11739 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11740 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11741 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11742 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11743 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11744 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11745
11746 /* Events that aren't relevant at this point. */
11747 case VMX_EXIT_EXT_INT:
11748 case VMX_EXIT_INT_WINDOW:
11749 case VMX_EXIT_NMI_WINDOW:
11750 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11751 case VMX_EXIT_PREEMPT_TIMER:
11752 case VMX_EXIT_IO_INSTR:
11753 break;
11754
11755 /* Errors and unexpected events. */
11756 case VMX_EXIT_INIT_SIGNAL:
11757 case VMX_EXIT_SIPI:
11758 case VMX_EXIT_IO_SMI:
11759 case VMX_EXIT_SMI:
11760 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11761 case VMX_EXIT_ERR_MSR_LOAD:
11762 case VMX_EXIT_ERR_MACHINE_CHECK:
11763 case VMX_EXIT_PML_FULL:
11764 case VMX_EXIT_VIRTUALIZED_EOI:
11765 break;
11766
11767 default:
11768 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11769 break;
11770 }
11771#undef SET_BOTH
11772#undef SET_EXIT
11773
11774 /*
11775      * Dtrace tracepoints go first. We do them all here so we don't have to
11776      * repeat the guest-state import boilerplate a few dozen times. The downside
11777      * is that we've got to repeat the switch, though this time we use the
11778      * enmEvent values since the probes are a subset of what DBGF does.
11779 */
11780 if (fDtrace1 || fDtrace2)
11781 {
11782 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11783 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11784 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11785 switch (enmEvent1)
11786 {
11787 /** @todo consider which extra parameters would be helpful for each probe. */
11788 case DBGFEVENT_END: break;
11789 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11790 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11791 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11792 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11793 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11794 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11795 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11796 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11797 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11798 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11799 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11800 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11801 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11802 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11803 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11804 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11805 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11806 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11807 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11808 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11809 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11810 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11811 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11812 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11813 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11814 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11815 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11816 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11817 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11818 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11819 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11820 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11821 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11822 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11823 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11824 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11825 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11826 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11827 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11828 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11829 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11830 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11831 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11832 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11833 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11834 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11835 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11836 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11837 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11838 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11839 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11840 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11841 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11842 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11843 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11844 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11845 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11846 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11847 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11848 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11849 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11850 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11851 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11852 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11853 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11854 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11855 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11856 }
11857 switch (enmEvent2)
11858 {
11859 /** @todo consider which extra parameters would be helpful for each probe. */
11860 case DBGFEVENT_END: break;
11861 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11862 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11863 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11864 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11865 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11866 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11867 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11868 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11869 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11870 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11871 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11872 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11873 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11874 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11875 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11876 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11877 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11878 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11879 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11880 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11881 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11882 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11883 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11884 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11885 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11886 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11887 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11888 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11889 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11890 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11891 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11892 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11893 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11894 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11895 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11896 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11897 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11898 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11899 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11900 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11901 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11902 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11903 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11904 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11905 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11906 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11907 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11908 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11909 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11910 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11911 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11912 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11913 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11914 }
11915 }
11916
11917 /*
11918     * Fire off the DBGF event, if enabled (our check here is just a quick one,
11919 * the DBGF call will do a full check).
11920 *
11921 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11922     * Note! If we have two events, we prioritize the first, i.e. the instruction
11923 * one, in order to avoid event nesting.
11924 */
11925 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11926 if ( enmEvent1 != DBGFEVENT_END
11927 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11928 {
11929 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11930 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11931 if (rcStrict != VINF_SUCCESS)
11932 return rcStrict;
11933 }
11934 else if ( enmEvent2 != DBGFEVENT_END
11935 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11936 {
11937 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11938 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11939 if (rcStrict != VINF_SUCCESS)
11940 return rcStrict;
11941 }
11942
11943 return VINF_SUCCESS;
11944}
11945
11946
11947/**
11948 * Single-stepping VM-exit filtering.
11949 *
11950 * This is preprocessing the VM-exits and deciding whether we've gotten far
11951 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11952 * handling is performed.
11953 *
11954 * @returns Strict VBox status code (i.e. informational status codes too).
11955 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11956 * @param pVmxTransient The VMX-transient structure.
11957 * @param pDbgState The debug state.
11958 */
11959DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11960{
11961 /*
11962 * Expensive (saves context) generic dtrace VM-exit probe.
11963 */
11964 uint32_t const uExitReason = pVmxTransient->uExitReason;
11965 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11966 { /* more likely */ }
11967 else
11968 {
11969 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11970 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11971 AssertRC(rc);
11972 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11973 }
11974
11975#ifndef IN_NEM_DARWIN
11976 /*
11977 * Check for host NMI, just to get that out of the way.
11978 */
11979 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11980 { /* normally likely */ }
11981 else
11982 {
11983 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
11984 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11985 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11986 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11987 }
11988#endif
11989
11990 /*
11991 * Check for single stepping event if we're stepping.
11992 */
11993 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11994 {
11995 switch (uExitReason)
11996 {
11997 case VMX_EXIT_MTF:
11998 return vmxHCExitMtf(pVCpu, pVmxTransient);
11999
12000 /* Various events: */
12001 case VMX_EXIT_XCPT_OR_NMI:
12002 case VMX_EXIT_EXT_INT:
12003 case VMX_EXIT_TRIPLE_FAULT:
12004 case VMX_EXIT_INT_WINDOW:
12005 case VMX_EXIT_NMI_WINDOW:
12006 case VMX_EXIT_TASK_SWITCH:
12007 case VMX_EXIT_TPR_BELOW_THRESHOLD:
12008 case VMX_EXIT_APIC_ACCESS:
12009 case VMX_EXIT_EPT_VIOLATION:
12010 case VMX_EXIT_EPT_MISCONFIG:
12011 case VMX_EXIT_PREEMPT_TIMER:
12012
12013 /* Instruction specific VM-exits: */
12014 case VMX_EXIT_CPUID:
12015 case VMX_EXIT_GETSEC:
12016 case VMX_EXIT_HLT:
12017 case VMX_EXIT_INVD:
12018 case VMX_EXIT_INVLPG:
12019 case VMX_EXIT_RDPMC:
12020 case VMX_EXIT_RDTSC:
12021 case VMX_EXIT_RSM:
12022 case VMX_EXIT_VMCALL:
12023 case VMX_EXIT_VMCLEAR:
12024 case VMX_EXIT_VMLAUNCH:
12025 case VMX_EXIT_VMPTRLD:
12026 case VMX_EXIT_VMPTRST:
12027 case VMX_EXIT_VMREAD:
12028 case VMX_EXIT_VMRESUME:
12029 case VMX_EXIT_VMWRITE:
12030 case VMX_EXIT_VMXOFF:
12031 case VMX_EXIT_VMXON:
12032 case VMX_EXIT_MOV_CRX:
12033 case VMX_EXIT_MOV_DRX:
12034 case VMX_EXIT_IO_INSTR:
12035 case VMX_EXIT_RDMSR:
12036 case VMX_EXIT_WRMSR:
12037 case VMX_EXIT_MWAIT:
12038 case VMX_EXIT_MONITOR:
12039 case VMX_EXIT_PAUSE:
12040 case VMX_EXIT_GDTR_IDTR_ACCESS:
12041 case VMX_EXIT_LDTR_TR_ACCESS:
12042 case VMX_EXIT_INVEPT:
12043 case VMX_EXIT_RDTSCP:
12044 case VMX_EXIT_INVVPID:
12045 case VMX_EXIT_WBINVD:
12046 case VMX_EXIT_XSETBV:
12047 case VMX_EXIT_RDRAND:
12048 case VMX_EXIT_INVPCID:
12049 case VMX_EXIT_VMFUNC:
12050 case VMX_EXIT_RDSEED:
12051 case VMX_EXIT_XSAVES:
12052 case VMX_EXIT_XRSTORS:
12053 {
12054 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
12055 AssertRCReturn(rc, rc);
12056 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
12057 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
12058 return VINF_EM_DBG_STEPPED;
12059 break;
12060 }
12061
12062 /* Errors and unexpected events: */
12063 case VMX_EXIT_INIT_SIGNAL:
12064 case VMX_EXIT_SIPI:
12065 case VMX_EXIT_IO_SMI:
12066 case VMX_EXIT_SMI:
12067 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
12068 case VMX_EXIT_ERR_MSR_LOAD:
12069 case VMX_EXIT_ERR_MACHINE_CHECK:
12070 case VMX_EXIT_PML_FULL:
12071 case VMX_EXIT_VIRTUALIZED_EOI:
12072 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault like, so I guess we must process it? */
12073 break;
12074
12075 default:
12076 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
12077 break;
12078 }
12079 }
12080
12081 /*
12082 * Check for debugger event breakpoints and dtrace probes.
12083 */
12084 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
12085 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
12086 {
12087 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
12088 if (rcStrict != VINF_SUCCESS)
12089 return rcStrict;
12090 }
12091
12092 /*
12093 * Normal processing.
12094 */
12095#ifdef HMVMX_USE_FUNCTION_TABLE
12096 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
12097#else
12098 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
12099#endif
12100}
12101
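/*
 * A minimal sketch of how the pieces above are typically driven by the debug run
 * loop (the loop itself lives elsewhere in this file; this outline is illustrative
 * only, and the apply step refers to the routine mentioned in the revert function's
 * description above):
 *
 *     for (;;)
 *     {
 *         vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, pDbgState);
 *         // apply the pDbgState deltas to the VMCS, run the guest, fetch uExitReason
 *         rcStrict = vmxHCRunDebugHandleExit(pVCpu, pVmxTransient, pDbgState);
 *         if (rcStrict != VINF_SUCCESS)
 *             break;
 *     }
 *     rcStrict = vmxHCRunDebugStateRevert(pVCpu, pVmxTransient, pDbgState, rcStrict);
 */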
12102/** @} */