VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp@96860

Last change on this file since 96860 was 96821, checked in by vboxsync, 20 months ago

VMM/IEM: Define separate log groups for the VMX and SVM code in IEM since we're more or less out of log levels to use in IEM (and the code isn't following the assignments). Defined Log2 to be for logging vmexits. Needs more cleaning up.

1/* $Id: IEMAllCImplSvmInstr.cpp 96821 2022-09-22 00:35:59Z vboxsync $ */
2/** @file
3 * IEM - AMD-V (Secure Virtual Machine) instruction implementation.
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_IEM_SVM
33#define VMCPU_INCL_CPUM_GST_CTX
34#include <VBox/vmm/iem.h>
35#include <VBox/vmm/cpum.h>
36#include <VBox/vmm/apic.h>
37#include <VBox/vmm/pgm.h>
38#include <VBox/vmm/em.h>
39#include <VBox/vmm/hm.h>
40#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
41# include <VBox/vmm/hm_svm.h>
42#endif
43#include <VBox/vmm/gim.h>
44#include <VBox/vmm/tm.h>
45#include "IEMInternal.h"
46#include <VBox/vmm/vmcc.h>
47#include <VBox/log.h>
48#include <VBox/disopcode.h> /* for OP_VMMCALL */
49#include <VBox/err.h>
50#include <VBox/param.h>
51#include <iprt/assert.h>
52#include <iprt/string.h>
53#include <iprt/x86.h>
54
55#include "IEMInline.h"
56
57
58/*********************************************************************************************************************************
59* Defined Constants And Macros *
60*********************************************************************************************************************************/
61#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
62/**
63 * Check the common SVM instruction preconditions.
64 */
65# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
66 do { \
67 if (!CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu))) \
68 { \
69 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
70 return iemRaiseUndefinedOpcode(a_pVCpu); \
71 } \
72 if (IEM_IS_REAL_OR_V86_MODE(a_pVCpu)) \
73 { \
74 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
75 return iemRaiseUndefinedOpcode(a_pVCpu); \
76 } \
77 if ((a_pVCpu)->iem.s.uCpl != 0) \
78 { \
79 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
80 return iemRaiseGeneralProtectionFault0(a_pVCpu); \
81 } \
82 } while (0)
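/* Usage note: each SVM instruction implementation further down (iemCImpl_vmrun, iemCImpl_vmload,
 * iemCImpl_vmsave, iemCImpl_clgi, iemCImpl_stgi, iemCImpl_invlpga) invokes this macro first, e.g.
 *     IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmrun);
 * so the handler bails out early with #UD or #GP(0) when the common preconditions are not met. */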
83
84
85/**
86 * Converts an IEM exception event type to an SVM event type.
87 *
88 * @returns The SVM event type.
89 * @retval UINT8_MAX if the specified type of event isn't among the set
90 * of recognized IEM event types.
91 *
92 * @param uVector The vector of the event.
93 * @param fIemXcptFlags The IEM exception / interrupt flags.
94 */
95IEM_STATIC uint8_t iemGetSvmEventType(uint32_t uVector, uint32_t fIemXcptFlags)
96{
97 if (fIemXcptFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
98 {
99 if (uVector != X86_XCPT_NMI)
100 return SVM_EVENT_EXCEPTION;
101 return SVM_EVENT_NMI;
102 }
103
104 /* See AMD spec. Table 15-1. "Guest Exception or Interrupt Types". */
105 if (fIemXcptFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_ICEBP_INSTR | IEM_XCPT_FLAGS_OF_INSTR))
106 return SVM_EVENT_EXCEPTION;
107
108 if (fIemXcptFlags & IEM_XCPT_FLAGS_T_EXT_INT)
109 return SVM_EVENT_EXTERNAL_IRQ;
110
111 if (fIemXcptFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
112 return SVM_EVENT_SOFTWARE_INT;
113
114 AssertMsgFailed(("iemGetSvmEventType: Invalid IEM xcpt/int. type %#x, uVector=%#x\n", fIemXcptFlags, uVector));
115 return UINT8_MAX;
116}
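/* For reference (AMD APM Vol. 2, Table 15-1): the event types returned above are assumed to encode
 * as 0 = external or virtual interrupt, 2 = NMI, 3 = exception (fault/trap) and 4 = software
 * interrupt; type 1 is reserved. */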
117
118
119/**
120 * Performs an SVM world-switch (VMRUN, \#VMEXIT) updating PGM and IEM internals.
121 *
122 * @returns Strict VBox status code from PGMChangeMode.
123 * @param pVCpu The cross context virtual CPU structure.
124 */
125DECLINLINE(VBOXSTRICTRC) iemSvmWorldSwitch(PVMCPUCC pVCpu)
126{
127 /*
128 * Inform PGM about paging mode changes.
129 * We include X86_CR0_PE because PGM doesn't handle paged-real mode yet,
130 * see comment in iemMemPageTranslateAndCheckAccess().
131 */
132 int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0 | X86_CR0_PE, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
133 true /* fForce */);
134 AssertRCReturn(rc, rc);
135
136 /* Invalidate IEM TLBs now that we've forced a PGM mode change. */
137 IEMTlbInvalidateAll(pVCpu);
138
139 /* Inform CPUM (recompiler), can later be removed. */
140 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
141
142 /* Re-initialize IEM cache/state after the drastic mode switch. */
143 iemReInitExec(pVCpu);
144 return rc;
145}
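/* Note: both directions of the world switch funnel through this helper: VMRUN (guest to
 * nested-guest) in iemSvmVmrun() and #VMEXIT (nested-guest back to the guest's "host state") in
 * iemSvmVmexit(). CR0/CR4/EFER must therefore already be loaded into the guest context when it
 * is called. */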
146
147
148/**
149 * SVM \#VMEXIT handler.
150 *
151 * @returns Strict VBox status code.
152 * @retval VINF_SVM_VMEXIT when the \#VMEXIT is successful.
153 * @retval VERR_SVM_VMEXIT_FAILED when the \#VMEXIT failed restoring the guest's
154 * "host state" and a shutdown is required.
155 *
156 * @param pVCpu The cross context virtual CPU structure.
157 * @param uExitCode The exit code.
158 * @param uExitInfo1 The exit info. 1 field.
159 * @param uExitInfo2 The exit info. 2 field.
160 */
161VBOXSTRICTRC iemSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2) RT_NOEXCEPT
162{
163 VBOXSTRICTRC rcStrict;
164 if ( CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
165 || uExitCode == SVM_EXIT_INVALID)
166 {
167 Log2(("iemSvmVmexit: CS:RIP=%04x:%08RX64 uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n",
168 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uExitCode, uExitInfo1, uExitInfo2));
169
170 /*
171 * Disable the global-interrupt flag to prevent interrupts during the 'atomic' world switch.
172 */
173 CPUMSetGuestGif(&pVCpu->cpum.GstCtx, false);
174
175 /*
176 * Map the nested-guest VMCB from its location in guest memory.
177 * Write exactly what the CPU does on #VMEXIT thereby preserving most other bits in the
178 * guest's VMCB in memory, see @bugref{7243#c113} and related comment on iemSvmVmrun().
179 */
180 PSVMVMCB pVmcbMem;
181 PGMPAGEMAPLOCK PgLockMem;
182 PSVMVMCBCTRL pVmcbCtrl = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl;
183 rcStrict = iemMemPageMap(pVCpu, pVCpu->cpum.GstCtx.hwvirt.svm.GCPhysVmcb, IEM_ACCESS_DATA_RW, (void **)&pVmcbMem,
184 &PgLockMem);
185 if (rcStrict == VINF_SUCCESS)
186 {
187 /*
188 * Notify HM in case the nested-guest was executed using hardware-assisted SVM (which
189 * would have modified some VMCB state) that might need to be restored on #VMEXIT before
190 * writing the VMCB back to guest memory.
191 */
192 HMNotifySvmNstGstVmexit(pVCpu, IEM_GET_CTX(pVCpu));
193
194 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
195 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
196 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
197 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
198
199 /*
200 * Save the nested-guest state into the VMCB state-save area.
201 */
202 PSVMVMCBSTATESAVE pVmcbMemState = &pVmcbMem->guest;
203 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), pVmcbMemState, ES, es);
204 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), pVmcbMemState, CS, cs);
205 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), pVmcbMemState, SS, ss);
206 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), pVmcbMemState, DS, ds);
207 pVmcbMemState->GDTR.u32Limit = pVCpu->cpum.GstCtx.gdtr.cbGdt;
208 pVmcbMemState->GDTR.u64Base = pVCpu->cpum.GstCtx.gdtr.pGdt;
209 pVmcbMemState->IDTR.u32Limit = pVCpu->cpum.GstCtx.idtr.cbIdt;
210 pVmcbMemState->IDTR.u64Base = pVCpu->cpum.GstCtx.idtr.pIdt;
211 pVmcbMemState->u64EFER = pVCpu->cpum.GstCtx.msrEFER;
212 pVmcbMemState->u64CR4 = pVCpu->cpum.GstCtx.cr4;
213 pVmcbMemState->u64CR3 = pVCpu->cpum.GstCtx.cr3;
214 pVmcbMemState->u64CR2 = pVCpu->cpum.GstCtx.cr2;
215 pVmcbMemState->u64CR0 = pVCpu->cpum.GstCtx.cr0;
216 /** @todo Nested paging. */
217 pVmcbMemState->u64RFlags = pVCpu->cpum.GstCtx.rflags.u64;
218 pVmcbMemState->u64RIP = pVCpu->cpum.GstCtx.rip;
219 pVmcbMemState->u64RSP = pVCpu->cpum.GstCtx.rsp;
220 pVmcbMemState->u64RAX = pVCpu->cpum.GstCtx.rax;
221 pVmcbMemState->u64DR7 = pVCpu->cpum.GstCtx.dr[7];
222 pVmcbMemState->u64DR6 = pVCpu->cpum.GstCtx.dr[6];
223 pVmcbMemState->u8CPL = pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl; /* See comment in CPUMGetGuestCPL(). */
224 Assert(CPUMGetGuestCPL(pVCpu) == pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl);
225 if (CPUMIsGuestSvmNestedPagingEnabled(pVCpu, IEM_GET_CTX(pVCpu)))
226 pVmcbMemState->u64PAT = pVCpu->cpum.GstCtx.msrPAT;
227
228 /*
229 * Save additional state and intercept information.
230 *
231 * - V_IRQ: Tracked using VMCPU_FF_INTERRUPT_NESTED_GUEST force-flag and updated below.
232 * - V_TPR: Updated by iemCImpl_load_CrX or by the physical CPU for hardware-assisted
233 * SVM execution.
234 * - Interrupt shadow: Tracked using VMCPU_FF_INHIBIT_INTERRUPTS and RIP.
235 */
236 PSVMVMCBCTRL pVmcbMemCtrl = &pVmcbMem->ctrl;
237 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)) /* V_IRQ. */
238 pVmcbMemCtrl->IntCtrl.n.u1VIrqPending = 0;
239 else
240 {
241 Assert(pVmcbCtrl->IntCtrl.n.u1VIrqPending);
242 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
243 }
244
245 pVmcbMemCtrl->IntCtrl.n.u8VTPR = pVmcbCtrl->IntCtrl.n.u8VTPR; /* V_TPR. */
246
247 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) /* Interrupt shadow. */
248 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip)
249 {
250 pVmcbMemCtrl->IntShadow.n.u1IntShadow = 1;
251 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
252 LogFlow(("iemSvmVmexit: Interrupt shadow till %#RX64\n", pVCpu->cpum.GstCtx.rip));
253 }
254 else
255 pVmcbMemCtrl->IntShadow.n.u1IntShadow = 0;
256
257 /*
258 * Save nRIP, instruction length and byte fields.
259 */
260 pVmcbMemCtrl->u64NextRIP = pVmcbCtrl->u64NextRIP;
261 pVmcbMemCtrl->cbInstrFetched = pVmcbCtrl->cbInstrFetched;
262 memcpy(&pVmcbMemCtrl->abInstr[0], &pVmcbCtrl->abInstr[0], sizeof(pVmcbMemCtrl->abInstr));
263
264 /*
265 * Save exit information.
266 */
267 pVmcbMemCtrl->u64ExitCode = uExitCode;
268 pVmcbMemCtrl->u64ExitInfo1 = uExitInfo1;
269 pVmcbMemCtrl->u64ExitInfo2 = uExitInfo2;
270
271 /*
272 * Update the exit interrupt-information field if this #VMEXIT happened as a result
273 * of delivering an event through IEM.
274 *
275 * Don't update the exit interrupt-information field if the event wasn't being injected
276 * through IEM, as it would have been updated by real hardware if the nested-guest was
277 * executed using hardware-assisted SVM.
278 */
279 {
280 uint8_t uExitIntVector;
281 uint32_t uExitIntErr;
282 uint32_t fExitIntFlags;
283 bool const fRaisingEvent = IEMGetCurrentXcpt(pVCpu, &uExitIntVector, &fExitIntFlags, &uExitIntErr,
284 NULL /* uExitIntCr2 */);
285 if (fRaisingEvent)
286 {
287 pVmcbCtrl->ExitIntInfo.n.u1Valid = 1;
288 pVmcbCtrl->ExitIntInfo.n.u8Vector = uExitIntVector;
289 pVmcbCtrl->ExitIntInfo.n.u3Type = iemGetSvmEventType(uExitIntVector, fExitIntFlags);
290 if (fExitIntFlags & IEM_XCPT_FLAGS_ERR)
291 {
292 pVmcbCtrl->ExitIntInfo.n.u1ErrorCodeValid = true;
293 pVmcbCtrl->ExitIntInfo.n.u32ErrorCode = uExitIntErr;
294 }
295 }
296 }
297
298 /*
299 * Save the exit interrupt-information field.
300 *
301 * We write the whole field including overwriting reserved bits as it was observed on an
302 * AMD Ryzen 5 Pro 1500 that the CPU does not preserve reserved bits in EXITINTINFO.
303 */
304 pVmcbMemCtrl->ExitIntInfo = pVmcbCtrl->ExitIntInfo;
305
306 /*
307 * Clear event injection.
308 */
309 pVmcbMemCtrl->EventInject.n.u1Valid = 0;
310
311 iemMemPageUnmap(pVCpu, pVCpu->cpum.GstCtx.hwvirt.svm.GCPhysVmcb, IEM_ACCESS_DATA_RW, pVmcbMem, &PgLockMem);
312 }
313
314 /*
315 * Prepare for guest's "host mode" by clearing internal processor state bits.
316 *
317 * We don't need to zero out the state-save area; clearing just the controls is
318 * sufficient because they hold the critical bit indicating whether we're inside
319 * the nested-guest or not.
320 */
321 memset(pVmcbCtrl, 0, sizeof(*pVmcbCtrl));
322 Assert(!CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
323
324 /*
325 * Restore the subset of force-flags that were preserved.
326 */
327 if (pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions)
328 {
329 VMCPU_FF_SET_MASK(pVCpu, pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions);
330 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = 0;
331 }
332
333 if (rcStrict == VINF_SUCCESS)
334 {
335 /** @todo Nested paging. */
336 /** @todo ASID. */
337
338 /*
339 * If we are switching to PAE mode host, validate the PDPEs first.
340 * Any invalid PDPEs here causes a VCPU shutdown.
341 */
342 PCSVMHOSTSTATE pHostState = &pVCpu->cpum.GstCtx.hwvirt.svm.HostState;
343 bool const fHostInPaeMode = CPUMIsPaePagingEnabled(pHostState->uCr0, pHostState->uCr4, pHostState->uEferMsr);
344 if (fHostInPaeMode)
345 rcStrict = PGMGstMapPaePdpesAtCr3(pVCpu, pHostState->uCr3);
346 if (RT_SUCCESS(rcStrict))
347 {
348 /*
349 * Reload the host state.
350 */
351 CPUMSvmVmExitRestoreHostState(pVCpu, IEM_GET_CTX(pVCpu));
352
353 /*
354 * Update PGM, IEM and others of a world-switch.
355 */
356 rcStrict = iemSvmWorldSwitch(pVCpu);
357 if (rcStrict == VINF_SUCCESS)
358 rcStrict = VINF_SVM_VMEXIT;
359 else if (RT_SUCCESS(rcStrict))
360 {
361 LogFlow(("iemSvmVmexit: Setting passup status from iemSvmWorldSwitch %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
362 iemSetPassUpStatus(pVCpu, rcStrict);
363 rcStrict = VINF_SVM_VMEXIT;
364 }
365 else
366 LogFlow(("iemSvmVmexit: iemSvmWorldSwitch unexpected failure. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
367 }
368 else
369 {
370 Log(("iemSvmVmexit: PAE PDPEs invalid while restoring host state. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
371 rcStrict = VINF_EM_TRIPLE_FAULT;
372 }
373 }
374 else
375 {
376 AssertMsgFailed(("iemSvmVmexit: Mapping VMCB at %#RGp failed. rc=%Rrc\n", pVCpu->cpum.GstCtx.hwvirt.svm.GCPhysVmcb, VBOXSTRICTRC_VAL(rcStrict)));
377 rcStrict = VINF_EM_TRIPLE_FAULT;
378 }
379 }
380 else
381 {
382 AssertMsgFailed(("iemSvmVmexit: Not in SVM guest mode! uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitCode, uExitInfo1, uExitInfo2));
383 rcStrict = VERR_SVM_IPE_3;
384 }
385
386# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
387 /* CLGI/STGI may not have been intercepted and thus not executed in IEM. */
388 if ( HMIsEnabled(pVCpu->CTX_SUFF(pVM))
389 && HMIsSvmVGifActive(pVCpu->CTX_SUFF(pVM)))
390 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
391# endif
392 return rcStrict;
393}
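/* Caller-side sketch (illustrative, mirroring iemCImpl_vmrun below): callers treat
 * VERR_SVM_VMEXIT_FAILED as "host state could not be restored" and typically convert it into a
 * guest shutdown, e.g.
 *     if (rcStrict == VERR_SVM_VMEXIT_FAILED)
 *         rcStrict = VINF_EM_TRIPLE_FAULT;
 */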
394
395
396/**
397 * Interface for HM and EM to emulate \#VMEXIT.
398 *
399 * @returns Strict VBox status code.
400 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
401 * @param uExitCode The exit code.
402 * @param uExitInfo1 The exit info. 1 field.
403 * @param uExitInfo2 The exit info. 2 field.
404 * @thread EMT(pVCpu)
405 */
406VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
407{
408 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
409 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
410 if (pVCpu->iem.s.cActiveMappings)
411 iemMemRollback(pVCpu);
412 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
413}
414
415
416/**
417 * Performs the operations that are part of VMRUN instruction execution in the
418 * guest.
419 *
420 * @returns Strict VBox status code (i.e. informational status codes too).
421 * @retval VINF_SUCCESS successfully executed VMRUN and entered nested-guest
422 * code execution.
423 * @retval VINF_SVM_VMEXIT when executing VMRUN causes a \#VMEXIT
424 * (SVM_EXIT_INVALID most likely).
425 *
426 * @param pVCpu The cross context virtual CPU structure.
427 * @param cbInstr The length of the VMRUN instruction.
428 * @param GCPhysVmcb Guest physical address of the VMCB to run.
429 */
430static VBOXSTRICTRC iemSvmVmrun(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPHYS GCPhysVmcb) RT_NOEXCEPT
431{
432 LogFlow(("iemSvmVmrun\n"));
433
434 /*
435 * Cache the physical address of the VMCB for #VMEXIT exceptions.
436 */
437 pVCpu->cpum.GstCtx.hwvirt.svm.GCPhysVmcb = GCPhysVmcb;
438
439 /*
440 * Save the host state.
441 */
442 CPUMSvmVmRunSaveHostState(IEM_GET_CTX(pVCpu), cbInstr);
443
444 /*
445 * Read the guest VMCB.
446 */
447 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
448 int rc = PGMPhysSimpleReadGCPhys(pVM, &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb, GCPhysVmcb, sizeof(SVMVMCB));
449 if (RT_SUCCESS(rc))
450 {
451 /*
452 * AMD-V seems to preserve reserved fields and only writes back selected, recognized
453 * fields on #VMEXIT. However, not all reserved bits are preserved (e.g., EXITINTINFO)
454 * but in our implementation we try to preserve as much as we possibly can.
455 *
456 * We could read the entire page here and only write back the relevant fields on
457 * #VMEXIT but since our internal VMCB is also being used by HM during hardware-assisted
458 * SVM execution, it creates a potential for a nested-hypervisor to set bits that are
459 * currently reserved but may be recognized as feature bits in future CPUs, causing
460 * unexpected & undesired results. Hence, we zero out unrecognized fields here as we
461 * typically enter hardware-assisted SVM soon anyway, see @bugref{7243#c113}.
462 */
463 PSVMVMCBCTRL pVmcbCtrl = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl;
464 PSVMVMCBSTATESAVE pVmcbNstGst = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.guest;
465
466 RT_ZERO(pVmcbCtrl->u8Reserved0);
467 RT_ZERO(pVmcbCtrl->u8Reserved1);
468 RT_ZERO(pVmcbCtrl->u8Reserved2);
469 RT_ZERO(pVmcbNstGst->u8Reserved0);
470 RT_ZERO(pVmcbNstGst->u8Reserved1);
471 RT_ZERO(pVmcbNstGst->u8Reserved2);
472 RT_ZERO(pVmcbNstGst->u8Reserved3);
473 RT_ZERO(pVmcbNstGst->u8Reserved4);
474 RT_ZERO(pVmcbNstGst->u8Reserved5);
475 pVmcbCtrl->u32Reserved0 = 0;
476 pVmcbCtrl->TLBCtrl.n.u24Reserved = 0;
477 pVmcbCtrl->IntCtrl.n.u6Reserved = 0;
478 pVmcbCtrl->IntCtrl.n.u3Reserved = 0;
479 pVmcbCtrl->IntCtrl.n.u5Reserved = 0;
480 pVmcbCtrl->IntCtrl.n.u24Reserved = 0;
481 pVmcbCtrl->IntShadow.n.u30Reserved = 0;
482 pVmcbCtrl->ExitIntInfo.n.u19Reserved = 0;
483 pVmcbCtrl->NestedPagingCtrl.n.u29Reserved = 0;
484 pVmcbCtrl->EventInject.n.u19Reserved = 0;
485 pVmcbCtrl->LbrVirt.n.u30Reserved = 0;
486
487 /*
488 * Validate guest-state and controls.
489 */
490 /* VMRUN must always be intercepted. */
491 if (!CPUMIsGuestSvmCtrlInterceptSet(pVCpu, IEM_GET_CTX(pVCpu), SVM_CTRL_INTERCEPT_VMRUN))
492 {
493 Log(("iemSvmVmrun: VMRUN instruction not intercepted -> #VMEXIT\n"));
494 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
495 }
496
497 /* Nested paging. */
498 if ( pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging
499 && !pVM->cpum.ro.GuestFeatures.fSvmNestedPaging)
500 {
501 Log(("iemSvmVmrun: Nested paging not supported -> Disabling\n"));
502 pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging = 0;
503 }
504
505 /* AVIC. */
506 if ( pVmcbCtrl->IntCtrl.n.u1AvicEnable
507 && !pVM->cpum.ro.GuestFeatures.fSvmAvic)
508 {
509 Log(("iemSvmVmrun: AVIC not supported -> Disabling\n"));
510 pVmcbCtrl->IntCtrl.n.u1AvicEnable = 0;
511 }
512
513 /* Last branch record (LBR) virtualization. */
514 if ( pVmcbCtrl->LbrVirt.n.u1LbrVirt
515 && !pVM->cpum.ro.GuestFeatures.fSvmLbrVirt)
516 {
517 Log(("iemSvmVmrun: LBR virtualization not supported -> Disabling\n"));
518 pVmcbCtrl->LbrVirt.n.u1LbrVirt = 0;
519 }
520
521 /* Virtualized VMSAVE/VMLOAD. */
522 if ( pVmcbCtrl->LbrVirt.n.u1VirtVmsaveVmload
523 && !pVM->cpum.ro.GuestFeatures.fSvmVirtVmsaveVmload)
524 {
525 Log(("iemSvmVmrun: Virtualized VMSAVE/VMLOAD not supported -> Disabling\n"));
526 pVmcbCtrl->LbrVirt.n.u1VirtVmsaveVmload = 0;
527 }
528
529 /* Virtual GIF. */
530 if ( pVmcbCtrl->IntCtrl.n.u1VGifEnable
531 && !pVM->cpum.ro.GuestFeatures.fSvmVGif)
532 {
533 Log(("iemSvmVmrun: Virtual GIF not supported -> Disabling\n"));
534 pVmcbCtrl->IntCtrl.n.u1VGifEnable = 0;
535 }
536
537 /* Guest ASID. */
538 if (!pVmcbCtrl->TLBCtrl.n.u32ASID)
539 {
540 Log(("iemSvmVmrun: Guest ASID is invalid -> #VMEXIT\n"));
541 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
542 }
543
544 /* Guest AVIC. */
545 if ( pVmcbCtrl->IntCtrl.n.u1AvicEnable
546 && !pVM->cpum.ro.GuestFeatures.fSvmAvic)
547 {
548 Log(("iemSvmVmrun: AVIC not supported -> Disabling\n"));
549 pVmcbCtrl->IntCtrl.n.u1AvicEnable = 0;
550 }
551
552 /* Guest Secure Encrypted Virtualization. */
553 if ( ( pVmcbCtrl->NestedPagingCtrl.n.u1Sev
554 || pVmcbCtrl->NestedPagingCtrl.n.u1SevEs)
555 && !pVM->cpum.ro.GuestFeatures.fSvmAvic)
556 {
557 Log(("iemSvmVmrun: SEV not supported -> Disabling\n"));
558 pVmcbCtrl->NestedPagingCtrl.n.u1Sev = 0;
559 pVmcbCtrl->NestedPagingCtrl.n.u1SevEs = 0;
560 }
561
562 /* Flush by ASID. */
563 if ( !pVM->cpum.ro.GuestFeatures.fSvmFlusbByAsid
564 && pVmcbCtrl->TLBCtrl.n.u8TLBFlush != SVM_TLB_FLUSH_NOTHING
565 && pVmcbCtrl->TLBCtrl.n.u8TLBFlush != SVM_TLB_FLUSH_ENTIRE)
566 {
567 Log(("iemSvmVmrun: Flush-by-ASID not supported -> #VMEXIT\n"));
568 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
569 }
570
571 /* IO permission bitmap. */
572 RTGCPHYS const GCPhysIOBitmap = pVmcbCtrl->u64IOPMPhysAddr;
573 if ( (GCPhysIOBitmap & X86_PAGE_4K_OFFSET_MASK)
574 || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap)
575 || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap + X86_PAGE_4K_SIZE)
576 || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap + (X86_PAGE_4K_SIZE << 1)))
577 {
578 Log(("iemSvmVmrun: IO bitmap physaddr invalid. GCPhysIOBitmap=%#RX64 -> #VMEXIT\n", GCPhysIOBitmap));
579 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
580 }
581
582 /* MSR permission bitmap. */
583 RTGCPHYS const GCPhysMsrBitmap = pVmcbCtrl->u64MSRPMPhysAddr;
584 if ( (GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
585 || !PGMPhysIsGCPhysNormal(pVM, GCPhysMsrBitmap)
586 || !PGMPhysIsGCPhysNormal(pVM, GCPhysMsrBitmap + X86_PAGE_4K_SIZE))
587 {
588 Log(("iemSvmVmrun: MSR bitmap physaddr invalid. GCPhysMsrBitmap=%#RX64 -> #VMEXIT\n", GCPhysMsrBitmap));
589 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
590 }
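/* Note: the sizes probed above match the cached copies used later in this file: the IO
 * permission map (IOPM) spans SVM_IOPM_PAGES contiguous 4K pages (three pages are probed) and
 * the MSR permission map (MSRPM) spans SVM_MSRPM_PAGES contiguous 4K pages (two pages are
 * probed); both must be 4K aligned and backed by normal RAM. */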
591
592 /* CR0. */
593 if ( !(pVmcbNstGst->u64CR0 & X86_CR0_CD)
594 && (pVmcbNstGst->u64CR0 & X86_CR0_NW))
595 {
596 Log(("iemSvmVmrun: CR0 no-write through with cache disabled. CR0=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64CR0));
597 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
598 }
599 if (pVmcbNstGst->u64CR0 >> 32)
600 {
601 Log(("iemSvmVmrun: CR0 reserved bits set. CR0=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64CR0));
602 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
603 }
604 /** @todo Implement all reserved bits/illegal combinations for CR3, CR4. */
605
606 /* DR6 and DR7. */
607 if ( pVmcbNstGst->u64DR6 >> 32
608 || pVmcbNstGst->u64DR7 >> 32)
609 {
610 Log(("iemSvmVmrun: DR6 and/or DR7 reserved bits set. DR6=%#RX64 DR7=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64DR6,
611 pVmcbNstGst->u64DR7));
612 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
613 }
614
615 /*
616 * PAT (Page Attribute Table) MSR.
617 *
618 * The CPU only validates and loads it when nested-paging is enabled.
619 * See AMD spec. "15.25.4 Nested Paging and VMRUN/#VMEXIT".
620 */
621 if ( pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging
622 && !CPUMIsPatMsrValid(pVmcbNstGst->u64PAT))
623 {
624 Log(("iemSvmVmrun: PAT invalid. u64PAT=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64PAT));
625 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
626 }
627
628 /*
629 * Copy the IO permission bitmap into the cache.
630 */
631 AssertCompile(sizeof(pVCpu->cpum.GstCtx.hwvirt.svm.abIoBitmap) == SVM_IOPM_PAGES * X86_PAGE_4K_SIZE);
632 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->cpum.GstCtx.hwvirt.svm.abIoBitmap, GCPhysIOBitmap,
633 sizeof(pVCpu->cpum.GstCtx.hwvirt.svm.abIoBitmap));
634 if (RT_FAILURE(rc))
635 {
636 Log(("iemSvmVmrun: Failed reading the IO permission bitmap at %#RGp. rc=%Rrc\n", GCPhysIOBitmap, rc));
637 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
638 }
639
640 /*
641 * Copy the MSR permission bitmap into the cache.
642 */
643 AssertCompile(sizeof(pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap) == SVM_MSRPM_PAGES * X86_PAGE_4K_SIZE);
644 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap, GCPhysMsrBitmap,
645 sizeof(pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap));
646 if (RT_FAILURE(rc))
647 {
648 Log(("iemSvmVmrun: Failed reading the MSR permission bitmap at %#RGp. rc=%Rrc\n", GCPhysMsrBitmap, rc));
649 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
650 }
651
652 /*
653 * Copy segments from nested-guest VMCB state to the guest-CPU state.
654 *
655 * We do this here as we need to use the CS attributes and it's easier this way
656 * than using the VMCB format selectors. It doesn't really matter where we copy
657 * the state, we restore the guest-CPU context state on the \#VMEXIT anyway.
658 */
659 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), pVmcbNstGst, ES, es);
660 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), pVmcbNstGst, CS, cs);
661 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), pVmcbNstGst, SS, ss);
662 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), pVmcbNstGst, DS, ds);
663
664 /** @todo Segment attribute overrides by VMRUN. */
665
666 /*
667 * CPL adjustments and overrides.
668 *
669 * SS.DPL is apparently the CPU's CPL, see comment in CPUMGetGuestCPL().
670 * We shall thus adjust both CS.DPL and SS.DPL here.
671 */
672 pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl = pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl = pVmcbNstGst->u8CPL;
673 if (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(pVCpu)))
674 pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl = pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl = 3;
675 if (CPUMIsGuestInRealModeEx(IEM_GET_CTX(pVCpu)))
676 pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl = pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl = 0;
677 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
678
679 /*
680 * Continue validating guest-state and controls.
681 *
682 * We pass CR0 as 0 to CPUMIsGuestEferMsrWriteValid() below to skip the illegal
683 * EFER.LME bit transition check. We pass the nested-guest's EFER as both the
684 * old and new EFER value to not have any guest EFER bits influence the new
685 * nested-guest EFER.
686 */
687 uint64_t uValidEfer;
688 rc = CPUMIsGuestEferMsrWriteValid(pVM, 0 /* CR0 */, pVmcbNstGst->u64EFER, pVmcbNstGst->u64EFER, &uValidEfer);
689 if (RT_FAILURE(rc))
690 {
691 Log(("iemSvmVmrun: EFER invalid uOldEfer=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64EFER));
692 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
693 }
694
695 /* Validate paging and CPU mode bits. */
696 bool const fSvm = RT_BOOL(uValidEfer & MSR_K6_EFER_SVME);
697 bool const fLongModeSupported = RT_BOOL(pVM->cpum.ro.GuestFeatures.fLongMode);
698 bool const fLongModeEnabled = RT_BOOL(uValidEfer & MSR_K6_EFER_LME);
699 bool const fPaging = RT_BOOL(pVmcbNstGst->u64CR0 & X86_CR0_PG);
700 bool const fPae = RT_BOOL(pVmcbNstGst->u64CR4 & X86_CR4_PAE);
701 bool const fProtMode = RT_BOOL(pVmcbNstGst->u64CR0 & X86_CR0_PE);
702 bool const fLongModeWithPaging = fLongModeEnabled && fPaging;
703 bool const fLongModeConformCS = pVCpu->cpum.GstCtx.cs.Attr.n.u1Long && pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig;
704 /* Adjust EFER.LMA (this is normally done by the CPU when system software writes CR0). */
705 if (fLongModeWithPaging)
706 uValidEfer |= MSR_K6_EFER_LMA;
707 bool const fLongModeActiveOrEnabled = RT_BOOL(uValidEfer & (MSR_K6_EFER_LME | MSR_K6_EFER_LMA));
708 if ( !fSvm
709 || (!fLongModeSupported && fLongModeActiveOrEnabled)
710 || (fLongModeWithPaging && !fPae)
711 || (fLongModeWithPaging && !fProtMode)
712 || ( fLongModeEnabled
713 && fPaging
714 && fPae
715 && fLongModeConformCS))
716 {
717 Log(("iemSvmVmrun: EFER invalid. uValidEfer=%#RX64 -> #VMEXIT\n", uValidEfer));
718 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
719 }
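/* The combined check above corresponds to the VMRUN consistency checks in the AMD spec:
 * EFER.SVME must be set, long mode must not be enabled on CPUs without long-mode support,
 * long mode with paging requires PAE and protected mode, and CS.L together with CS.D is an
 * illegal long-mode code-segment combination; any violation yields SVM_EXIT_INVALID. */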
720
721 /*
722 * Preserve the required force-flags.
723 *
724 * We only preserve the force-flags that would affect the execution of the
725 * nested-guest (or the guest).
726 *
727 * - VMCPU_FF_BLOCK_NMIS needs to be preserved as it blocks NMI until the
728 * execution of a subsequent IRET instruction in the guest.
729 *
730 * The remaining FFs (e.g. timers) can stay in place so that we will be able to
731 * generate interrupts that should cause #VMEXITs for the nested-guest.
732 *
733 * VMRUN has implicit GIF (Global Interrupt Flag) handling, we don't need to
734 * preserve VMCPU_FF_INHIBIT_INTERRUPTS.
735 */
736 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;
737 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
738
739 /*
740 * Pause filter.
741 */
742 if (pVM->cpum.ro.GuestFeatures.fSvmPauseFilter)
743 {
744 pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter = pVmcbCtrl->u16PauseFilterCount;
745 if (pVM->cpum.ro.GuestFeatures.fSvmPauseFilterThreshold)
746 pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilterThreshold = pVmcbCtrl->u16PauseFilterThreshold;
747 }
748
749 /*
750 * Interrupt shadow.
751 */
752 if (pVmcbCtrl->IntShadow.n.u1IntShadow)
753 {
754 LogFlow(("iemSvmVmrun: setting interrupt shadow. inhibit PC=%#RX64\n", pVmcbNstGst->u64RIP));
755 /** @todo will this cause trouble if the nested-guest is 64-bit but the guest is 32-bit? */
756 EMSetInhibitInterruptsPC(pVCpu, pVmcbNstGst->u64RIP);
757 }
758
759 /*
760 * TLB flush control.
761 * Currently disabled since it's redundant as we unconditionally flush the TLB
762 * in iemSvmWorldSwitch() below.
763 */
764# if 0
765 /** @todo @bugref{7243}: ASID based PGM TLB flushes. */
766 if ( pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE
767 || pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT
768 || pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
769 PGMFlushTLB(pVCpu, pVmcbNstGst->u64CR3, true /* fGlobal */);
770# endif
771
772 /*
773 * Validate and map PAE PDPEs if the guest will be using PAE paging.
774 * Invalid PAE PDPEs here causes a #VMEXIT.
775 */
776 if ( !pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging
777 && CPUMIsPaePagingEnabled(pVmcbNstGst->u64CR0, pVmcbNstGst->u64CR4, uValidEfer))
778 {
779 rc = PGMGstMapPaePdpesAtCr3(pVCpu, pVmcbNstGst->u64CR3);
780 if (RT_SUCCESS(rc))
781 { /* likely */ }
782 else
783 {
784 Log(("iemSvmVmrun: PAE PDPEs invalid -> #VMEXIT\n"));
785 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
786 }
787 }
788
789 /*
790 * Copy the remaining guest state from the VMCB to the guest-CPU context.
791 */
792 pVCpu->cpum.GstCtx.gdtr.cbGdt = pVmcbNstGst->GDTR.u32Limit;
793 pVCpu->cpum.GstCtx.gdtr.pGdt = pVmcbNstGst->GDTR.u64Base;
794 pVCpu->cpum.GstCtx.idtr.cbIdt = pVmcbNstGst->IDTR.u32Limit;
795 pVCpu->cpum.GstCtx.idtr.pIdt = pVmcbNstGst->IDTR.u64Base;
796 CPUMSetGuestCR0(pVCpu, pVmcbNstGst->u64CR0);
797 CPUMSetGuestCR4(pVCpu, pVmcbNstGst->u64CR4);
798 pVCpu->cpum.GstCtx.cr3 = pVmcbNstGst->u64CR3;
799 pVCpu->cpum.GstCtx.cr2 = pVmcbNstGst->u64CR2;
800 pVCpu->cpum.GstCtx.dr[6] = pVmcbNstGst->u64DR6;
801 pVCpu->cpum.GstCtx.dr[7] = pVmcbNstGst->u64DR7;
802 pVCpu->cpum.GstCtx.rflags.u64 = pVmcbNstGst->u64RFlags;
803 pVCpu->cpum.GstCtx.rax = pVmcbNstGst->u64RAX;
804 pVCpu->cpum.GstCtx.rsp = pVmcbNstGst->u64RSP;
805 pVCpu->cpum.GstCtx.rip = pVmcbNstGst->u64RIP;
806 CPUMSetGuestEferMsrNoChecks(pVCpu, pVCpu->cpum.GstCtx.msrEFER, uValidEfer);
807 if (pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging)
808 pVCpu->cpum.GstCtx.msrPAT = pVmcbNstGst->u64PAT;
809
810 /* Apply the mandatory DR6 and DR7 set/clear bit masks (clear RAZ/MBZ bits, set RA1 bits). */
811 pVCpu->cpum.GstCtx.dr[6] &= ~(X86_DR6_RAZ_MASK | X86_DR6_MBZ_MASK);
812 pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_RA1_MASK;
813 pVCpu->cpum.GstCtx.dr[7] &= ~(X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
814 pVCpu->cpum.GstCtx.dr[7] |= X86_DR7_RA1_MASK;
815
816 /*
817 * Check for pending virtual interrupts.
818 */
819 if (pVmcbCtrl->IntCtrl.n.u1VIrqPending)
820 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
821 else
822 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST));
823
824 /*
825 * Update PGM, IEM and others of a world-switch.
826 */
827 VBOXSTRICTRC rcStrict = iemSvmWorldSwitch(pVCpu);
828 if (rcStrict == VINF_SUCCESS)
829 { /* likely */ }
830 else if (RT_SUCCESS(rcStrict))
831 {
832 LogFlow(("iemSvmVmrun: iemSvmWorldSwitch returned %Rrc, setting passup status\n", VBOXSTRICTRC_VAL(rcStrict)));
833 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
834 }
835 else
836 {
837 LogFlow(("iemSvmVmrun: iemSvmWorldSwitch unexpected failure. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
838 return rcStrict;
839 }
840
841 /*
842 * Set the global-interrupt flag to allow interrupts in the guest.
843 */
844 CPUMSetGuestGif(&pVCpu->cpum.GstCtx, true);
845
846 /*
847 * Event injection.
848 */
849 PCSVMEVENT pEventInject = &pVmcbCtrl->EventInject;
850 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = !pEventInject->n.u1Valid;
851 if (pEventInject->n.u1Valid)
852 {
853 uint8_t const uVector = pEventInject->n.u8Vector;
854 TRPMEVENT const enmType = HMSvmEventToTrpmEventType(pEventInject, uVector);
855 uint16_t const uErrorCode = pEventInject->n.u1ErrorCodeValid ? pEventInject->n.u32ErrorCode : 0;
856
857 /* Validate vectors for hardware exceptions, see AMD spec. 15.20 "Event Injection". */
858 if (RT_UNLIKELY(enmType == TRPM_32BIT_HACK))
859 {
860 Log(("iemSvmVmrun: Invalid event type =%#x -> #VMEXIT\n", (uint8_t)pEventInject->n.u3Type));
861 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
862 }
863 if (pEventInject->n.u3Type == SVM_EVENT_EXCEPTION)
864 {
865 if ( uVector == X86_XCPT_NMI
866 || uVector > X86_XCPT_LAST)
867 {
868 Log(("iemSvmVmrun: Invalid vector for hardware exception. uVector=%#x -> #VMEXIT\n", uVector));
869 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
870 }
871 if ( uVector == X86_XCPT_BR
872 && CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
873 {
874 Log(("iemSvmVmrun: Cannot inject #BR when not in long mode -> #VMEXIT\n"));
875 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
876 }
877 /** @todo any others? */
878 }
879
880 /*
881 * Invalidate the exit interrupt-information field here. This field is fully updated
882 * on #VMEXIT as events other than the one below can also cause intercepts during
883 * their injection (e.g. exceptions).
884 */
885 pVmcbCtrl->ExitIntInfo.n.u1Valid = 0;
886
887 /*
888 * Clear the event injection valid bit here. While the AMD spec. mentions that the CPU
889 * clears this bit from the VMCB unconditionally on #VMEXIT, internally the CPU could be
890 * clearing it at any time, most likely before/after injecting the event. Since VirtualBox
891 * doesn't have any virtual-CPU internal representation of this bit, we clear/update the
892 * VMCB here. This also has the added benefit that we avoid the risk of injecting the event
893 * twice if we fall back to executing the nested-guest using hardware-assisted SVM after
894 * injecting the event through IEM here.
895 */
896 pVmcbCtrl->EventInject.n.u1Valid = 0;
897
898 /** @todo NRIP: Software interrupts can only be pushed properly if we support
899 * NRIP for the nested-guest to calculate the instruction length
900 * below. */
901 LogFlow(("iemSvmVmrun: Injecting event: %04x:%08RX64 vec=%#x type=%d uErr=%u cr2=%#RX64 cr3=%#RX64 efer=%#RX64\n",
902 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uVector, enmType, uErrorCode, pVCpu->cpum.GstCtx.cr2,
903 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.msrEFER));
904
905 /*
906 * We shall not inject the event here right away. There may be paging mode related updates
907 * as a result of the world-switch above that are yet to be honored. Instead flag the event
908 * as pending for injection.
909 */
910 TRPMAssertTrap(pVCpu, uVector, enmType);
911 if (pEventInject->n.u1ErrorCodeValid)
912 TRPMSetErrorCode(pVCpu, uErrorCode);
913 if ( enmType == TRPM_TRAP
914 && uVector == X86_XCPT_PF)
915 TRPMSetFaultAddress(pVCpu, pVCpu->cpum.GstCtx.cr2);
916 }
917 else
918 LogFlow(("iemSvmVmrun: Entering nested-guest: %04x:%08RX64 cr0=%#RX64 cr3=%#RX64 cr4=%#RX64 efer=%#RX64 efl=%#x\n",
919 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr3,
920 pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER, pVCpu->cpum.GstCtx.rflags.u64));
921
922 LogFlow(("iemSvmVmrun: returns %d\n", VBOXSTRICTRC_VAL(rcStrict)));
923
924# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
925 /* If CLGI/STGI isn't intercepted we force IEM-only nested-guest execution here. */
926 if ( HMIsEnabled(pVM)
927 && HMIsSvmVGifActive(pVM))
928 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
929# endif
930
931 return rcStrict;
932 }
933
934 /* Shouldn't really happen as the caller should've validated the physical address already. */
935 Log(("iemSvmVmrun: Failed to read nested-guest VMCB at %#RGp (rc=%Rrc) -> #VMEXIT\n", GCPhysVmcb, rc));
936 return rc;
937}
938
939
940/**
941 * Checks if the event intercepts and performs the \#VMEXIT if the corresponding
942 * intercept is active.
943 *
944 * @returns Strict VBox status code.
945 * @retval VINF_SVM_INTERCEPT_NOT_ACTIVE if the intercept is not active or
946 * we're not executing a nested-guest.
947 * @retval VINF_SVM_VMEXIT if the intercept is active and the \#VMEXIT occurred
948 * successfully.
949 * @retval VERR_SVM_VMEXIT_FAILED if the intercept is active and the \#VMEXIT
950 * failed and a shutdown needs to be initiated for the guest.
951 *
952 * @returns VBox strict status code.
953 * @param pVCpu The cross context virtual CPU structure of the calling thread.
954 * @param u8Vector The interrupt or exception vector.
955 * @param fFlags The exception flags (see IEM_XCPT_FLAGS_XXX).
956 * @param uErr The error-code associated with the exception.
957 * @param uCr2 The CR2 value in case of a \#PF exception.
958 */
959VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPUCC pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2) RT_NOEXCEPT
960{
961 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
962
963 /*
964 * Handle SVM exception and software interrupt intercepts, see AMD spec. 15.12 "Exception Intercepts".
965 *
966 * - NMI intercepts have their own exit code and do not cause SVM_EXIT_XCPT_2 #VMEXITs.
967 * - External interrupts and software interrupts (INTn instruction) do not check the exception intercepts
968 * even when they use a vector in the range 0 to 31.
969 * - ICEBP should not trigger #DB intercept, but its own intercept.
970 * - For #PF exceptions, its intercept is checked before CR2 is written by the exception.
971 */
972 /* Check NMI intercept */
973 if ( u8Vector == X86_XCPT_NMI
974 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
975 && IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_NMI))
976 {
977 Log2(("iemHandleSvmNstGstEventIntercept: NMI intercept -> #VMEXIT\n"));
978 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
979 }
980
981 /* Check ICEBP intercept. */
982 if ( (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)
983 && IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_ICEBP))
984 {
985 Log2(("iemHandleSvmNstGstEventIntercept: ICEBP intercept -> #VMEXIT\n"));
986 IEM_SVM_UPDATE_NRIP(pVCpu);
987 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_ICEBP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
988 }
989
990 /* Check CPU exception intercepts. */
991 if ( (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
992 && IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, u8Vector))
993 {
994 Assert(u8Vector <= X86_XCPT_LAST);
995 uint64_t const uExitInfo1 = fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0;
996 uint64_t const uExitInfo2 = fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0;
997 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists
998 && u8Vector == X86_XCPT_PF
999 && !(uErr & X86_TRAP_PF_ID))
1000 {
1001 PSVMVMCBCTRL pVmcbCtrl = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl;
1002# ifdef IEM_WITH_CODE_TLB
1003 uint8_t const *pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
1004 uint8_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
1005 pVmcbCtrl->cbInstrFetched = RT_MIN(cbInstrBuf, SVM_CTRL_GUEST_INSTR_BYTES_MAX);
1006 if ( pbInstrBuf
1007 && cbInstrBuf > 0)
1008 memcpy(&pVmcbCtrl->abInstr[0], pbInstrBuf, pVmcbCtrl->cbInstrFetched);
1009# else
1010 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1011 pVmcbCtrl->cbInstrFetched = RT_MIN(cbOpcode, SVM_CTRL_GUEST_INSTR_BYTES_MAX);
1012 if (cbOpcode > 0)
1013 memcpy(&pVmcbCtrl->abInstr[0], &pVCpu->iem.s.abOpcode[0], pVmcbCtrl->cbInstrFetched);
1014# endif
1015 }
1016 if (u8Vector == X86_XCPT_BR)
1017 IEM_SVM_UPDATE_NRIP(pVCpu);
1018 Log2(("iemHandleSvmNstGstEventIntercept: Xcpt intercept u32InterceptXcpt=%#RX32 u8Vector=%#x "
1019 "uExitInfo1=%#RX64 uExitInfo2=%#RX64 -> #VMEXIT\n", pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl.u32InterceptXcpt,
1020 u8Vector, uExitInfo1, uExitInfo2));
1021 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_0 + u8Vector, uExitInfo1, uExitInfo2);
1022 }
1023
1024 /* Check software interrupt (INTn) intercepts. */
1025 if ( (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
1026 | IEM_XCPT_FLAGS_BP_INSTR
1027 | IEM_XCPT_FLAGS_ICEBP_INSTR
1028 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
1029 && IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INTN))
1030 {
1031 uint64_t const uExitInfo1 = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? u8Vector : 0;
1032 Log2(("iemHandleSvmNstGstEventIntercept: Software INT intercept (u8Vector=%#x) -> #VMEXIT\n", u8Vector));
1033 IEM_SVM_UPDATE_NRIP(pVCpu);
1034 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SWINT, uExitInfo1, 0 /* uExitInfo2 */);
1035 }
1036
1037 return VINF_SVM_INTERCEPT_NOT_ACTIVE;
1038}
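/* Note: for intercepted #PF with decode assists, the handler above also copies up to
 * SVM_CTRL_GUEST_INSTR_BYTES_MAX bytes of the faulting instruction into VMCB.ctrl.abInstr,
 * mirroring the instruction-bytes assist that hardware provides to the nested hypervisor. */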
1039
1040
1041/**
1042 * Checks the SVM IO permission bitmap and performs the \#VMEXIT if the
1043 * corresponding intercept is active.
1044 *
1045 * @returns Strict VBox status code.
1046 * @retval VINF_SVM_INTERCEPT_NOT_ACTIVE if the intercept is not active or
1047 * we're not executing a nested-guest.
1048 * @retval VINF_SVM_VMEXIT if the intercept is active and the \#VMEXIT occurred
1049 * successfully.
1050 * @retval VERR_SVM_VMEXIT_FAILED if the intercept is active and the \#VMEXIT
1051 * failed and a shutdown needs to be initiated for the guest.
1052 *
1053 * @returns VBox strict status code.
1054 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1055 * @param u16Port The IO port being accessed.
1056 * @param enmIoType The type of IO access.
1057 * @param cbReg The IO operand size in bytes.
1058 * @param cAddrSizeBits The address size bits (for 16, 32 or 64).
1059 * @param iEffSeg The effective segment number.
1060 * @param fRep Whether this is a repeating IO instruction (REP prefix).
1061 * @param fStrIo Whether this is a string IO instruction.
1062 * @param cbInstr The length of the IO instruction in bytes.
1063 */
1064VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPUCC pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
1065 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr) RT_NOEXCEPT
1066{
1067 Assert(IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT));
1068 Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
1069 Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);
1070
1071 Log3(("iemSvmHandleIOIntercept: u16Port=%#x (%u)\n", u16Port, u16Port));
1072
1073 SVMIOIOEXITINFO IoExitInfo;
1074 bool const fIntercept = CPUMIsSvmIoInterceptSet(pVCpu->cpum.GstCtx.hwvirt.svm.abIoBitmap, u16Port, enmIoType, cbReg,
1075 cAddrSizeBits, iEffSeg, fRep, fStrIo, &IoExitInfo);
1076 if (fIntercept)
1077 {
1078 Log3(("iemSvmHandleIOIntercept: u16Port=%#x (%u) -> #VMEXIT\n", u16Port, u16Port));
1079 IEM_SVM_UPDATE_NRIP(pVCpu);
1080 return iemSvmVmexit(pVCpu, SVM_EXIT_IOIO, IoExitInfo.u, pVCpu->cpum.GstCtx.rip + cbInstr);
1081 }
1082
1083 /** @todo remove later (for debugging as VirtualBox always traps all IO
1084 * intercepts). */
1085 AssertMsgFailed(("iemSvmHandleIOIntercept: We expect an IO intercept here!\n"));
1086 return VINF_SVM_INTERCEPT_NOT_ACTIVE;
1087}
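/* IOPM encoding note (AMD APM Vol. 2): the permission map holds one bit per I/O port, and an
 * access wider than one byte tests the bits of every port byte it touches; CPUMIsSvmIoInterceptSet()
 * is assumed to implement that lookup and to fill in the SVMIOIOEXITINFO used as EXITINFO1. */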
1088
1089
1090/**
1091 * Checks the SVM MSR permission bitmap and performs the \#VMEXIT if the
1092 * corresponding intercept is active.
1093 *
1094 * @returns Strict VBox status code.
1095 * @retval VINF_SVM_INTERCEPT_NOT_ACTIVE if the MSR permission bitmap does not
1096 * specify interception of the accessed MSR @a idMsr.
1097 * @retval VINF_SVM_VMEXIT if the intercept is active and the \#VMEXIT occurred
1098 * successfully.
1099 * @retval VERR_SVM_VMEXIT_FAILED if the intercept is active and the \#VMEXIT
1100 * failed and a shutdown needs to be initiated for the guest.
1101 *
1102 * @param pVCpu The cross context virtual CPU structure.
1103 * @param idMsr The MSR being accessed in the nested-guest.
1104 * @param fWrite Whether this is an MSR write access, @c false implies an
1105 * MSR read.
1107 */
1108VBOXSTRICTRC iemSvmHandleMsrIntercept(PVMCPUCC pVCpu, uint32_t idMsr, bool fWrite) RT_NOEXCEPT
1109{
1110 /*
1111 * Check if any MSRs are being intercepted.
1112 */
1113 Assert(CPUMIsGuestSvmCtrlInterceptSet(pVCpu, IEM_GET_CTX(pVCpu), SVM_CTRL_INTERCEPT_MSR_PROT));
1114 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
1115
1116 uint64_t const uExitInfo1 = fWrite ? SVM_EXIT1_MSR_WRITE : SVM_EXIT1_MSR_READ;
1117
1118 /*
1119 * Get the byte and bit offset of the permission bits corresponding to the MSR.
1120 */
1121 uint16_t offMsrpm;
1122 uint8_t uMsrpmBit;
1123 int rc = CPUMGetSvmMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
1124 if (RT_SUCCESS(rc))
1125 {
1126 Assert(uMsrpmBit == 0 || uMsrpmBit == 2 || uMsrpmBit == 4 || uMsrpmBit == 6);
1127 Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
1128 if (fWrite)
1129 ++uMsrpmBit;
1130
1131 /*
1132 * Check if the bit is set, if so, trigger a #VMEXIT.
1133 */
1134 if (pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap[offMsrpm] & RT_BIT(uMsrpmBit))
1135 {
1136 IEM_SVM_UPDATE_NRIP(pVCpu);
1137 return iemSvmVmexit(pVCpu, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
1138 }
1139 }
1140 else
1141 {
1142 /*
1143 * This shouldn't happen, but if it does, cause a #VMEXIT and let the "host" (nested hypervisor) deal with it.
1144 */
1145 Log(("iemSvmHandleMsrIntercept: Invalid/out-of-range MSR %#RX32 fWrite=%RTbool -> #VMEXIT\n", idMsr, fWrite));
1146 return iemSvmVmexit(pVCpu, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
1147 }
1148 return VINF_SVM_INTERCEPT_NOT_ACTIVE;
1149}
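/* MSRPM encoding note (as implied by the code above): each MSR maps to two adjacent bits in the
 * permission bitmap, the even bit intercepting reads and the odd bit intercepting writes, which
 * is why the bit index is incremented for write accesses before testing it. */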
1150
1151
1152
1153/**
1154 * Implements 'VMRUN'.
1155 */
1156IEM_CIMPL_DEF_0(iemCImpl_vmrun)
1157{
1158# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1159 RT_NOREF2(pVCpu, cbInstr);
1160 return VINF_EM_RAW_EMULATE_INSTR;
1161# else
1162 LogFlow(("iemCImpl_vmrun\n"));
1163 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmrun);
1164
1165 /** @todo Check effective address size using address size prefix. */
1166 RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
1167 if ( (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
1168 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
1169 {
1170 Log(("vmrun: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
1171 return iemRaiseGeneralProtectionFault0(pVCpu);
1172 }
1173
1174 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMRUN))
1175 {
1176 Log(("vmrun: Guest intercept -> #VMEXIT\n"));
1177 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_VMRUN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1178 }
1179
1180 VBOXSTRICTRC rcStrict = iemSvmVmrun(pVCpu, cbInstr, GCPhysVmcb);
1181 if (rcStrict == VERR_SVM_VMEXIT_FAILED)
1182 {
1183 Assert(!CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
1184 rcStrict = VINF_EM_TRIPLE_FAULT;
1185 }
1186 return rcStrict;
1187# endif
1188}
1189
1190
1191/**
1192 * Interface for HM and EM to emulate the VMRUN instruction.
1193 *
1194 * @returns Strict VBox status code.
1195 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1196 * @param cbInstr The instruction length in bytes.
1197 * @thread EMT(pVCpu)
1198 */
1199VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPUCC pVCpu, uint8_t cbInstr)
1200{
1201 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
1202 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
1203
1204 iemInitExec(pVCpu, false /*fBypassHandlers*/);
1205 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
1206 Assert(!pVCpu->iem.s.cActiveMappings);
1207 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
1208}
1209
1210
1211/**
1212 * Implements 'VMLOAD'.
1213 */
1214IEM_CIMPL_DEF_0(iemCImpl_vmload)
1215{
1216# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1217 RT_NOREF2(pVCpu, cbInstr);
1218 return VINF_EM_RAW_EMULATE_INSTR;
1219# else
1220 LogFlow(("iemCImpl_vmload\n"));
1221 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);
1222
1223 /** @todo Check effective address size using address size prefix. */
1224 RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
1225 if ( (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
1226 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
1227 {
1228 Log(("vmload: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
1229 return iemRaiseGeneralProtectionFault0(pVCpu);
1230 }
1231
1232 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
1233 {
1234 Log(("vmload: Guest intercept -> #VMEXIT\n"));
1235 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_VMLOAD, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1236 }
1237
1238 SVMVMCBSTATESAVE VmcbNstGst;
1239 VBOXSTRICTRC rcStrict = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcbNstGst, GCPhysVmcb + RT_UOFFSETOF(SVMVMCB, guest),
1240 sizeof(SVMVMCBSTATESAVE));
1241 if (rcStrict == VINF_SUCCESS)
1242 {
1243 LogFlow(("vmload: Loading VMCB at %#RGp enmEffAddrMode=%d\n", GCPhysVmcb, pVCpu->iem.s.enmEffAddrMode));
1244 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, FS, fs);
1245 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, GS, gs);
1246 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, TR, tr);
1247 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, LDTR, ldtr);
1248
1249 pVCpu->cpum.GstCtx.msrKERNELGSBASE = VmcbNstGst.u64KernelGSBase;
1250 pVCpu->cpum.GstCtx.msrSTAR = VmcbNstGst.u64STAR;
1251 pVCpu->cpum.GstCtx.msrLSTAR = VmcbNstGst.u64LSTAR;
1252 pVCpu->cpum.GstCtx.msrCSTAR = VmcbNstGst.u64CSTAR;
1253 pVCpu->cpum.GstCtx.msrSFMASK = VmcbNstGst.u64SFMASK;
1254
1255 pVCpu->cpum.GstCtx.SysEnter.cs = VmcbNstGst.u64SysEnterCS;
1256 pVCpu->cpum.GstCtx.SysEnter.esp = VmcbNstGst.u64SysEnterESP;
1257 pVCpu->cpum.GstCtx.SysEnter.eip = VmcbNstGst.u64SysEnterEIP;
1258
1259 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1260 }
1261 return rcStrict;
1262# endif
1263}
1264
1265
1266/**
1267 * Interface for HM and EM to emulate the VMLOAD instruction.
1268 *
1269 * @returns Strict VBox status code.
1270 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1271 * @param cbInstr The instruction length in bytes.
1272 * @thread EMT(pVCpu)
1273 */
1274VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPUCC pVCpu, uint8_t cbInstr)
1275{
1276 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
1277
1278 iemInitExec(pVCpu, false /*fBypassHandlers*/);
1279 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
1280 Assert(!pVCpu->iem.s.cActiveMappings);
1281 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
1282}
1283
1284
1285/**
1286 * Implements 'VMSAVE'.
1287 */
1288IEM_CIMPL_DEF_0(iemCImpl_vmsave)
1289{
1290# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1291 RT_NOREF2(pVCpu, cbInstr);
1292 return VINF_EM_RAW_EMULATE_INSTR;
1293# else
1294 LogFlow(("iemCImpl_vmsave\n"));
1295 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmsave);
1296
1297 /** @todo Check effective address size using address size prefix. */
1298 RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
1299 if ( (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
1300 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
1301 {
1302 Log(("vmsave: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
1303 return iemRaiseGeneralProtectionFault0(pVCpu);
1304 }
1305
1306 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))
1307 {
1308 Log(("vmsave: Guest intercept -> #VMEXIT\n"));
1309 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_VMSAVE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1310 }
1311
1312 SVMVMCBSTATESAVE VmcbNstGst;
1313 VBOXSTRICTRC rcStrict = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcbNstGst, GCPhysVmcb + RT_UOFFSETOF(SVMVMCB, guest),
1314 sizeof(SVMVMCBSTATESAVE));
1315 if (rcStrict == VINF_SUCCESS)
1316 {
1317 LogFlow(("vmsave: Saving VMCB at %#RGp enmEffAddrMode=%d\n", GCPhysVmcb, pVCpu->iem.s.enmEffAddrMode));
1318 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_FS | CPUMCTX_EXTRN_GS | CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_LDTR
1319 | CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_SYSENTER_MSRS);
1320
1321 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, FS, fs);
1322 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, GS, gs);
1323 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, TR, tr);
1324 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, LDTR, ldtr);
1325
1326 VmcbNstGst.u64KernelGSBase = pVCpu->cpum.GstCtx.msrKERNELGSBASE;
1327 VmcbNstGst.u64STAR = pVCpu->cpum.GstCtx.msrSTAR;
1328 VmcbNstGst.u64LSTAR = pVCpu->cpum.GstCtx.msrLSTAR;
1329 VmcbNstGst.u64CSTAR = pVCpu->cpum.GstCtx.msrCSTAR;
1330 VmcbNstGst.u64SFMASK = pVCpu->cpum.GstCtx.msrSFMASK;
1331
1332 VmcbNstGst.u64SysEnterCS = pVCpu->cpum.GstCtx.SysEnter.cs;
1333 VmcbNstGst.u64SysEnterESP = pVCpu->cpum.GstCtx.SysEnter.esp;
1334 VmcbNstGst.u64SysEnterEIP = pVCpu->cpum.GstCtx.SysEnter.eip;
1335
1336 rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcb + RT_UOFFSETOF(SVMVMCB, guest), &VmcbNstGst,
1337 sizeof(SVMVMCBSTATESAVE));
1338 if (rcStrict == VINF_SUCCESS)
1339 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1340 }
1341 return rcStrict;
1342# endif
1343}
1344
1345
1346/**
1347 * Interface for HM and EM to emulate the VMSAVE instruction.
1348 *
1349 * @returns Strict VBox status code.
1350 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1351 * @param cbInstr The instruction length in bytes.
1352 * @thread EMT(pVCpu)
1353 */
1354VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPUCC pVCpu, uint8_t cbInstr)
1355{
1356 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
1357
1358 iemInitExec(pVCpu, false /*fBypassHandlers*/);
1359 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
1360 Assert(!pVCpu->iem.s.cActiveMappings);
1361 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
1362}
1363
1364
1365/**
1366 * Implements 'CLGI'.
1367 */
1368IEM_CIMPL_DEF_0(iemCImpl_clgi)
1369{
1370# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1371 RT_NOREF2(pVCpu, cbInstr);
1372 return VINF_EM_RAW_EMULATE_INSTR;
1373# else
1374 LogFlow(("iemCImpl_clgi\n"));
1375 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, clgi);
1376 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
1377 {
1378 Log(("clgi: Guest intercept -> #VMEXIT\n"));
1379 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_CLGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1380 }
1381
1382 CPUMSetGuestGif(&pVCpu->cpum.GstCtx, false);
1383 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1384
1385# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
1386 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
1387# else
1388 return VINF_SUCCESS;
1389# endif
1390# endif
1391}
1392
1393
1394/**
1395 * Interface for HM and EM to emulate the CLGI instruction.
1396 *
1397 * @returns Strict VBox status code.
1398 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1399 * @param cbInstr The instruction length in bytes.
1400 * @thread EMT(pVCpu)
1401 */
1402VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPUCC pVCpu, uint8_t cbInstr)
1403{
1404 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
1405
1406 iemInitExec(pVCpu, false /*fBypassHandlers*/);
1407 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
1408 Assert(!pVCpu->iem.s.cActiveMappings);
1409 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
1410}
1411
1412
1413/**
1414 * Implements 'STGI'.
1415 */
1416IEM_CIMPL_DEF_0(iemCImpl_stgi)
1417{
1418# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1419 RT_NOREF2(pVCpu, cbInstr);
1420 return VINF_EM_RAW_EMULATE_INSTR;
1421# else
1422 LogFlow(("iemCImpl_stgi\n"));
1423 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, stgi);
1424 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_STGI))
1425 {
1426 Log2(("stgi: Guest intercept -> #VMEXIT\n"));
1427 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_STGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1428 }
1429
1430 CPUMSetGuestGif(&pVCpu->cpum.GstCtx, true);
1431 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1432
1433# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
1434 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
1435# else
1436 return VINF_SUCCESS;
1437# endif
1438# endif
1439}
1440
1441
1442/**
1443 * Interface for HM and EM to emulate the STGI instruction.
1444 *
1445 * @returns Strict VBox status code.
1446 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1447 * @param cbInstr The instruction length in bytes.
1448 * @thread EMT(pVCpu)
1449 */
1450VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPUCC pVCpu, uint8_t cbInstr)
1451{
1452 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
1453
1454 iemInitExec(pVCpu, false /*fBypassHandlers*/);
1455 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
1456 Assert(!pVCpu->iem.s.cActiveMappings);
1457 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
1458}
1459
1460
1461/**
1462 * Implements 'INVLPGA'.
1463 */
1464IEM_CIMPL_DEF_0(iemCImpl_invlpga)
1465{
1466 /** @todo Check effective address size using address size prefix. */
1467 RTGCPTR const GCPtrPage = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
1468 /** @todo PGM needs virtual ASID support. */
1469# if 0
1470 uint32_t const uAsid = pVCpu->cpum.GstCtx.ecx;
1471# endif
1472
1473 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, invlpga);
1474 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
1475 {
1476 Log2(("invlpga: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
1477 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_INVLPGA, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1478 }
1479
1480 PGMInvalidatePage(pVCpu, GCPtrPage);
1481 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1482 return VINF_SUCCESS;
1483}
1484
1485
1486/**
1487 * Interface for HM and EM to emulate the INVLPGA instruction.
1488 *
1489 * @returns Strict VBox status code.
1490 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1491 * @param cbInstr The instruction length in bytes.
1492 * @thread EMT(pVCpu)
1493 */
1494VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPUCC pVCpu, uint8_t cbInstr)
1495{
1496 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
1497
1498 iemInitExec(pVCpu, false /*fBypassHandlers*/);
1499 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
1500 Assert(!pVCpu->iem.s.cActiveMappings);
1501 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
1502}
1503
1504
1505/**
1506 * Implements 'SKINIT'.
1507 */
1508IEM_CIMPL_DEF_0(iemCImpl_skinit)
1509{
1510 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, skinit);
1511
1512 uint32_t uIgnore;
1513 uint32_t fFeaturesECX;
1514 CPUMGetGuestCpuId(pVCpu, 0x80000001, 0 /* iSubLeaf */, -1 /*f64BitMode*/, &uIgnore, &uIgnore, &fFeaturesECX, &uIgnore);
1515 if (!(fFeaturesECX & X86_CPUID_AMD_FEATURE_ECX_SKINIT))
1516 return iemRaiseUndefinedOpcode(pVCpu);
1517
1518 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SKINIT))
1519 {
1520 Log2(("skinit: Guest intercept -> #VMEXIT\n"));
1521 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SKINIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1522 }
1523
1524 RT_NOREF(cbInstr);
1525 return VERR_IEM_INSTR_NOT_IMPLEMENTED;
1526}
1527
1528
1529/**
1530 * Implements the SVM-specific handling of the PAUSE instruction (pause filter).
1531 */
1532IEM_CIMPL_DEF_0(iemCImpl_svm_pause)
1533{
1534 bool fCheckIntercept = true;
1535 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilter)
1536 {
1537 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_HWVIRT);
1538
1539 /* TSC based pause-filter thresholding. */
1540 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilterThreshold
1541 && pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilterThreshold > 0)
1542 {
1543 uint64_t const uTick = TMCpuTickGet(pVCpu);
1544 if (uTick - pVCpu->cpum.GstCtx.hwvirt.svm.uPrevPauseTick > pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilterThreshold)
1545 pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter = CPUMGetGuestSvmPauseFilterCount(pVCpu, IEM_GET_CTX(pVCpu));
1546 pVCpu->cpum.GstCtx.hwvirt.svm.uPrevPauseTick = uTick;
1547 }
1548
1549 /* Simple pause-filter counter. */
1550 if (pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter > 0)
1551 {
1552 --pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter;
1553 fCheckIntercept = false;
1554 }
1555 }
1556
1557 if (fCheckIntercept)
1558 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_PAUSE, SVM_EXIT_PAUSE, 0, 0);
1559
1560 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1561 return VINF_SUCCESS;
1562}
1563
1564#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
1565
1566/**
1567 * Common code for iemCImpl_vmmcall and iemCImpl_vmcall (latter in IEMAllCImplVmxInstr.cpp).
1568 */
1569IEM_CIMPL_DEF_1(iemCImpl_Hypercall, uint16_t, uDisOpcode)
1570{
1571 if (EMAreHypercallInstructionsEnabled(pVCpu))
1572 {
1573 NOREF(uDisOpcode);
1574 VBOXSTRICTRC rcStrict = GIMHypercallEx(pVCpu, IEM_GET_CTX(pVCpu), uDisOpcode, cbInstr);
1575 if (RT_SUCCESS(rcStrict))
1576 {
1577 if (rcStrict == VINF_SUCCESS)
1578 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1579 if ( rcStrict == VINF_SUCCESS
1580 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING)
1581 return VINF_SUCCESS;
1582 AssertMsgReturn(rcStrict == VINF_GIM_R3_HYPERCALL, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IEM_IPE_4);
1583 return rcStrict;
1584 }
1585 AssertMsgReturn( rcStrict == VERR_GIM_HYPERCALL_ACCESS_DENIED
1586 || rcStrict == VERR_GIM_HYPERCALLS_NOT_AVAILABLE
1587 || rcStrict == VERR_GIM_NOT_ENABLED
1588 || rcStrict == VERR_GIM_HYPERCALL_MEMORY_READ_FAILED
1589 || rcStrict == VERR_GIM_HYPERCALL_MEMORY_WRITE_FAILED,
1590 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IEM_IPE_4);
1591
1592 /* Raise #UD on all failures. */
1593 }
1594 return iemRaiseUndefinedOpcode(pVCpu);
1595}
1596
1597
1598/**
1599 * Implements 'VMMCALL'.
1600 */
1601IEM_CIMPL_DEF_0(iemCImpl_vmmcall)
1602{
1603 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL))
1604 {
1605 Log2(("vmmcall: Guest intercept -> #VMEXIT\n"));
1606 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1607 }
1608
1609 /* This is a little bit more complicated than the VT-x version because HM/SVM may
1610 patch MOV CR8 instructions to speed up APIC.TPR access for 32-bit Windows guests. */
1611 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1612 if (VM_IS_HM_ENABLED(pVM))
1613 {
1614 int rc = HMHCMaybeMovTprSvmHypercall(pVM, pVCpu);
1615 if (RT_SUCCESS(rc))
1616 {
1617 Log(("vmmcall: MovTpr\n"));
1618 return VINF_SUCCESS;
1619 }
1620 }
1621
1622 /* Join forces with vmcall. */
1623 return IEM_CIMPL_CALL_1(iemCImpl_Hypercall, OP_VMMCALL);
1624}
1625