VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp@ 8878

Last change on this file since 8878 was 8878, checked in by vboxsync, 17 years ago

Don't automatically flush the TLB when we remain on the same cpu (on entry).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 70.2 KB
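The change in this revision ("Don't automatically flush the TLB when we remain on the same cpu (on entry)") boils down to forcing a TLB flush only when the VM is resumed on a different host CPU. Below is a minimal illustrative sketch of that idea, reusing field names that appear in this file (idLastCpu, fForceTLBFlush); the real check lives in SVMR0Enter and its surrounding logic differs.

/* Illustrative sketch only -- not part of the checked-in file. */
static void svmSketchTrackHostCpu(PVM pVM, PHWACCM_CPUINFO pCpu)
{
    /* If we were last scheduled on a different host CPU, translations cached
       under our previous ASID are of no use here, so request a flush (which
       in practice means taking a fresh ASID on this CPU). */
    if (pVM->hwaccm.s.svm.idLastCpu != pCpu->idCpu)
        pVM->hwaccm.s.svm.fForceTLBFlush = true;
    pVM->hwaccm.s.svm.idLastCpu = pCpu->idCpu;
}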
1/* $Id: HWSVMR0.cpp 8878 2008-05-16 10:59:52Z vboxsync $ */
2/** @file
3 * HWACCM SVM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_HWACCM
27#include <VBox/hwaccm.h>
28#include "HWACCMInternal.h"
29#include <VBox/vm.h>
30#include <VBox/x86.h>
31#include <VBox/hwacc_svm.h>
32#include <VBox/pgm.h>
33#include <VBox/pdm.h>
34#include <VBox/err.h>
35#include <VBox/log.h>
36#include <VBox/selm.h>
37#include <VBox/iom.h>
38#include <VBox/dis.h>
39#include <VBox/dbgf.h>
40#include <VBox/disopcode.h>
41#include <iprt/param.h>
42#include <iprt/assert.h>
43#include <iprt/asm.h>
44#include <iprt/cpuset.h>
45#include <iprt/mp.h>
46#include "HWSVMR0.h"
47
48static int SVMR0InterpretInvpg(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uASID);
49
50/**
51 * Sets up and activates AMD-V on the current CPU
52 *
53 * @returns VBox status code.
54 * @param pCpu CPU info struct
55 * @param pVM The VM to operate on.
56 * @param pvPageCpu Pointer to the global cpu page
57 * @param pPageCpuPhys Physical address of the global cpu page
58 */
59HWACCMR0DECL(int) SVMR0EnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
60{
61 AssertReturn(pPageCpuPhys, VERR_INVALID_PARAMETER);
62 AssertReturn(pVM, VERR_INVALID_PARAMETER);
63 AssertReturn(pvPageCpu, VERR_INVALID_PARAMETER);
64
65 /* We must turn on AMD-V and setup the host state physical address, as those MSRs are per-cpu/core. */
66
67#ifdef LOG_ENABLED
68 SUPR0Printf("SVMR0EnableCpu cpu %d page (%x) %x\n", pCpu->idCpu, pvPageCpu, (uint32_t)pPageCpuPhys);
69#endif
70
71 /* Turn on AMD-V in the EFER MSR. */
72 uint64_t val = ASMRdMsr(MSR_K6_EFER);
73 if (!(val & MSR_K6_EFER_SVME))
74 ASMWrMsr(MSR_K6_EFER, val | MSR_K6_EFER_SVME);
75
76 /* Write the physical page address where the CPU will store the host state while executing the VM. */
77 ASMWrMsr(MSR_K8_VM_HSAVE_PA, pPageCpuPhys);
78
79 pCpu->uCurrentASID = 0; /* we'll always increment this the first time (host uses ASID 0) */
80 return VINF_SUCCESS;
81}
82
83/**
84 * Deactivates AMD-V on the current CPU
85 *
86 * @returns VBox status code.
87 * @param pCpu CPU info struct
88 * @param pvPageCpu Pointer to the global cpu page
89 * @param pPageCpuPhys Physical address of the global cpu page
90 */
91HWACCMR0DECL(int) SVMR0DisableCpu(PHWACCM_CPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
92{
93 AssertReturn(pPageCpuPhys, VERR_INVALID_PARAMETER);
94 AssertReturn(pvPageCpu, VERR_INVALID_PARAMETER);
95
96#ifdef LOG_ENABLED
97 SUPR0Printf("SVMR0DisableCpu cpu %d\n", pCpu->idCpu);
98#endif
99
100 /* Turn off AMD-V in the EFER MSR. */
101 uint64_t val = ASMRdMsr(MSR_K6_EFER);
102 ASMWrMsr(MSR_K6_EFER, val & ~MSR_K6_EFER_SVME);
103
104 /* Invalidate host state physical address. */
105 ASMWrMsr(MSR_K8_VM_HSAVE_PA, 0);
106 pCpu->uCurrentASID = 0;
107
108 return VINF_SUCCESS;
109}
110
111/**
112 * Does Ring-0 per VM AMD-V init.
113 *
114 * @returns VBox status code.
115 * @param pVM The VM to operate on.
116 */
117HWACCMR0DECL(int) SVMR0InitVM(PVM pVM)
118{
119 int rc;
120
121 /* Allocate one page for the VM control block (VMCB). */
122 rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.svm.pMemObjVMCB, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
123 if (RT_FAILURE(rc))
124 return rc;
125
126 pVM->hwaccm.s.svm.pVMCB = RTR0MemObjAddress(pVM->hwaccm.s.svm.pMemObjVMCB);
127 pVM->hwaccm.s.svm.pVMCBPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.svm.pMemObjVMCB, 0);
128 ASMMemZero32(pVM->hwaccm.s.svm.pVMCB, PAGE_SIZE);
129
130 /* Allocate one page for the host context */
131 rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.svm.pMemObjVMCBHost, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
132 if (RT_FAILURE(rc))
133 return rc;
134
135 pVM->hwaccm.s.svm.pVMCBHost = RTR0MemObjAddress(pVM->hwaccm.s.svm.pMemObjVMCBHost);
136 pVM->hwaccm.s.svm.pVMCBHostPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.svm.pMemObjVMCBHost, 0);
137 ASMMemZero32(pVM->hwaccm.s.svm.pVMCBHost, PAGE_SIZE);
138
139 /* Allocate 12 KB for the IO bitmap (there doesn't seem to be a way to convince SVM not to use it). */
140 rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.svm.pMemObjIOBitmap, 3 << PAGE_SHIFT, true /* executable R0 mapping */);
141 if (RT_FAILURE(rc))
142 return rc;
143
144 pVM->hwaccm.s.svm.pIOBitmap = RTR0MemObjAddress(pVM->hwaccm.s.svm.pMemObjIOBitmap);
145 pVM->hwaccm.s.svm.pIOBitmapPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.svm.pMemObjIOBitmap, 0);
146 /* Set all bits to intercept all IO accesses. */
147 ASMMemFill32(pVM->hwaccm.s.svm.pIOBitmap, PAGE_SIZE*3, 0xffffffff);
148
149 /* Allocate 8 KB for the MSR bitmap (there doesn't seem to be a way to convince SVM not to use it). */
150 rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.svm.pMemObjMSRBitmap, 2 << PAGE_SHIFT, true /* executable R0 mapping */);
151 if (RT_FAILURE(rc))
152 return rc;
153
154 pVM->hwaccm.s.svm.pMSRBitmap = RTR0MemObjAddress(pVM->hwaccm.s.svm.pMemObjMSRBitmap);
155 pVM->hwaccm.s.svm.pMSRBitmapPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.svm.pMemObjMSRBitmap, 0);
156 /* Set all bits to intercept all MSR accesses. */
157 ASMMemFill32(pVM->hwaccm.s.svm.pMSRBitmap, PAGE_SIZE*2, 0xffffffff);
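/* Per the AMD SVM spec the MSR permission map uses two bits per MSR (one for
 * reads, one for writes) and the I/O permission map above uses one bit per
 * I/O port, so filling both maps with ones intercepts every covered MSR and
 * port access. */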
158
159 /* Erratum 170 requires a forced TLB flush for each world switch; see
160 * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/33610.pdf
161 *
162 * All BH-G1/2 and DH-G1/2 models include a fix:
163 * Athlon X2: 0x6b 1/2
164 * 0x68 1/2
165 * Athlon 64: 0x7f 1
166 * 0x6f 2
167 * Sempron: 0x7f 1/2
168 * 0x6f 2
169 * 0x6c 2
170 * 0x7c 2
171 * Turion 64: 0x68 2
172 *
173 */
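 /* The decode below follows the CPUID leaf 1 EAX layout: stepping in bits 3:0,
  * base model in bits 7:4, base family in bits 11:8, extended model in bits 19:16
  * and extended family in bits 27:20. The extended fields are only folded in when
  * the base family is 0xf, which is the case for the K8 parts listed above. */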
174 uint32_t u32Dummy;
175 uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
176 ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
177 u32BaseFamily= (u32Version >> 8) & 0xf;
178 u32Family = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
179 u32Model = ((u32Version >> 4) & 0xf);
180 u32Model = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
181 u32Stepping = u32Version & 0xf;
182 if ( u32Family == 0xf
183 && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
184 && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
185 {
186 Log(("SVMR0InitVM: AMD cpu with erratum 170 family %x model %x stepping %x\n", u32Family, u32Model, u32Stepping));
187 pVM->hwaccm.s.svm.fAlwaysFlushTLB = true;
188 }
189
190 /* Invalidate the last cpu we were running on. */
191 pVM->hwaccm.s.svm.idLastCpu = NIL_RTCPUID;
192 return VINF_SUCCESS;
193}
194
195/**
196 * Does Ring-0 per VM AMD-V termination.
197 *
198 * @returns VBox status code.
199 * @param pVM The VM to operate on.
200 */
201HWACCMR0DECL(int) SVMR0TermVM(PVM pVM)
202{
203 if (pVM->hwaccm.s.svm.pMemObjVMCB)
204 {
205 RTR0MemObjFree(pVM->hwaccm.s.svm.pMemObjVMCB, false);
206 pVM->hwaccm.s.svm.pVMCB = 0;
207 pVM->hwaccm.s.svm.pVMCBPhys = 0;
208 pVM->hwaccm.s.svm.pMemObjVMCB = 0;
209 }
210 if (pVM->hwaccm.s.svm.pMemObjVMCBHost)
211 {
212 RTR0MemObjFree(pVM->hwaccm.s.svm.pMemObjVMCBHost, false);
213 pVM->hwaccm.s.svm.pVMCBHost = 0;
214 pVM->hwaccm.s.svm.pVMCBHostPhys = 0;
215 pVM->hwaccm.s.svm.pMemObjVMCBHost = 0;
216 }
217 if (pVM->hwaccm.s.svm.pMemObjIOBitmap)
218 {
219 RTR0MemObjFree(pVM->hwaccm.s.svm.pMemObjIOBitmap, false);
220 pVM->hwaccm.s.svm.pIOBitmap = 0;
221 pVM->hwaccm.s.svm.pIOBitmapPhys = 0;
222 pVM->hwaccm.s.svm.pMemObjIOBitmap = 0;
223 }
224 if (pVM->hwaccm.s.svm.pMemObjMSRBitmap)
225 {
226 RTR0MemObjFree(pVM->hwaccm.s.svm.pMemObjMSRBitmap, false);
227 pVM->hwaccm.s.svm.pMSRBitmap = 0;
228 pVM->hwaccm.s.svm.pMSRBitmapPhys = 0;
229 pVM->hwaccm.s.svm.pMemObjMSRBitmap = 0;
230 }
231 return VINF_SUCCESS;
232}
233
234/**
235 * Sets up AMD-V for the specified VM
236 *
237 * @returns VBox status code.
238 * @param pVM The VM to operate on.
239 */
240HWACCMR0DECL(int) SVMR0SetupVM(PVM pVM)
241{
242 int rc = VINF_SUCCESS;
243 SVM_VMCB *pVMCB;
244
245 AssertReturn(pVM, VERR_INVALID_PARAMETER);
246
247 Assert(pVM->hwaccm.s.svm.fSupported);
248
249 pVMCB = (SVM_VMCB *)pVM->hwaccm.s.svm.pVMCB;
250 AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_EM_INTERNAL_ERROR);
251
252 /* Program the control fields. Most of them never have to be changed again. */
253 /* CR0/3/4 reads must be intercepted, our shadow values are not necessarily the same as the guest's. */
254 /** @note CR0 & CR4 can be safely read when guest and shadow copies are identical. */
255 pVMCB->ctrl.u16InterceptRdCRx = RT_BIT(0) | RT_BIT(3) | RT_BIT(4) | RT_BIT(8);
256
257 /*
258 * CR0/3/4 writes must be intercepted for obvious reasons.
259 */
260 pVMCB->ctrl.u16InterceptWrCRx = RT_BIT(0) | RT_BIT(3) | RT_BIT(4) | RT_BIT(8);
261
262 /* Intercept all DRx reads and writes. */
263 pVMCB->ctrl.u16InterceptRdDRx = RT_BIT(0) | RT_BIT(1) | RT_BIT(2) | RT_BIT(3) | RT_BIT(4) | RT_BIT(5) | RT_BIT(6) | RT_BIT(7);
264 pVMCB->ctrl.u16InterceptWrDRx = RT_BIT(0) | RT_BIT(1) | RT_BIT(2) | RT_BIT(3) | RT_BIT(4) | RT_BIT(5) | RT_BIT(6) | RT_BIT(7);
265
266 /* Currently we don't care about DRx reads or writes. DRx registers are trashed.
267 * All breakpoints are automatically cleared when the VM exits.
268 */
269
270 /** @todo nested paging */
271 pVMCB->ctrl.u32InterceptException = HWACCM_SVM_TRAP_MASK;
272
273 pVMCB->ctrl.u32InterceptCtrl1 = SVM_CTRL1_INTERCEPT_INTR
274 | SVM_CTRL1_INTERCEPT_VINTR
275 | SVM_CTRL1_INTERCEPT_NMI
276 | SVM_CTRL1_INTERCEPT_SMI
277 | SVM_CTRL1_INTERCEPT_INIT
278 | SVM_CTRL1_INTERCEPT_CR0 /** @todo redundant? */
279 | SVM_CTRL1_INTERCEPT_RDPMC
280 | SVM_CTRL1_INTERCEPT_CPUID
281 | SVM_CTRL1_INTERCEPT_RSM
282 | SVM_CTRL1_INTERCEPT_HLT
283 | SVM_CTRL1_INTERCEPT_INOUT_BITMAP
284 | SVM_CTRL1_INTERCEPT_MSR_SHADOW
285 | SVM_CTRL1_INTERCEPT_INVLPG
286 | SVM_CTRL1_INTERCEPT_INVLPGA /* AMD only */
287 | SVM_CTRL1_INTERCEPT_SHUTDOWN /* fatal */
288 | SVM_CTRL1_INTERCEPT_FERR_FREEZE; /* Legacy FPU FERR handling. */
289 ;
290 pVMCB->ctrl.u32InterceptCtrl2 = SVM_CTRL2_INTERCEPT_VMRUN /* required */
291 | SVM_CTRL2_INTERCEPT_VMMCALL
292 | SVM_CTRL2_INTERCEPT_VMLOAD
293 | SVM_CTRL2_INTERCEPT_VMSAVE
294 | SVM_CTRL2_INTERCEPT_STGI
295 | SVM_CTRL2_INTERCEPT_CLGI
296 | SVM_CTRL2_INTERCEPT_SKINIT
297 | SVM_CTRL2_INTERCEPT_RDTSCP /* AMD only; we don't support this one */
298 | SVM_CTRL2_INTERCEPT_WBINVD
299 | SVM_CTRL2_INTERCEPT_MWAIT_UNCOND; /* don't execute mwait or else we'll idle inside the guest (host thinks the cpu load is high) */
300 ;
301 Log(("pVMCB->ctrl.u32InterceptException = %x\n", pVMCB->ctrl.u32InterceptException));
302 Log(("pVMCB->ctrl.u32InterceptCtrl1 = %x\n", pVMCB->ctrl.u32InterceptCtrl1));
303 Log(("pVMCB->ctrl.u32InterceptCtrl2 = %x\n", pVMCB->ctrl.u32InterceptCtrl2));
304
305 /* Virtualize masking of INTR interrupts. */
306 pVMCB->ctrl.IntCtrl.n.u1VIrqMasking = 1;
307
308 /* Set IO and MSR bitmap addresses. */
309 pVMCB->ctrl.u64IOPMPhysAddr = pVM->hwaccm.s.svm.pIOBitmapPhys;
310 pVMCB->ctrl.u64MSRPMPhysAddr = pVM->hwaccm.s.svm.pMSRBitmapPhys;
311
312 /* Enable nested paging. */
313 /** @todo how to detect support for this?? */
314 pVMCB->ctrl.u64NestedPaging = 0; /** @todo SVM_NESTED_PAGING_ENABLE; */
315
316 /* No LBR virtualization. */
317 pVMCB->ctrl.u64LBRVirt = 0;
318
319 /** The ASID must start at 1; the host uses 0. */
320 pVMCB->ctrl.TLBCtrl.n.u32ASID = 1;
321
322 return rc;
323}
324
325
326/**
327 * Injects an event (trap or external interrupt)
328 *
329 * @param pVM The VM to operate on.
330 * @param pVMCB SVM control block
331 * @param pCtx CPU Context
332 * @param pEvent SVM event to inject
333 */
334inline void SVMR0InjectEvent(PVM pVM, SVM_VMCB *pVMCB, CPUMCTX *pCtx, SVM_EVENT* pEvent)
335{
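/* SVM_EVENT mirrors the VMCB EVENTINJ field: vector in bits 7:0, type in
 * bits 10:8, error-code-valid in bit 11, valid in bit 31 and the error code
 * in bits 63:32; au64[0] written below is that 64-bit value. */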
336#ifdef VBOX_STRICT
337 if (pEvent->n.u8Vector == 0xE)
338 Log(("SVM: Inject int %d at %VGv error code=%08x CR2=%08x intInfo=%08x\n", pEvent->n.u8Vector, pCtx->eip, pEvent->n.u32ErrorCode, pCtx->cr2, pEvent->au64[0]));
339 else
340 if (pEvent->n.u8Vector < 0x20)
341 Log(("SVM: Inject int %d at %VGv error code=%08x\n", pEvent->n.u8Vector, pCtx->eip, pEvent->n.u32ErrorCode));
342 else
343 {
344 Log(("INJ-EI: %x at %VGv\n", pEvent->n.u8Vector, pCtx->eip));
345 Assert(!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS));
346 Assert(pCtx->eflags.u32 & X86_EFL_IF);
347 }
348#endif
349
350 /* Set event injection state. */
351 pVMCB->ctrl.EventInject.au64[0] = pEvent->au64[0];
352}
353
354
355/**
356 * Checks for pending guest interrupts and injects them
357 *
358 * @returns VBox status code.
359 * @param pVM The VM to operate on.
360 * @param pVMCB SVM control block
361 * @param pCtx CPU Context
362 */
363static int SVMR0CheckPendingInterrupt(PVM pVM, SVM_VMCB *pVMCB, CPUMCTX *pCtx)
364{
365 int rc;
366
367 /* Dispatch any pending interrupts. (injected before, but a VM exit occurred prematurely) */
368 if (pVM->hwaccm.s.Event.fPending)
369 {
370 SVM_EVENT Event;
371
372 Log(("Reinjecting event %08x %08x at %VGv\n", pVM->hwaccm.s.Event.intInfo, pVM->hwaccm.s.Event.errCode, pCtx->eip));
373 STAM_COUNTER_INC(&pVM->hwaccm.s.StatIntReinject);
374 Event.au64[0] = pVM->hwaccm.s.Event.intInfo;
375 SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
376
377 pVM->hwaccm.s.Event.fPending = false;
378 return VINF_SUCCESS;
379 }
380
381 /* When external interrupts are pending, we should exit the VM when IF is set. */
382 if ( !TRPMHasTrap(pVM)
383 && VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)))
384 {
385 if (!(pCtx->eflags.u32 & X86_EFL_IF))
386 {
387 if (!pVMCB->ctrl.IntCtrl.n.u1VIrqValid)
388 {
389 Log(("Enable irq window exit!\n"));
390 /** @todo use virtual interrupt method to inject a pending irq; dispatched as soon as guest.IF is set. */
391 pVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_VINTR;
392 pVMCB->ctrl.IntCtrl.n.u1VIrqValid = 1;
393 pVMCB->ctrl.IntCtrl.n.u1IgnoreTPR = 1; /* ignore the priority in the TPR; just deliver it */
394 pVMCB->ctrl.IntCtrl.n.u8VIrqVector = 0; /* don't care */
395 }
396 }
397 else
398 if (!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
399 {
400 uint8_t u8Interrupt;
401
402 rc = PDMGetInterrupt(pVM, &u8Interrupt);
403 Log(("Dispatch interrupt: u8Interrupt=%x (%d) rc=%Vrc\n", u8Interrupt, u8Interrupt, rc));
404 if (VBOX_SUCCESS(rc))
405 {
406 rc = TRPMAssertTrap(pVM, u8Interrupt, TRPM_HARDWARE_INT);
407 AssertRC(rc);
408 }
409 else
410 {
411 /* Can only happen in rare cases where a pending interrupt is cleared behind our back */
412 Assert(!VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)));
413 STAM_COUNTER_INC(&pVM->hwaccm.s.StatSwitchGuestIrq);
414 /* Just continue */
415 }
416 }
417 else
418 Log(("Pending interrupt blocked at %VGv by VM_FF_INHIBIT_INTERRUPTS!!\n", pCtx->eip));
419 }
420
421#ifdef VBOX_STRICT
422 if (TRPMHasTrap(pVM))
423 {
424 uint8_t u8Vector;
425 rc = TRPMQueryTrapAll(pVM, &u8Vector, 0, 0, 0);
426 AssertRC(rc);
427 }
428#endif
429
430 if ( pCtx->eflags.u32 & X86_EFL_IF
431 && (!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
432 && TRPMHasTrap(pVM)
433 )
434 {
435 uint8_t u8Vector;
436 int rc;
437 TRPMEVENT enmType;
438 SVM_EVENT Event;
439 uint32_t u32ErrorCode;
440
441 Event.au64[0] = 0;
442
443 /* If a new event is pending, then dispatch it now. */
444 rc = TRPMQueryTrapAll(pVM, &u8Vector, &enmType, &u32ErrorCode, 0);
445 AssertRC(rc);
446 Assert(pCtx->eflags.Bits.u1IF == 1 || enmType == TRPM_TRAP);
447 Assert(enmType != TRPM_SOFTWARE_INT);
448
449 /* Clear the pending trap. */
450 rc = TRPMResetTrap(pVM);
451 AssertRC(rc);
452
453 Event.n.u8Vector = u8Vector;
454 Event.n.u1Valid = 1;
455 Event.n.u32ErrorCode = u32ErrorCode;
456
457 if (enmType == TRPM_TRAP)
458 {
459 switch (u8Vector) {
460 case 8:
461 case 10:
462 case 11:
463 case 12:
464 case 13:
465 case 14:
466 case 17:
467 /* Valid error codes. */
468 Event.n.u1ErrorCodeValid = 1;
469 break;
470 default:
471 break;
472 }
473 if (u8Vector == X86_XCPT_NMI)
474 Event.n.u3Type = SVM_EVENT_NMI;
475 else
476 Event.n.u3Type = SVM_EVENT_EXCEPTION;
477 }
478 else
479 Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
480
481 STAM_COUNTER_INC(&pVM->hwaccm.s.StatIntInject);
482 SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
483 } /* if (interrupts can be dispatched) */
484
485 return VINF_SUCCESS;
486}
487
488
489/**
490 * Loads the guest state
491 *
492 * @returns VBox status code.
493 * @param pVM The VM to operate on.
494 * @param pCtx Guest context
495 */
496HWACCMR0DECL(int) SVMR0LoadGuestState(PVM pVM, CPUMCTX *pCtx)
497{
498 RTGCUINTPTR val;
499 SVM_VMCB *pVMCB;
500
501 if (pVM == NULL)
502 return VERR_INVALID_PARAMETER;
503
504 /* Setup AMD SVM. */
505 Assert(pVM->hwaccm.s.svm.fSupported);
506
507 pVMCB = (SVM_VMCB *)pVM->hwaccm.s.svm.pVMCB;
508 AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_EM_INTERNAL_ERROR);
509
510 /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
511 if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SEGMENT_REGS)
512 {
513 SVM_WRITE_SELREG(CS, cs);
514 SVM_WRITE_SELREG(SS, ss);
515 SVM_WRITE_SELREG(DS, ds);
516 SVM_WRITE_SELREG(ES, es);
517 SVM_WRITE_SELREG(FS, fs);
518 SVM_WRITE_SELREG(GS, gs);
519 }
520
521 /* Guest CPU context: LDTR. */
522 if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_LDTR)
523 {
524 SVM_WRITE_SELREG(LDTR, ldtr);
525 }
526
527 /* Guest CPU context: TR. */
528 if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_TR)
529 {
530 SVM_WRITE_SELREG(TR, tr);
531 }
532
533 /* Guest CPU context: GDTR. */
534 if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_GDTR)
535 {
536 pVMCB->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
537 pVMCB->guest.GDTR.u64Base = pCtx->gdtr.pGdt;
538 }
539
540 /* Guest CPU context: IDTR. */
541 if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_IDTR)
542 {
543 pVMCB->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
544 pVMCB->guest.IDTR.u64Base = pCtx->idtr.pIdt;
545 }
546
547 /*
548 * Sysenter MSRs
549 */
550 if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SYSENTER_MSR)
551 {
552 pVMCB->guest.u64SysEnterCS = pCtx->SysEnter.cs;
553 pVMCB->guest.u64SysEnterEIP = pCtx->SysEnter.eip;
554 pVMCB->guest.u64SysEnterESP = pCtx->SysEnter.esp;
555 }
556
557 /* Control registers */
558 if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR0)
559 {
560 val = pCtx->cr0;
561 if (CPUMIsGuestFPUStateActive(pVM) == false)
562 {
563 /* Always use #NM exceptions to load the FPU/XMM state on demand. */
564 val |= X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP;
565 }
566 else
567 {
568 Assert(pVM->hwaccm.s.svm.fResumeVM == true);
569 /** @todo check if we support the old style mess correctly. */
570 if (!(val & X86_CR0_NE))
571 {
572 Log(("Forcing X86_CR0_NE!!!\n"));
573
574 /* Also catch floating point exceptions as we need to report them to the guest in a different way. */
575 if (!pVM->hwaccm.s.fFPUOldStyleOverride)
576 {
577 pVMCB->ctrl.u32InterceptException |= RT_BIT(16);
578 pVM->hwaccm.s.fFPUOldStyleOverride = true;
579 }
580 }
581 val |= X86_CR0_NE; /* always turn on the native mechanism to report FPU errors (old style uses interrupts) */
582 }
583 if (!(val & X86_CR0_CD))
584 val &= ~X86_CR0_NW; /* Illegal when cache is turned on. */
585
586 val |= X86_CR0_PG; /* Paging is always enabled; even when the guest is running in real mode or PE without paging. */
587 val |= X86_CR0_WP; /* Must set this as we rely on write-protecting various pages, and supervisor writes must be caught. */
588 pVMCB->guest.u64CR0 = val;
589 }
590 /* CR2 as well */
591 pVMCB->guest.u64CR2 = pCtx->cr2;
592
593 if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR3)
594 {
595 /* Save our shadow CR3 register. */
596 pVMCB->guest.u64CR3 = PGMGetHyperCR3(pVM);
597 }
598
599 if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR4)
600 {
601 val = pCtx->cr4;
602 switch(pVM->hwaccm.s.enmShadowMode)
603 {
604 case PGMMODE_REAL:
605 case PGMMODE_PROTECTED: /* Protected mode, no paging. */
606 AssertFailed();
607 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
608
609 case PGMMODE_32_BIT: /* 32-bit paging. */
610 break;
611
612 case PGMMODE_PAE: /* PAE paging. */
613 case PGMMODE_PAE_NX: /* PAE paging with NX enabled. */
614 /** @todo use normal 32 bits paging */
615 val |= X86_CR4_PAE;
616 break;
617
618 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
619 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
620 AssertFailed();
621 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
622
623 default: /* shut up gcc */
624 AssertFailed();
625 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
626 }
627 pVMCB->guest.u64CR4 = val;
628 }
629
630 /* Debug registers. */
631 if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_DEBUG)
632 {
633 /** @todo DR0-6 */
634 val = pCtx->dr7;
635 val &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15)); /* must be zero */
636 val |= 0x400; /* must be one */
637#ifdef VBOX_STRICT
638 val = 0x400;
639#endif
640 pVMCB->guest.u64DR7 = val;
641
642 pVMCB->guest.u64DR6 = pCtx->dr6;
643 }
644
645 /* EIP, ESP and EFLAGS */
646 pVMCB->guest.u64RIP = pCtx->eip;
647 pVMCB->guest.u64RSP = pCtx->esp;
648 pVMCB->guest.u64RFlags = pCtx->eflags.u32;
649
650 /* Set CPL */
651 pVMCB->guest.u8CPL = pCtx->ssHid.Attr.n.u2Dpl;
652
653 /* RAX/EAX too, as VMRUN uses RAX as an implicit parameter. */
654 pVMCB->guest.u64RAX = pCtx->eax;
655
656 /* vmrun will fail otherwise. */
657 pVMCB->guest.u64EFER = MSR_K6_EFER_SVME;
658
659 /** TSC offset. */
660 if (TMCpuTickCanUseRealTSC(pVM, &pVMCB->ctrl.u64TSCOffset))
661 pVMCB->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC;
662 else
663 pVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
664
665 /** @todo 64 bits stuff (?):
666 * - STAR
667 * - LSTAR
668 * - CSTAR
669 * - SFMASK
670 * - KernelGSBase
671 */
672
673#ifdef DEBUG
674 /* Intercept X86_XCPT_DB if stepping is enabled */
675 if (DBGFIsStepping(pVM))
676 pVMCB->ctrl.u32InterceptException |= RT_BIT(1);
677 else
678 pVMCB->ctrl.u32InterceptException &= ~RT_BIT(1);
679#endif
680
681 /* Done. */
682 pVM->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_ALL_GUEST;
683
684 return VINF_SUCCESS;
685}
686
687
688/**
689 * Runs guest code in an SVM VM.
690 *
691 * @todo This can be made much more efficient by only syncing what has actually changed (this is only a first attempt).
692 *
693 * @returns VBox status code.
694 * @param pVM The VM to operate on.
695 * @param pCtx Guest context
696 * @param pCpu CPU info struct
697 */
698HWACCMR0DECL(int) SVMR0RunGuestCode(PVM pVM, CPUMCTX *pCtx, PHWACCM_CPUINFO pCpu)
699{
700 int rc = VINF_SUCCESS;
701 uint64_t exitCode = (uint64_t)SVM_EXIT_INVALID;
702 SVM_VMCB *pVMCB;
703 bool fGuestStateSynced = false;
704 unsigned cResume = 0;
705
706 STAM_PROFILE_ADV_START(&pVM->hwaccm.s.StatEntry, x);
707
708 AssertReturn(pCpu->fSVMConfigured, VERR_EM_INTERNAL_ERROR);
709
710 pVMCB = (SVM_VMCB *)pVM->hwaccm.s.svm.pVMCB;
711 AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_EM_INTERNAL_ERROR);
712
713 /* We can jump to this point to resume execution after determining that a VM-exit is innocent.
714 */
715ResumeExecution:
716 /* Safety precaution; looping for too long here can have a very bad effect on the host */
717 if (++cResume > HWACCM_MAX_RESUME_LOOPS)
718 {
719 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitMaxResume);
720 rc = VINF_EM_RAW_INTERRUPT;
721 goto end;
722 }
723
724 /* Check for irq inhibition due to instruction fusing (sti, mov ss). */
725 if (VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
726 {
727 Log(("VM_FF_INHIBIT_INTERRUPTS at %VGv successor %VGv\n", pCtx->eip, EMGetInhibitInterruptsPC(pVM)));
728 if (pCtx->eip != EMGetInhibitInterruptsPC(pVM))
729 {
730 /** @note we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here.
731 * Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might
732 * force a world switch again, possibly allowing a guest interrupt to be dispatched in the process. This could
733 * break the guest. That sounds very unlikely, but such timing-sensitive problems are not as rare as you might think.
734 */
735 VM_FF_CLEAR(pVM, VM_FF_INHIBIT_INTERRUPTS);
736 /* Irq inhibition is no longer active; clear the corresponding SVM state. */
737 pVMCB->ctrl.u64IntShadow = 0;
738 }
739 }
740 else
741 {
742 /* Irq inhibition is no longer active; clear the corresponding SVM state. */
743 pVMCB->ctrl.u64IntShadow = 0;
744 }
745
746 /* Check for pending actions that force us to go back to ring 3. */
747#ifdef DEBUG
748 /* Intercept X86_XCPT_DB if stepping is enabled */
749 if (!DBGFIsStepping(pVM))
750#endif
751 {
752 if (VM_FF_ISPENDING(pVM, VM_FF_TO_R3 | VM_FF_TIMER))
753 {
754 VM_FF_CLEAR(pVM, VM_FF_TO_R3);
755 STAM_COUNTER_INC(&pVM->hwaccm.s.StatSwitchToR3);
756 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
757 rc = VINF_EM_RAW_TO_R3;
758 goto end;
759 }
760 }
761
762 /* Pending request packets might contain actions that need immediate attention, such as pending hardware interrupts. */
763 if (VM_FF_ISPENDING(pVM, VM_FF_REQUEST))
764 {
765 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
766 rc = VINF_EM_PENDING_REQUEST;
767 goto end;
768 }
769
770 /* When external interrupts are pending, we should exit the VM when IF is set. */
771 /** @note *after* VM_FF_INHIBIT_INTERRUPTS check!!! */
772 rc = SVMR0CheckPendingInterrupt(pVM, pVMCB, pCtx);
773 if (VBOX_FAILURE(rc))
774 {
775 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
776 goto end;
777 }
778
779 /* Load the guest state */
780 rc = SVMR0LoadGuestState(pVM, pCtx);
781 if (rc != VINF_SUCCESS)
782 {
783 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
784 goto end;
785 }
786 fGuestStateSynced = true;
787
788 /* All done! Let's start VM execution. */
789 STAM_PROFILE_ADV_START(&pVM->hwaccm.s.StatInGC, x);
790
791 /* Make sure we flush the TLB when required. */
792 if ( pVM->hwaccm.s.svm.fForceTLBFlush
793 && !pVM->hwaccm.s.svm.fAlwaysFlushTLB)
794 {
795 if (++pCpu->uCurrentASID >= pVM->hwaccm.s.svm.u32MaxASID)
796 {
797 pCpu->uCurrentASID = 1; /* start at 1; host uses 0 */
798 pVMCB->ctrl.TLBCtrl.n.u1TLBFlush = 1; /* wrap around; flush TLB */
799 }
800 else
801 STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushASID);
802 }
803 else
804 {
805 Assert(pVM->hwaccm.s.svm.fForceTLBFlush == pVM->hwaccm.s.svm.fAlwaysFlushTLB);
806 pVMCB->ctrl.TLBCtrl.n.u1TLBFlush = pVM->hwaccm.s.svm.fForceTLBFlush;
807 }
808
809 Assert(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.svm.u32MaxASID);
810 pVMCB->ctrl.TLBCtrl.n.u32ASID = pCpu->uCurrentASID;
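/* ASID 0 is reserved for the host. When a flush is required we normally just
 * step to the next ASID on this CPU, which makes the previously cached guest
 * translations unreachable; only when the ASID space wraps around do we ask
 * the CPU for a real TLB flush (u1TLBFlush above). */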
811
812#ifdef VBOX_WITH_STATISTICS
813 if (pVMCB->ctrl.TLBCtrl.n.u1TLBFlush)
814 STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushTLBWorldSwitch);
815 else
816 STAM_COUNTER_INC(&pVM->hwaccm.s.StatNoFlushTLBWorldSwitch);
817#endif
818
819 /* In case we execute a goto ResumeExecution later on. */
820 pVM->hwaccm.s.svm.fResumeVM = true;
821 pVM->hwaccm.s.svm.fForceTLBFlush = pVM->hwaccm.s.svm.fAlwaysFlushTLB;
822
823 Assert(sizeof(pVM->hwaccm.s.svm.pVMCBPhys) == 8);
824 Assert(pVMCB->ctrl.u32InterceptCtrl2 == ( SVM_CTRL2_INTERCEPT_VMRUN /* required */
825 | SVM_CTRL2_INTERCEPT_VMMCALL
826 | SVM_CTRL2_INTERCEPT_VMLOAD
827 | SVM_CTRL2_INTERCEPT_VMSAVE
828 | SVM_CTRL2_INTERCEPT_STGI
829 | SVM_CTRL2_INTERCEPT_CLGI
830 | SVM_CTRL2_INTERCEPT_SKINIT
831 | SVM_CTRL2_INTERCEPT_RDTSCP /* AMD only; we don't support this one */
832 | SVM_CTRL2_INTERCEPT_WBINVD
833 | SVM_CTRL2_INTERCEPT_MWAIT_UNCOND /* don't execute mwait or else we'll idle inside the guest (host thinks the cpu load is high) */
834 ));
835 Assert(pVMCB->ctrl.IntCtrl.n.u1VIrqMasking);
836 Assert(pVMCB->ctrl.u64IOPMPhysAddr == pVM->hwaccm.s.svm.pIOBitmapPhys);
837 Assert(pVMCB->ctrl.u64MSRPMPhysAddr == pVM->hwaccm.s.svm.pMSRBitmapPhys);
838 Assert(pVMCB->ctrl.u64NestedPaging == 0);
839 Assert(pVMCB->ctrl.u64LBRVirt == 0);
840
841 SVMVMRun(pVM->hwaccm.s.svm.pVMCBHostPhys, pVM->hwaccm.s.svm.pVMCBPhys, pCtx);
842 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatInGC, x);
843
844 /**
845 * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
846 * IMPORTANT: WE CAN'T DO ANY LOGGING OR OPERATIONS THAT CAN DO A LONGJMP BACK TO RING 3 *BEFORE* WE'VE SYNCED BACK (MOST OF) THE GUEST STATE
847 * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
848 */
849
850 STAM_PROFILE_ADV_START(&pVM->hwaccm.s.StatExit, x);
851
852 /* Reason for the VM exit */
853 exitCode = pVMCB->ctrl.u64ExitCode;
854
855 if (exitCode == (uint64_t)SVM_EXIT_INVALID) /* Invalid guest state. */
856 {
857 HWACCMDumpRegs(pCtx);
858#ifdef DEBUG
859 Log(("ctrl.u16InterceptRdCRx %x\n", pVMCB->ctrl.u16InterceptRdCRx));
860 Log(("ctrl.u16InterceptWrCRx %x\n", pVMCB->ctrl.u16InterceptWrCRx));
861 Log(("ctrl.u16InterceptRdDRx %x\n", pVMCB->ctrl.u16InterceptRdDRx));
862 Log(("ctrl.u16InterceptWrDRx %x\n", pVMCB->ctrl.u16InterceptWrDRx));
863 Log(("ctrl.u32InterceptException %x\n", pVMCB->ctrl.u32InterceptException));
864 Log(("ctrl.u32InterceptCtrl1 %x\n", pVMCB->ctrl.u32InterceptCtrl1));
865 Log(("ctrl.u32InterceptCtrl2 %x\n", pVMCB->ctrl.u32InterceptCtrl2));
866 Log(("ctrl.u64IOPMPhysAddr %VX64\n", pVMCB->ctrl.u64IOPMPhysAddr));
867 Log(("ctrl.u64MSRPMPhysAddr %VX64\n", pVMCB->ctrl.u64MSRPMPhysAddr));
868 Log(("ctrl.u64TSCOffset %VX64\n", pVMCB->ctrl.u64TSCOffset));
869
870 Log(("ctrl.TLBCtrl.u32ASID %x\n", pVMCB->ctrl.TLBCtrl.n.u32ASID));
871 Log(("ctrl.TLBCtrl.u1TLBFlush %x\n", pVMCB->ctrl.TLBCtrl.n.u1TLBFlush));
872 Log(("ctrl.TLBCtrl.u7Reserved %x\n", pVMCB->ctrl.TLBCtrl.n.u7Reserved));
873 Log(("ctrl.TLBCtrl.u24Reserved %x\n", pVMCB->ctrl.TLBCtrl.n.u24Reserved));
874
875 Log(("ctrl.IntCtrl.u8VTPR %x\n", pVMCB->ctrl.IntCtrl.n.u8VTPR));
876 Log(("ctrl.IntCtrl.u1VIrqValid %x\n", pVMCB->ctrl.IntCtrl.n.u1VIrqValid));
877 Log(("ctrl.IntCtrl.u7Reserved %x\n", pVMCB->ctrl.IntCtrl.n.u7Reserved));
878 Log(("ctrl.IntCtrl.u4VIrqPriority %x\n", pVMCB->ctrl.IntCtrl.n.u4VIrqPriority));
879 Log(("ctrl.IntCtrl.u1IgnoreTPR %x\n", pVMCB->ctrl.IntCtrl.n.u1IgnoreTPR));
880 Log(("ctrl.IntCtrl.u3Reserved %x\n", pVMCB->ctrl.IntCtrl.n.u3Reserved));
881 Log(("ctrl.IntCtrl.u1VIrqMasking %x\n", pVMCB->ctrl.IntCtrl.n.u1VIrqMasking));
882 Log(("ctrl.IntCtrl.u7Reserved2 %x\n", pVMCB->ctrl.IntCtrl.n.u7Reserved2));
883 Log(("ctrl.IntCtrl.u8VIrqVector %x\n", pVMCB->ctrl.IntCtrl.n.u8VIrqVector));
884 Log(("ctrl.IntCtrl.u24Reserved %x\n", pVMCB->ctrl.IntCtrl.n.u24Reserved));
885
886 Log(("ctrl.u64IntShadow %VX64\n", pVMCB->ctrl.u64IntShadow));
887 Log(("ctrl.u64ExitCode %VX64\n", pVMCB->ctrl.u64ExitCode));
888 Log(("ctrl.u64ExitInfo1 %VX64\n", pVMCB->ctrl.u64ExitInfo1));
889 Log(("ctrl.u64ExitInfo2 %VX64\n", pVMCB->ctrl.u64ExitInfo2));
890 Log(("ctrl.ExitIntInfo.u8Vector %x\n", pVMCB->ctrl.ExitIntInfo.n.u8Vector));
891 Log(("ctrl.ExitIntInfo.u3Type %x\n", pVMCB->ctrl.ExitIntInfo.n.u3Type));
892 Log(("ctrl.ExitIntInfo.u1ErrorCodeValid %x\n", pVMCB->ctrl.ExitIntInfo.n.u1ErrorCodeValid));
893 Log(("ctrl.ExitIntInfo.u19Reserved %x\n", pVMCB->ctrl.ExitIntInfo.n.u19Reserved));
894 Log(("ctrl.ExitIntInfo.u1Valid %x\n", pVMCB->ctrl.ExitIntInfo.n.u1Valid));
895 Log(("ctrl.ExitIntInfo.u32ErrorCode %x\n", pVMCB->ctrl.ExitIntInfo.n.u32ErrorCode));
896 Log(("ctrl.u64NestedPaging %VX64\n", pVMCB->ctrl.u64NestedPaging));
897 Log(("ctrl.EventInject.u8Vector %x\n", pVMCB->ctrl.EventInject.n.u8Vector));
898 Log(("ctrl.EventInject.u3Type %x\n", pVMCB->ctrl.EventInject.n.u3Type));
899 Log(("ctrl.EventInject.u1ErrorCodeValid %x\n", pVMCB->ctrl.EventInject.n.u1ErrorCodeValid));
900 Log(("ctrl.EventInject.u19Reserved %x\n", pVMCB->ctrl.EventInject.n.u19Reserved));
901 Log(("ctrl.EventInject.u1Valid %x\n", pVMCB->ctrl.EventInject.n.u1Valid));
902 Log(("ctrl.EventInject.u32ErrorCode %x\n", pVMCB->ctrl.EventInject.n.u32ErrorCode));
903
904 Log(("ctrl.u64HostCR3 %VX64\n", pVMCB->ctrl.u64HostCR3));
905 Log(("ctrl.u64LBRVirt %VX64\n", pVMCB->ctrl.u64LBRVirt));
906
907 Log(("guest.CS.u16Sel %04X\n", pVMCB->guest.CS.u16Sel));
908 Log(("guest.CS.u16Attr %04X\n", pVMCB->guest.CS.u16Attr));
909 Log(("guest.CS.u32Limit %X\n", pVMCB->guest.CS.u32Limit));
910 Log(("guest.CS.u64Base %VX64\n", pVMCB->guest.CS.u64Base));
911 Log(("guest.DS.u16Sel %04X\n", pVMCB->guest.DS.u16Sel));
912 Log(("guest.DS.u16Attr %04X\n", pVMCB->guest.DS.u16Attr));
913 Log(("guest.DS.u32Limit %X\n", pVMCB->guest.DS.u32Limit));
914 Log(("guest.DS.u64Base %VX64\n", pVMCB->guest.DS.u64Base));
915 Log(("guest.ES.u16Sel %04X\n", pVMCB->guest.ES.u16Sel));
916 Log(("guest.ES.u16Attr %04X\n", pVMCB->guest.ES.u16Attr));
917 Log(("guest.ES.u32Limit %X\n", pVMCB->guest.ES.u32Limit));
918 Log(("guest.ES.u64Base %VX64\n", pVMCB->guest.ES.u64Base));
919 Log(("guest.FS.u16Sel %04X\n", pVMCB->guest.FS.u16Sel));
920 Log(("guest.FS.u16Attr %04X\n", pVMCB->guest.FS.u16Attr));
921 Log(("guest.FS.u32Limit %X\n", pVMCB->guest.FS.u32Limit));
922 Log(("guest.FS.u64Base %VX64\n", pVMCB->guest.FS.u64Base));
923 Log(("guest.GS.u16Sel %04X\n", pVMCB->guest.GS.u16Sel));
924 Log(("guest.GS.u16Attr %04X\n", pVMCB->guest.GS.u16Attr));
925 Log(("guest.GS.u32Limit %X\n", pVMCB->guest.GS.u32Limit));
926 Log(("guest.GS.u64Base %VX64\n", pVMCB->guest.GS.u64Base));
927
928 Log(("guest.GDTR.u32Limit %X\n", pVMCB->guest.GDTR.u32Limit));
929 Log(("guest.GDTR.u64Base %VX64\n", pVMCB->guest.GDTR.u64Base));
930
931 Log(("guest.LDTR.u16Sel %04X\n", pVMCB->guest.LDTR.u16Sel));
932 Log(("guest.LDTR.u16Attr %04X\n", pVMCB->guest.LDTR.u16Attr));
933 Log(("guest.LDTR.u32Limit %X\n", pVMCB->guest.LDTR.u32Limit));
934 Log(("guest.LDTR.u64Base %VX64\n", pVMCB->guest.LDTR.u64Base));
935
936 Log(("guest.IDTR.u32Limit %X\n", pVMCB->guest.IDTR.u32Limit));
937 Log(("guest.IDTR.u64Base %VX64\n", pVMCB->guest.IDTR.u64Base));
938
939 Log(("guest.TR.u16Sel %04X\n", pVMCB->guest.TR.u16Sel));
940 Log(("guest.TR.u16Attr %04X\n", pVMCB->guest.TR.u16Attr));
941 Log(("guest.TR.u32Limit %X\n", pVMCB->guest.TR.u32Limit));
942 Log(("guest.TR.u64Base %VX64\n", pVMCB->guest.TR.u64Base));
943
944 Log(("guest.u8CPL %X\n", pVMCB->guest.u8CPL));
945 Log(("guest.u64CR0 %VX64\n", pVMCB->guest.u64CR0));
946 Log(("guest.u64CR2 %VX64\n", pVMCB->guest.u64CR2));
947 Log(("guest.u64CR3 %VX64\n", pVMCB->guest.u64CR3));
948 Log(("guest.u64CR4 %VX64\n", pVMCB->guest.u64CR4));
949 Log(("guest.u64DR6 %VX64\n", pVMCB->guest.u64DR6));
950 Log(("guest.u64DR7 %VX64\n", pVMCB->guest.u64DR7));
951
952 Log(("guest.u64RIP %VX64\n", pVMCB->guest.u64RIP));
953 Log(("guest.u64RSP %VX64\n", pVMCB->guest.u64RSP));
954 Log(("guest.u64RAX %VX64\n", pVMCB->guest.u64RAX));
955 Log(("guest.u64RFlags %VX64\n", pVMCB->guest.u64RFlags));
956
957 Log(("guest.u64SysEnterCS %VX64\n", pVMCB->guest.u64SysEnterCS));
958 Log(("guest.u64SysEnterEIP %VX64\n", pVMCB->guest.u64SysEnterEIP));
959 Log(("guest.u64SysEnterESP %VX64\n", pVMCB->guest.u64SysEnterESP));
960
961 Log(("guest.u64EFER %VX64\n", pVMCB->guest.u64EFER));
962 Log(("guest.u64STAR %VX64\n", pVMCB->guest.u64STAR));
963 Log(("guest.u64LSTAR %VX64\n", pVMCB->guest.u64LSTAR));
964 Log(("guest.u64CSTAR %VX64\n", pVMCB->guest.u64CSTAR));
965 Log(("guest.u64SFMASK %VX64\n", pVMCB->guest.u64SFMASK));
966 Log(("guest.u64KernelGSBase %VX64\n", pVMCB->guest.u64KernelGSBase));
967 Log(("guest.u64GPAT %VX64\n", pVMCB->guest.u64GPAT));
968 Log(("guest.u64DBGCTL %VX64\n", pVMCB->guest.u64DBGCTL));
969 Log(("guest.u64BR_FROM %VX64\n", pVMCB->guest.u64BR_FROM));
970 Log(("guest.u64BR_TO %VX64\n", pVMCB->guest.u64BR_TO));
971 Log(("guest.u64LASTEXCPFROM %VX64\n", pVMCB->guest.u64LASTEXCPFROM));
972 Log(("guest.u64LASTEXCPTO %VX64\n", pVMCB->guest.u64LASTEXCPTO));
973
974#endif
975 rc = VERR_SVM_UNABLE_TO_START_VM;
976 goto end;
977 }
978
979 /* Let's first sync back eip, esp, and eflags. */
980 pCtx->eip = pVMCB->guest.u64RIP;
981 pCtx->esp = pVMCB->guest.u64RSP;
982 pCtx->eflags.u32 = pVMCB->guest.u64RFlags;
983 /* eax is saved/restored across the vmrun instruction */
984 pCtx->eax = pVMCB->guest.u64RAX;
985
986 /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
987 SVM_READ_SELREG(SS, ss);
988 SVM_READ_SELREG(CS, cs);
989 SVM_READ_SELREG(DS, ds);
990 SVM_READ_SELREG(ES, es);
991 SVM_READ_SELREG(FS, fs);
992 SVM_READ_SELREG(GS, gs);
993
994 /** @note no reason to sync back the CRx and DRx registers. They can't be changed by the guest. */
995
996 /** @note NOW IT'S SAFE FOR LOGGING! */
997
998 /* Take care of instruction fusing (sti, mov ss) */
999 if (pVMCB->ctrl.u64IntShadow & SVM_INTERRUPT_SHADOW_ACTIVE)
1000 {
1001 Log(("uInterruptState %x eip=%VGv\n", pVMCB->ctrl.u64IntShadow, pCtx->eip));
1002 EMSetInhibitInterruptsPC(pVM, pCtx->eip);
1003 }
1004 else
1005 VM_FF_CLEAR(pVM, VM_FF_INHIBIT_INTERRUPTS);
1006
1007 Log2(("exitCode = %x\n", exitCode));
1008
1009 /* Check if an injected event was interrupted prematurely. */
1010 pVM->hwaccm.s.Event.intInfo = pVMCB->ctrl.ExitIntInfo.au64[0];
1011 if ( pVMCB->ctrl.ExitIntInfo.n.u1Valid
1012 && pVMCB->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT /* we don't care about 'int xx' as the instruction will be restarted. */)
1013 {
1014 Log(("Pending inject %VX64 at %08x exit=%08x\n", pVM->hwaccm.s.Event.intInfo, pCtx->eip, exitCode));
1015 pVM->hwaccm.s.Event.fPending = true;
1016 /* Error code present? (redundant) */
1017 if (pVMCB->ctrl.ExitIntInfo.n.u1ErrorCodeValid)
1018 {
1019 pVM->hwaccm.s.Event.errCode = pVMCB->ctrl.ExitIntInfo.n.u32ErrorCode;
1020 }
1021 else
1022 pVM->hwaccm.s.Event.errCode = 0;
1023 }
1024 STAM_COUNTER_INC(&pVM->hwaccm.s.pStatExitReasonR0[exitCode & MASK_EXITREASON_STAT]);
1025
1026 /* Deal with the reason of the VM-exit. */
1027 switch (exitCode)
1028 {
1029 case SVM_EXIT_EXCEPTION_0: case SVM_EXIT_EXCEPTION_1: case SVM_EXIT_EXCEPTION_2: case SVM_EXIT_EXCEPTION_3:
1030 case SVM_EXIT_EXCEPTION_4: case SVM_EXIT_EXCEPTION_5: case SVM_EXIT_EXCEPTION_6: case SVM_EXIT_EXCEPTION_7:
1031 case SVM_EXIT_EXCEPTION_8: case SVM_EXIT_EXCEPTION_9: case SVM_EXIT_EXCEPTION_A: case SVM_EXIT_EXCEPTION_B:
1032 case SVM_EXIT_EXCEPTION_C: case SVM_EXIT_EXCEPTION_D: case SVM_EXIT_EXCEPTION_E: case SVM_EXIT_EXCEPTION_F:
1033 case SVM_EXIT_EXCEPTION_10: case SVM_EXIT_EXCEPTION_11: case SVM_EXIT_EXCEPTION_12: case SVM_EXIT_EXCEPTION_13:
1034 case SVM_EXIT_EXCEPTION_14: case SVM_EXIT_EXCEPTION_15: case SVM_EXIT_EXCEPTION_16: case SVM_EXIT_EXCEPTION_17:
1035 case SVM_EXIT_EXCEPTION_18: case SVM_EXIT_EXCEPTION_19: case SVM_EXIT_EXCEPTION_1A: case SVM_EXIT_EXCEPTION_1B:
1036 case SVM_EXIT_EXCEPTION_1C: case SVM_EXIT_EXCEPTION_1D: case SVM_EXIT_EXCEPTION_1E: case SVM_EXIT_EXCEPTION_1F:
1037 {
1038 /* Pending trap. */
1039 SVM_EVENT Event;
1040 uint32_t vector = exitCode - SVM_EXIT_EXCEPTION_0;
1041
1042 Log2(("Hardware/software interrupt %d\n", vector));
1043 switch (vector)
1044 {
1045#ifdef DEBUG
1046 case X86_XCPT_DB:
1047 rc = DBGFR0Trap01Handler(pVM, CPUMCTX2CORE(pCtx), pVMCB->guest.u64DR6);
1048 Assert(rc != VINF_EM_RAW_GUEST_TRAP);
1049 break;
1050#endif
1051
1052 case X86_XCPT_NM:
1053 {
1054 uint32_t oldCR0;
1055
1056 Log(("#NM fault at %VGv\n", pCtx->eip));
1057
1058 /** @todo don't intercept #NM exceptions anymore when we've activated the guest FPU state. */
1059 oldCR0 = ASMGetCR0();
1060 /* If we sync the FPU/XMM state on-demand, then we can continue execution as if nothing has happened. */
1061 rc = CPUMHandleLazyFPU(pVM);
1062 if (rc == VINF_SUCCESS)
1063 {
1064 Assert(CPUMIsGuestFPUStateActive(pVM));
1065
1066 /* CPUMHandleLazyFPU could have changed CR0; restore it. */
1067 ASMSetCR0(oldCR0);
1068
1069 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitShadowNM);
1070
1071 /* Continue execution. */
1072 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1073 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
1074
1075 goto ResumeExecution;
1076 }
1077
1078 Log(("Forward #NM fault to the guest\n"));
1079 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestNM);
1080
1081 Event.au64[0] = 0;
1082 Event.n.u3Type = SVM_EVENT_EXCEPTION;
1083 Event.n.u1Valid = 1;
1084 Event.n.u8Vector = X86_XCPT_NM;
1085
1086 SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
1087 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1088 goto ResumeExecution;
1089 }
1090
1091 case X86_XCPT_PF: /* Page fault */
1092 {
1093 uint32_t errCode = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
1094 RTGCUINTPTR uFaultAddress = pVMCB->ctrl.u64ExitInfo2; /* EXITINFO2 = fault address */
1095
1096 Log2(("Page fault at %VGv cr2=%VGv error code %x\n", pCtx->eip, uFaultAddress, errCode));
1097 /* EXITINFO2 contains the linear address of the page fault. */
1098 TRPMAssertTrap(pVM, X86_XCPT_PF, TRPM_TRAP);
1099 TRPMSetErrorCode(pVM, errCode);
1100 TRPMSetFaultAddress(pVM, uFaultAddress);
1101
1102 /* Forward it to our trap handler first, in case our shadow pages are out of sync. */
1103 rc = PGMTrap0eHandler(pVM, errCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);
1104 Log2(("PGMTrap0eHandler %VGv returned %Vrc\n", pCtx->eip, rc));
1105 if (rc == VINF_SUCCESS)
1106 { /* We've successfully synced our shadow pages, so let's just continue execution. */
1107 Log2(("Shadow page fault at %VGv cr2=%VGv error code %x\n", pCtx->eip, uFaultAddress, errCode));
1108 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitShadowPF);
1109
1110 TRPMResetTrap(pVM);
1111
1112 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1113 goto ResumeExecution;
1114 }
1115 else
1116 if (rc == VINF_EM_RAW_GUEST_TRAP)
1117 { /* A genuine page fault.
1118 * Forward the trap to the guest by injecting the exception and resuming execution.
1119 */
1120 Log2(("Forward page fault to the guest\n"));
1121 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestPF);
1122 /* The error code might have been changed. */
1123 errCode = TRPMGetErrorCode(pVM);
1124
1125 TRPMResetTrap(pVM);
1126
1127 /* Now we must update CR2. */
1128 pCtx->cr2 = uFaultAddress;
1129
1130 Event.au64[0] = 0;
1131 Event.n.u3Type = SVM_EVENT_EXCEPTION;
1132 Event.n.u1Valid = 1;
1133 Event.n.u8Vector = X86_XCPT_PF;
1134 Event.n.u1ErrorCodeValid = 1;
1135 Event.n.u32ErrorCode = errCode;
1136
1137 SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
1138
1139 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1140 goto ResumeExecution;
1141 }
1142#ifdef VBOX_STRICT
1143 if (rc != VINF_EM_RAW_EMULATE_INSTR)
1144 Log(("PGMTrap0eHandler failed with %d\n", rc));
1145#endif
1146 /* Need to go back to the recompiler to emulate the instruction. */
1147 TRPMResetTrap(pVM);
1148 break;
1149 }
1150
1151 case X86_XCPT_MF: /* Floating point exception. */
1152 {
1153 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestMF);
1154 if (!(pCtx->cr0 & X86_CR0_NE))
1155 {
1156 /* old style FPU error reporting needs some extra work. */
1157 /** @todo don't fall back to the recompiler, but do it manually. */
1158 rc = VINF_EM_RAW_EMULATE_INSTR;
1159 break;
1160 }
1161 Log(("Trap %x at %VGv\n", vector, pCtx->eip));
1162
1163 Event.au64[0] = 0;
1164 Event.n.u3Type = SVM_EVENT_EXCEPTION;
1165 Event.n.u1Valid = 1;
1166 Event.n.u8Vector = X86_XCPT_MF;
1167
1168 SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
1169
1170 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1171 goto ResumeExecution;
1172 }
1173
1174#ifdef VBOX_STRICT
1175 case X86_XCPT_GP: /* General protection fault exception. */
1176 case X86_XCPT_UD: /* Invalid opcode exception. */
1177 case X86_XCPT_DE: /* Divide error exception. */
1178 case X86_XCPT_SS: /* Stack segment exception. */
1179 case X86_XCPT_NP: /* Segment not present exception. */
1180 {
1181 Event.au64[0] = 0;
1182 Event.n.u3Type = SVM_EVENT_EXCEPTION;
1183 Event.n.u1Valid = 1;
1184 Event.n.u8Vector = vector;
1185
1186 switch(vector)
1187 {
1188 case X86_XCPT_GP:
1189 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestGP);
1190 Event.n.u1ErrorCodeValid = 1;
1191 Event.n.u32ErrorCode = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
1192 break;
1193 case X86_XCPT_DE:
1194 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestDE);
1195 break;
1196 case X86_XCPT_UD:
1197 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestUD);
1198 break;
1199 case X86_XCPT_SS:
1200 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestSS);
1201 Event.n.u1ErrorCodeValid = 1;
1202 Event.n.u32ErrorCode = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
1203 break;
1204 case X86_XCPT_NP:
1205 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestNP);
1206 Event.n.u1ErrorCodeValid = 1;
1207 Event.n.u32ErrorCode = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
1208 break;
1209 }
1210 Log(("Trap %x at %VGv\n", vector, pCtx->eip));
1211 SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
1212
1213 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1214 goto ResumeExecution;
1215 }
1216#endif
1217 default:
1218 AssertMsgFailed(("Unexpected vm-exit caused by exception %x\n", vector));
1219 rc = VERR_EM_INTERNAL_ERROR;
1220 break;
1221
1222 } /* switch (vector) */
1223 break;
1224 }
1225
1226 case SVM_EXIT_VINTR:
1227 /* A virtual interrupt is about to be delivered, which means IF=1. */
1228 Log(("SVM_EXIT_VINTR IF=%d\n", pCtx->eflags.Bits.u1IF));
1229 pVMCB->ctrl.IntCtrl.n.u1VIrqValid = 0;
1230 pVMCB->ctrl.IntCtrl.n.u1IgnoreTPR = 0;
1231 pVMCB->ctrl.IntCtrl.n.u8VIrqVector = 0;
1232 goto ResumeExecution;
1233
1234 case SVM_EXIT_FERR_FREEZE:
1235 case SVM_EXIT_INTR:
1236 case SVM_EXIT_NMI:
1237 case SVM_EXIT_SMI:
1238 case SVM_EXIT_INIT:
1239 /* External interrupt; leave to allow it to be dispatched again. */
1240 rc = VINF_EM_RAW_INTERRUPT;
1241 break;
1242
1243 case SVM_EXIT_WBINVD:
1244 case SVM_EXIT_INVD: /* Guest software attempted to execute INVD. */
1245 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitInvd);
1246 /* Skip instruction and continue directly. */
1247 pCtx->eip += 2; /** @note hardcoded opcode size! */
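/* INVD, WBINVD, CPUID and RDTSC (below) are all two-byte 0F-prefixed opcodes,
 * so the fixed increment is safe for these exits. */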
1248 /* Continue execution.*/
1249 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1250 goto ResumeExecution;
1251
1252 case SVM_EXIT_CPUID: /* Guest software attempted to execute CPUID. */
1253 {
1254 Log2(("SVM: Cpuid %x\n", pCtx->eax));
1255 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCpuid);
1256 rc = EMInterpretCpuId(pVM, CPUMCTX2CORE(pCtx));
1257 if (rc == VINF_SUCCESS)
1258 {
1259 /* Update EIP and continue execution. */
1260 pCtx->eip += 2; /** @note hardcoded opcode size! */
1261 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1262 goto ResumeExecution;
1263 }
1264 AssertMsgFailed(("EMU: cpuid failed with %Vrc\n", rc));
1265 rc = VINF_EM_RAW_EMULATE_INSTR;
1266 break;
1267 }
1268
1269 case SVM_EXIT_RDTSC: /* Guest software attempted to execute RDTSC. */
1270 {
1271 Log2(("SVM: Rdtsc\n"));
1272 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitRdtsc);
1273 rc = EMInterpretRdtsc(pVM, CPUMCTX2CORE(pCtx));
1274 if (rc == VINF_SUCCESS)
1275 {
1276 /* Update EIP and continue execution. */
1277 pCtx->eip += 2; /** @note hardcoded opcode size! */
1278 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1279 goto ResumeExecution;
1280 }
1281 AssertMsgFailed(("EMU: rdtsc failed with %Vrc\n", rc));
1282 rc = VINF_EM_RAW_EMULATE_INSTR;
1283 break;
1284 }
1285
1286 case SVM_EXIT_INVLPG: /* Guest software attempted to execute INVLPG. */
1287 {
1288 Log2(("SVM: invlpg\n"));
1289 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitInvpg);
1290
1291 /* Truly a pita. Why can't SVM give the same information as VMX? */
1292 rc = SVMR0InterpretInvpg(pVM, CPUMCTX2CORE(pCtx), pVMCB->ctrl.TLBCtrl.n.u32ASID);
1293 if (rc == VINF_SUCCESS)
1294 {
1295 STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushPageInvlpg);
1296 goto ResumeExecution; /* eip already updated */
1297 }
1298 break;
1299 }
1300
1301 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR1: case SVM_EXIT_WRITE_CR2: case SVM_EXIT_WRITE_CR3:
1302 case SVM_EXIT_WRITE_CR4: case SVM_EXIT_WRITE_CR5: case SVM_EXIT_WRITE_CR6: case SVM_EXIT_WRITE_CR7:
1303 case SVM_EXIT_WRITE_CR8: case SVM_EXIT_WRITE_CR9: case SVM_EXIT_WRITE_CR10: case SVM_EXIT_WRITE_CR11:
1304 case SVM_EXIT_WRITE_CR12: case SVM_EXIT_WRITE_CR13: case SVM_EXIT_WRITE_CR14: case SVM_EXIT_WRITE_CR15:
1305 {
1306 uint32_t cbSize;
1307
1308 Log2(("SVM: %VGv mov cr%d, \n", pCtx->eip, exitCode - SVM_EXIT_WRITE_CR0));
1309 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCRxWrite);
1310 rc = EMInterpretInstruction(pVM, CPUMCTX2CORE(pCtx), 0, &cbSize);
1311
1312 switch (exitCode - SVM_EXIT_WRITE_CR0)
1313 {
1314 case 0:
1315 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
1316 break;
1317 case 2:
1318 break;
1319 case 3:
1320 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
1321 break;
1322 case 4:
1323 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;
1324 break;
1325 default:
1326 AssertFailed();
1327 }
1328 /* Check if a sync operation is pending. */
1329 if ( rc == VINF_SUCCESS /* don't bother if we are going to ring 3 anyway */
1330 && VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
1331 {
1332 rc = PGMSyncCR3(pVM, CPUMGetGuestCR0(pVM), CPUMGetGuestCR3(pVM), CPUMGetGuestCR4(pVM), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
1333 AssertRC(rc);
1334
1335 STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushTLBCRxChange);
1336
1337 /** @note Force a TLB flush. SVM requires us to do it manually. */
1338 pVM->hwaccm.s.svm.fForceTLBFlush = true;
1339 }
1340 if (rc == VINF_SUCCESS)
1341 {
1342 /* EIP has been updated already. */
1343
1344 /* Only resume if successful. */
1345 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1346 goto ResumeExecution;
1347 }
1348 Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
1349 break;
1350 }
1351
1352 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR1: case SVM_EXIT_READ_CR2: case SVM_EXIT_READ_CR3:
1353 case SVM_EXIT_READ_CR4: case SVM_EXIT_READ_CR5: case SVM_EXIT_READ_CR6: case SVM_EXIT_READ_CR7:
1354 case SVM_EXIT_READ_CR8: case SVM_EXIT_READ_CR9: case SVM_EXIT_READ_CR10: case SVM_EXIT_READ_CR11:
1355 case SVM_EXIT_READ_CR12: case SVM_EXIT_READ_CR13: case SVM_EXIT_READ_CR14: case SVM_EXIT_READ_CR15:
1356 {
1357 uint32_t cbSize;
1358
1359 Log2(("SVM: %VGv mov x, cr%d\n", pCtx->eip, exitCode - SVM_EXIT_READ_CR0));
1360 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCRxRead);
1361 rc = EMInterpretInstruction(pVM, CPUMCTX2CORE(pCtx), 0, &cbSize);
1362 if (rc == VINF_SUCCESS)
1363 {
1364 /* EIP has been updated already. */
1365
1366 /* Only resume if successful. */
1367 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1368 goto ResumeExecution;
1369 }
1370 Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
1371 break;
1372 }
1373
1374 case SVM_EXIT_WRITE_DR0: case SVM_EXIT_WRITE_DR1: case SVM_EXIT_WRITE_DR2: case SVM_EXIT_WRITE_DR3:
1375 case SVM_EXIT_WRITE_DR4: case SVM_EXIT_WRITE_DR5: case SVM_EXIT_WRITE_DR6: case SVM_EXIT_WRITE_DR7:
1376 case SVM_EXIT_WRITE_DR8: case SVM_EXIT_WRITE_DR9: case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11:
1377 case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13: case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
1378 {
1379 uint32_t cbSize;
1380
1381 Log2(("SVM: %VGv mov dr%d, x\n", pCtx->eip, exitCode - SVM_EXIT_WRITE_DR0));
1382 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitDRxRead);
1383 rc = EMInterpretInstruction(pVM, CPUMCTX2CORE(pCtx), 0, &cbSize);
1384 if (rc == VINF_SUCCESS)
1385 {
1386 /* EIP has been updated already. */
1387
1388 /* Only resume if successful. */
1389 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1390 goto ResumeExecution;
1391 }
1392 Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
1393 break;
1394 }
1395
1396 case SVM_EXIT_READ_DR0: case SVM_EXIT_READ_DR1: case SVM_EXIT_READ_DR2: case SVM_EXIT_READ_DR3:
1397 case SVM_EXIT_READ_DR4: case SVM_EXIT_READ_DR5: case SVM_EXIT_READ_DR6: case SVM_EXIT_READ_DR7:
1398 case SVM_EXIT_READ_DR8: case SVM_EXIT_READ_DR9: case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11:
1399 case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13: case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
1400 {
1401 uint32_t cbSize;
1402
1403 Log2(("SVM: %VGv mov dr%d, x\n", pCtx->eip, exitCode - SVM_EXIT_READ_DR0));
1404 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitDRxRead);
1405 rc = EMInterpretInstruction(pVM, CPUMCTX2CORE(pCtx), 0, &cbSize);
1406 if (rc == VINF_SUCCESS)
1407 {
1408 /* EIP has been updated already. */
1409
1410 /* Only resume if successful. */
1411 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1412 goto ResumeExecution;
1413 }
1414 Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
1415 break;
1416 }
1417
1418 /* Note: We'll get a #GP if the IO instruction isn't allowed (IOPL or TSS bitmap); no need to double check. */
1419 case SVM_EXIT_IOIO: /* I/O instruction. */
1420 {
1421 SVM_IOIO_EXIT IoExitInfo;
1422 uint32_t uIOSize, uAndVal;
1423
1424 IoExitInfo.au32[0] = pVMCB->ctrl.u64ExitInfo1;
1425
1426 /** @todo could use a lookup table here */
1427 if (IoExitInfo.n.u1OP8)
1428 {
1429 uIOSize = 1;
1430 uAndVal = 0xff;
1431 }
1432 else
1433 if (IoExitInfo.n.u1OP16)
1434 {
1435 uIOSize = 2;
1436 uAndVal = 0xffff;
1437 }
1438 else
1439 if (IoExitInfo.n.u1OP32)
1440 {
1441 uIOSize = 4;
1442 uAndVal = 0xffffffff;
1443 }
1444 else
1445 {
1446 AssertFailed(); /* should be fatal. */
1447 rc = VINF_EM_RAW_EMULATE_INSTR;
1448 break;
1449 }
1450
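/* One possible shape for the lookup table mentioned in the @todo above
 * (illustrative sketch only, not part of the original file):
 *     static const struct { uint32_t cb; uint32_t uAndVal; } s_aIOSize[] =
 *     { { 1, 0xff }, { 2, 0xffff }, { 4, 0xffffffff } };
 * indexed by whichever of the u1OP8/u1OP16/u1OP32 bits is set. */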
1451 if (IoExitInfo.n.u1STR)
1452 {
1453 /* ins/outs */
1454 uint32_t prefix = 0;
1455 if (IoExitInfo.n.u1REP)
1456 prefix |= PREFIX_REP;
1457
1458 if (IoExitInfo.n.u1Type == 0)
1459 {
1460 Log2(("IOMInterpretOUTSEx %VGv %x size=%d\n", pCtx->eip, IoExitInfo.n.u16Port, uIOSize));
1461 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIOStringWrite);
1462 rc = IOMInterpretOUTSEx(pVM, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, prefix, uIOSize);
1463 }
1464 else
1465 {
1466 Log2(("IOMInterpretINSEx %VGv %x size=%d\n", pCtx->eip, IoExitInfo.n.u16Port, uIOSize));
1467 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIOStringRead);
1468 rc = IOMInterpretINSEx(pVM, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, prefix, uIOSize);
1469 }
1470 }
1471 else
1472 {
1473 /* normal in/out */
1474 Assert(!IoExitInfo.n.u1REP);
1475
1476 if (IoExitInfo.n.u1Type == 0)
1477 {
1478 Log2(("IOMIOPortWrite %VGv %x %x size=%d\n", pCtx->eip, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, uIOSize));
1479 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIOWrite);
1480 rc = IOMIOPortWrite(pVM, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, uIOSize);
1481 }
1482 else
1483 {
1484 uint32_t u32Val = 0;
1485
1486 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIORead);
1487 rc = IOMIOPortRead(pVM, IoExitInfo.n.u16Port, &u32Val, uIOSize);
1488 if (IOM_SUCCESS(rc))
1489 {
1490 /* Write back to the EAX register. */
1491 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
1492 Log2(("IOMIOPortRead %VGv %x %x size=%d\n", pCtx->eip, IoExitInfo.n.u16Port, u32Val & uAndVal, uIOSize));
1493 }
1494 }
1495 }
1496 /*
1497 * Handle the I/O return codes.
1498 * (The unhandled cases end up with rc == VINF_EM_RAW_EMULATE_INSTR.)
1499 */
1500 if (IOM_SUCCESS(rc))
1501 {
1502 /* Update EIP and continue execution. */
1503 pCtx->eip = pVMCB->ctrl.u64ExitInfo2; /* RIP/EIP of the next instruction is saved in EXITINFO2. */
1504 if (RT_LIKELY(rc == VINF_SUCCESS))
1505 {
1506 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1507 goto ResumeExecution;
1508 }
1509 Log2(("EM status from IO at %VGv %x size %d: %Vrc\n", pCtx->eip, IoExitInfo.n.u16Port, uIOSize, rc));
1510 break;
1511 }
1512
1513#ifdef VBOX_STRICT
1514 if (rc == VINF_IOM_HC_IOPORT_READ)
1515 Assert(IoExitInfo.n.u1Type != 0);
1516 else if (rc == VINF_IOM_HC_IOPORT_WRITE)
1517 Assert(IoExitInfo.n.u1Type == 0);
1518 else
1519 AssertMsg(VBOX_FAILURE(rc) || rc == VINF_EM_RAW_EMULATE_INSTR || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_TRPM_XCPT_DISPATCHED, ("%Vrc\n", rc));
1520#endif
1521 Log2(("Failed IO at %VGv %x size %d\n", pCtx->eip, IoExitInfo.n.u16Port, uIOSize));
1522 break;
1523 }
1524
1525 case SVM_EXIT_HLT:
1526 /** Check if external interrupts are pending; if so, don't switch back. */
1527 if (VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)))
1528 {
1529 pCtx->eip++; /* skip hlt */
1530 goto ResumeExecution;
1531 }
1532
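            /* No interrupt is pending, so return to ring 3 and emulate the HLT there (halt until an interrupt arrives). */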
1533 rc = VINF_EM_RAW_EMULATE_INSTR_HLT;
1534 break;
1535
1536 case SVM_EXIT_RSM:
1537 case SVM_EXIT_INVLPGA:
1538 case SVM_EXIT_VMRUN:
1539 case SVM_EXIT_VMMCALL:
1540 case SVM_EXIT_VMLOAD:
1541 case SVM_EXIT_VMSAVE:
1542 case SVM_EXIT_STGI:
1543 case SVM_EXIT_CLGI:
1544 case SVM_EXIT_SKINIT:
1545 case SVM_EXIT_RDTSCP:
1546 {
1547 /* Unsupported instructions. */
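            /* Inject #UD so the guest sees the same fault it would get on a CPU
               that doesn't provide these instructions. */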
1548 SVM_EVENT Event;
1549
1550 Event.au64[0] = 0;
1551 Event.n.u3Type = SVM_EVENT_EXCEPTION;
1552 Event.n.u1Valid = 1;
1553 Event.n.u8Vector = X86_XCPT_UD;
1554
1555 Log(("Forced #UD trap at %VGv\n", pCtx->eip));
1556 SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
1557
1558 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1559 goto ResumeExecution;
1560 }
1561
1562 /* Emulate in ring 3. */
1563 case SVM_EXIT_MONITOR:
1564 case SVM_EXIT_RDPMC:
1565 case SVM_EXIT_PAUSE:
1566 case SVM_EXIT_MWAIT_UNCOND:
1567 case SVM_EXIT_MWAIT_ARMED:
1568 case SVM_EXIT_MSR:
1569 rc = VINF_EM_RAW_EXCEPTION_PRIVILEGED;
1570 break;
1571
1572 case SVM_EXIT_NPF:
1573 AssertFailed(); /* unexpected */
1574 break;
1575
1576 case SVM_EXIT_SHUTDOWN:
1577 rc = VINF_EM_RESET; /* Triple fault equals a reset. */
1578 break;
1579
1580 case SVM_EXIT_IDTR_READ:
1581 case SVM_EXIT_GDTR_READ:
1582 case SVM_EXIT_LDTR_READ:
1583 case SVM_EXIT_TR_READ:
1584 case SVM_EXIT_IDTR_WRITE:
1585 case SVM_EXIT_GDTR_WRITE:
1586 case SVM_EXIT_LDTR_WRITE:
1587 case SVM_EXIT_TR_WRITE:
1588 case SVM_EXIT_CR0_SEL_WRITE:
1589 default:
1590 /* Unexpected exit codes. */
1591 rc = VERR_EM_INTERNAL_ERROR;
1592 AssertMsgFailed(("Unexpected exit code %x\n", exitCode)); /* Can't happen. */
1593 break;
1594 }
1595
1596end:
1597 if (fGuestStateSynced)
1598 {
1599        /* Remaining guest CPU context: TR, IDTR, GDTR, LDTR and the SYSENTER MSRs. */
1600 SVM_READ_SELREG(LDTR, ldtr);
1601 SVM_READ_SELREG(TR, tr);
1602
1603 pCtx->gdtr.cbGdt = pVMCB->guest.GDTR.u32Limit;
1604 pCtx->gdtr.pGdt = pVMCB->guest.GDTR.u64Base;
1605
1606 pCtx->idtr.cbIdt = pVMCB->guest.IDTR.u32Limit;
1607 pCtx->idtr.pIdt = pVMCB->guest.IDTR.u64Base;
1608
1609 /*
1610 * System MSRs
1611 */
1612 pCtx->SysEnter.cs = pVMCB->guest.u64SysEnterCS;
1613 pCtx->SysEnter.eip = pVMCB->guest.u64SysEnterEIP;
1614 pCtx->SysEnter.esp = pVMCB->guest.u64SysEnterESP;
1615 }
1616
1617 /* Signal changes for the recompiler. */
1618 CPUMSetChangedFlags(pVM, CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_LDTR | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_TR | CPUM_CHANGED_HIDDEN_SEL_REGS);
1619
1620 /* If we executed vmrun and an external irq was pending, then we don't have to do a full sync the next time. */
1621 if (exitCode == SVM_EXIT_INTR)
1622 {
1623 STAM_COUNTER_INC(&pVM->hwaccm.s.StatPendingHostIrq);
1624 /* On the next entry we'll only sync the host context. */
1625 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_HOST_CONTEXT;
1626 }
1627 else
1628 {
1629 /* On the next entry we'll sync everything. */
1630 /** @todo we can do better than this */
1631 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
1632 }
1633
1634 /* translate into a less severe return code */
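    /* (VINF_EM_RAW_EMULATE_INSTR sends the instruction to ring 3 for emulation rather than propagating an error status.) */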
1635 if (rc == VERR_EM_INTERPRETER)
1636 rc = VINF_EM_RAW_EMULATE_INSTR;
1637
1638 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1639 return rc;
1640}
1641
1642/**
1643 * Enters the AMD-V session
1644 *
1645 * @returns VBox status code.
1646 * @param pVM The VM to operate on.
1647 * @param pCpu CPU info struct
1648 */
1649HWACCMR0DECL(int) SVMR0Enter(PVM pVM, PHWACCM_CPUINFO pCpu)
1650{
1651 Assert(pVM->hwaccm.s.svm.fSupported);
1652
1653 if (pVM->hwaccm.s.svm.idLastCpu != pCpu->idCpu)
1654 {
1655 /* Force a TLB flush on VM entry. */
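        /* (The VM last ran on a different physical CPU, so TLB entries tagged with our ASID on this CPU may be stale or belong to another VM.) */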
1656 pVM->hwaccm.s.svm.fForceTLBFlush = true;
1657 }
1658 pVM->hwaccm.s.svm.idLastCpu = pCpu->idCpu;
1659
1660 pVM->hwaccm.s.svm.fResumeVM = false;
1661
1662 /* Force to reload LDTR, so we'll execute VMLoad to load additional guest state. */
1663 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_LDTR;
1664
1665 return VINF_SUCCESS;
1666}
1667
1668
1669/**
1670 * Leaves the AMD-V session
1671 *
1672 * @returns VBox status code.
1673 * @param pVM The VM to operate on.
1674 */
1675HWACCMR0DECL(int) SVMR0Leave(PVM pVM)
1676{
1677 Assert(pVM->hwaccm.s.svm.fSupported);
1678 return VINF_SUCCESS;
1679}
1680
1681
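/**
 * Interprets the operand of an INVLPG instruction and invalidates the page:
 * first in the shadow paging structures via PGM, then in the hardware TLB
 * for the given ASID using invlpga.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pCpu        Disassembler state for the decoded INVLPG instruction.
 * @param   pRegFrame   The register frame.
 * @param   uASID       Tagged TLB id for the guest.
 */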
1682static int svmInterpretInvlPg(PVM pVM, PDISCPUSTATE pCpu, PCPUMCTXCORE pRegFrame, uint32_t uASID)
1683{
1684 OP_PARAMVAL param1;
1685 RTGCPTR addr;
1686
1687 int rc = DISQueryParamVal(pRegFrame, pCpu, &pCpu->param1, &param1, PARAM_SOURCE);
1688    if (VBOX_FAILURE(rc))
1689 return VERR_EM_INTERPRETER;
1690
1691    switch (param1.type)
1692 {
1693 case PARMTYPE_IMMEDIATE:
1694 case PARMTYPE_ADDRESS:
1695        if (!(param1.flags & PARAM_VAL32))
1696 return VERR_EM_INTERPRETER;
1697 addr = (RTGCPTR)param1.val.val32;
1698 break;
1699
1700 default:
1701 return VERR_EM_INTERPRETER;
1702 }
1703
1704    /** @todo Is addr always a flat linear address, or is it DS-based
1705     *        (in the absence of segment override prefixes)?
1706     */
1707 rc = PGMInvalidatePage(pVM, addr);
1708 if (VBOX_SUCCESS(rc))
1709 {
1710 /* Manually invalidate the page for the VM's TLB. */
1711 Log(("SVMInvlpgA %VGv ASID=%d\n", addr, uASID));
1712 SVMInvlpgA(addr, uASID);
1713 return VINF_SUCCESS;
1714 }
1715 Assert(rc == VERR_REM_FLUSHED_PAGES_OVERFLOW);
1716 return rc;
1717}
1718
1719/**
1720 * Interprets INVLPG
1721 *
1722 * @returns VBox status code.
1723 * @retval VINF_* Scheduling instructions.
1724 * @retval VERR_EM_INTERPRETER Something we can't cope with.
1725 * @retval VERR_* Fatal errors.
1726 *
1727 * @param pVM The VM handle.
1728 * @param pRegFrame The register frame.
1729 * @param   uASID       Tagged TLB id for the guest (ASID).
1730 *
1731 * Updates the EIP if an instruction was executed successfully.
1732 */
1733static int SVMR0InterpretInvpg(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uASID)
1734{
1735 /*
1736 * Only allow 32-bit code.
1737 */
1738 if (SELMIsSelector32Bit(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid))
1739 {
1740 RTGCPTR pbCode;
1741 int rc = SELMValidateAndConvertCSAddr(pVM, pRegFrame->eflags, pRegFrame->ss, pRegFrame->cs, &pRegFrame->csHid, (RTGCPTR)pRegFrame->eip, &pbCode);
1742 if (VBOX_SUCCESS(rc))
1743 {
1744 uint32_t cbOp;
1745 DISCPUSTATE Cpu;
1746
1747 Cpu.mode = CPUMODE_32BIT;
1748 rc = EMInterpretDisasOneEx(pVM, pbCode, pRegFrame, &Cpu, &cbOp);
1749 Assert(VBOX_FAILURE(rc) || Cpu.pCurInstr->opcode == OP_INVLPG);
1750 if (VBOX_SUCCESS(rc) && Cpu.pCurInstr->opcode == OP_INVLPG)
1751 {
1752 Assert(cbOp == Cpu.opsize);
1753 rc = svmInterpretInvlPg(pVM, &Cpu, pRegFrame, uASID);
1754 if (VBOX_SUCCESS(rc))
1755 {
1756 pRegFrame->eip += cbOp; /* Move on to the next instruction. */
1757 }
1758 return rc;
1759 }
1760 }
1761 }
1762 return VERR_EM_INTERPRETER;
1763}
1764
1765
1766/**
1767 * Invalidates a guest page
1768 *
1769 * @returns VBox status code.
1770 * @param pVM The VM to operate on.
1771 * @param GCVirt Page to invalidate
1772 */
1773HWACCMR0DECL(int) SVMR0InvalidatePage(PVM pVM, RTGCPTR GCVirt)
1774{
1775    bool fFlushPending = pVM->hwaccm.s.svm.fAlwaysFlushTLB || pVM->hwaccm.s.svm.fForceTLBFlush;
1776
1777 /* Skip it if a TLB flush is already pending. */
1778 if (!fFlushPending)
1779 {
1780 SVM_VMCB *pVMCB;
1781
1782 Log2(("SVMR0InvalidatePage %VGv\n", GCVirt));
1783 AssertReturn(pVM, VERR_INVALID_PARAMETER);
1784 Assert(pVM->hwaccm.s.svm.fSupported);
1785
1786 pVMCB = (SVM_VMCB *)pVM->hwaccm.s.svm.pVMCB;
1787 AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_EM_INTERNAL_ERROR);
1788
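        /* invlpga invalidates the TLB mapping of the given guest-virtual address for the specified ASID only. */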
1789 STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushPageManual);
1790 SVMInvlpgA(GCVirt, pVMCB->ctrl.TLBCtrl.n.u32ASID);
1791 }
1792 return VINF_SUCCESS;
1793}
1794
1795/**
1796 * Flushes the guest TLB
1797 *
1798 * @returns VBox status code.
1799 * @param pVM The VM to operate on.
1800 */
1801HWACCMR0DECL(int) SVMR0FlushTLB(PVM pVM)
1802{
1803 Log2(("SVMR0FlushTLB\n"));
1804 pVM->hwaccm.s.svm.fForceTLBFlush = true;
1805 STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushTLBManual);
1806 return VINF_SUCCESS;
1807}