VirtualBox

source: vbox/trunk/src/VBox/VMM/HWACCM.cpp@30037

Last change on this file since 30037 was 29737, checked in by vboxsync, 14 years ago

Fixed dropping back to the recompiler too often when running unrestricted guest code that switches mode very often.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 130.4 KB
1/* $Id: HWACCM.cpp 29737 2010-05-21 14:07:52Z vboxsync $ */
2/** @file
3 * HWACCM - Intel/AMD VM Hardware Support Manager
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_HWACCM
22#include <VBox/cpum.h>
23#include <VBox/stam.h>
24#include <VBox/mm.h>
25#include <VBox/pdmapi.h>
26#include <VBox/pgm.h>
27#include <VBox/ssm.h>
28#include <VBox/trpm.h>
29#include <VBox/dbgf.h>
30#include <VBox/iom.h>
31#include <VBox/patm.h>
32#include <VBox/csam.h>
33#include <VBox/selm.h>
34#include <VBox/rem.h>
35#include <VBox/hwacc_vmx.h>
36#include <VBox/hwacc_svm.h>
37#include "HWACCMInternal.h"
38#include <VBox/vm.h>
39#include <VBox/err.h>
40#include <VBox/param.h>
41
42#include <iprt/assert.h>
43#include <VBox/log.h>
44#include <iprt/asm.h>
45#include <iprt/asm-amd64-x86.h>
46#include <iprt/string.h>
47#include <iprt/env.h>
48#include <iprt/thread.h>
49
50/*******************************************************************************
51* Global Variables *
52*******************************************************************************/
53#ifdef VBOX_WITH_STATISTICS
54# define EXIT_REASON(def, val, str) #def " - " #val " - " str
55# define EXIT_REASON_NIL() NULL
56/** Exit reason descriptions for VT-x, used to describe statistics. */
57static const char * const g_apszVTxExitReasons[MAX_EXITREASON_STAT] =
58{
59 EXIT_REASON(VMX_EXIT_EXCEPTION , 0, "Exception or non-maskable interrupt (NMI)."),
60 EXIT_REASON(VMX_EXIT_EXTERNAL_IRQ , 1, "External interrupt."),
61 EXIT_REASON(VMX_EXIT_TRIPLE_FAULT , 2, "Triple fault."),
62 EXIT_REASON(VMX_EXIT_INIT_SIGNAL , 3, "INIT signal."),
63 EXIT_REASON(VMX_EXIT_SIPI , 4, "Start-up IPI (SIPI)."),
64 EXIT_REASON(VMX_EXIT_IO_SMI_IRQ , 5, "I/O system-management interrupt (SMI)."),
65 EXIT_REASON(VMX_EXIT_SMI_IRQ , 6, "Other SMI."),
66 EXIT_REASON(VMX_EXIT_IRQ_WINDOW , 7, "Interrupt window."),
67 EXIT_REASON_NIL(),
68 EXIT_REASON(VMX_EXIT_TASK_SWITCH , 9, "Task switch."),
69 EXIT_REASON(VMX_EXIT_CPUID , 10, "Guest software attempted to execute CPUID."),
70 EXIT_REASON_NIL(),
71 EXIT_REASON(VMX_EXIT_HLT , 12, "Guest software attempted to execute HLT."),
72 EXIT_REASON(VMX_EXIT_INVD , 13, "Guest software attempted to execute INVD."),
 73 EXIT_REASON(VMX_EXIT_INVPG , 14, "Guest software attempted to execute INVLPG."),
74 EXIT_REASON(VMX_EXIT_RDPMC , 15, "Guest software attempted to execute RDPMC."),
75 EXIT_REASON(VMX_EXIT_RDTSC , 16, "Guest software attempted to execute RDTSC."),
76 EXIT_REASON(VMX_EXIT_RSM , 17, "Guest software attempted to execute RSM in SMM."),
77 EXIT_REASON(VMX_EXIT_VMCALL , 18, "Guest software executed VMCALL."),
78 EXIT_REASON(VMX_EXIT_VMCLEAR , 19, "Guest software executed VMCLEAR."),
79 EXIT_REASON(VMX_EXIT_VMLAUNCH , 20, "Guest software executed VMLAUNCH."),
80 EXIT_REASON(VMX_EXIT_VMPTRLD , 21, "Guest software executed VMPTRLD."),
81 EXIT_REASON(VMX_EXIT_VMPTRST , 22, "Guest software executed VMPTRST."),
82 EXIT_REASON(VMX_EXIT_VMREAD , 23, "Guest software executed VMREAD."),
83 EXIT_REASON(VMX_EXIT_VMRESUME , 24, "Guest software executed VMRESUME."),
84 EXIT_REASON(VMX_EXIT_VMWRITE , 25, "Guest software executed VMWRITE."),
85 EXIT_REASON(VMX_EXIT_VMXOFF , 26, "Guest software executed VMXOFF."),
86 EXIT_REASON(VMX_EXIT_VMXON , 27, "Guest software executed VMXON."),
87 EXIT_REASON(VMX_EXIT_CRX_MOVE , 28, "Control-register accesses."),
88 EXIT_REASON(VMX_EXIT_DRX_MOVE , 29, "Debug-register accesses."),
89 EXIT_REASON(VMX_EXIT_PORT_IO , 30, "I/O instruction."),
90 EXIT_REASON(VMX_EXIT_RDMSR , 31, "RDMSR. Guest software attempted to execute RDMSR."),
91 EXIT_REASON(VMX_EXIT_WRMSR , 32, "WRMSR. Guest software attempted to execute WRMSR."),
92 EXIT_REASON(VMX_EXIT_ERR_INVALID_GUEST_STATE, 33, "VM-entry failure due to invalid guest state."),
93 EXIT_REASON(VMX_EXIT_ERR_MSR_LOAD , 34, "VM-entry failure due to MSR loading."),
94 EXIT_REASON_NIL(),
95 EXIT_REASON(VMX_EXIT_MWAIT , 36, "Guest software executed MWAIT."),
96 EXIT_REASON_NIL(),
97 EXIT_REASON_NIL(),
98 EXIT_REASON(VMX_EXIT_MONITOR , 39, "Guest software attempted to execute MONITOR."),
99 EXIT_REASON(VMX_EXIT_PAUSE , 40, "Guest software attempted to execute PAUSE."),
100 EXIT_REASON(VMX_EXIT_ERR_MACHINE_CHECK , 41, "VM-entry failure due to machine-check."),
101 EXIT_REASON_NIL(),
102 EXIT_REASON(VMX_EXIT_TPR , 43, "TPR below threshold. Guest software executed MOV to CR8."),
103 EXIT_REASON(VMX_EXIT_APIC_ACCESS , 44, "APIC access. Guest software attempted to access memory at a physical address on the APIC-access page."),
104 EXIT_REASON_NIL(),
105 EXIT_REASON(VMX_EXIT_XDTR_ACCESS , 46, "Access to GDTR or IDTR. Guest software attempted to execute LGDT, LIDT, SGDT, or SIDT."),
106 EXIT_REASON(VMX_EXIT_TR_ACCESS , 47, "Access to LDTR or TR. Guest software attempted to execute LLDT, LTR, SLDT, or STR."),
107 EXIT_REASON(VMX_EXIT_EPT_VIOLATION , 48, "EPT violation. An attempt to access memory with a guest-physical address was disallowed by the configuration of the EPT paging structures."),
108 EXIT_REASON(VMX_EXIT_EPT_MISCONFIG , 49, "EPT misconfiguration. An attempt to access memory with a guest-physical address encountered a misconfigured EPT paging-structure entry."),
109 EXIT_REASON(VMX_EXIT_INVEPT , 50, "INVEPT. Guest software attempted to execute INVEPT."),
110 EXIT_REASON_NIL(),
111 EXIT_REASON(VMX_EXIT_PREEMPTION_TIMER , 52, "VMX-preemption timer expired. The preemption timer counted down to zero."),
112 EXIT_REASON(VMX_EXIT_INVVPID , 53, "INVVPID. Guest software attempted to execute INVVPID."),
113 EXIT_REASON(VMX_EXIT_WBINVD , 54, "WBINVD. Guest software attempted to execute WBINVD."),
114 EXIT_REASON(VMX_EXIT_XSETBV , 55, "XSETBV. Guest software attempted to execute XSETBV."),
115 EXIT_REASON_NIL()
116};
117/** Exit reason descriptions for AMD-V, used to describe statistics. */
118static const char * const g_apszAmdVExitReasons[MAX_EXITREASON_STAT] =
119{
120 EXIT_REASON(SVM_EXIT_READ_CR0 , 0, "Read CR0."),
121 EXIT_REASON(SVM_EXIT_READ_CR1 , 1, "Read CR1."),
122 EXIT_REASON(SVM_EXIT_READ_CR2 , 2, "Read CR2."),
123 EXIT_REASON(SVM_EXIT_READ_CR3 , 3, "Read CR3."),
124 EXIT_REASON(SVM_EXIT_READ_CR4 , 4, "Read CR4."),
125 EXIT_REASON(SVM_EXIT_READ_CR5 , 5, "Read CR5."),
126 EXIT_REASON(SVM_EXIT_READ_CR6 , 6, "Read CR6."),
127 EXIT_REASON(SVM_EXIT_READ_CR7 , 7, "Read CR7."),
128 EXIT_REASON(SVM_EXIT_READ_CR8 , 8, "Read CR8."),
129 EXIT_REASON(SVM_EXIT_READ_CR9 , 9, "Read CR9."),
130 EXIT_REASON(SVM_EXIT_READ_CR10 , 10, "Read CR10."),
131 EXIT_REASON(SVM_EXIT_READ_CR11 , 11, "Read CR11."),
132 EXIT_REASON(SVM_EXIT_READ_CR12 , 12, "Read CR12."),
133 EXIT_REASON(SVM_EXIT_READ_CR13 , 13, "Read CR13."),
134 EXIT_REASON(SVM_EXIT_READ_CR14 , 14, "Read CR14."),
135 EXIT_REASON(SVM_EXIT_READ_CR15 , 15, "Read CR15."),
136 EXIT_REASON(SVM_EXIT_WRITE_CR0 , 16, "Write CR0."),
137 EXIT_REASON(SVM_EXIT_WRITE_CR1 , 17, "Write CR1."),
138 EXIT_REASON(SVM_EXIT_WRITE_CR2 , 18, "Write CR2."),
139 EXIT_REASON(SVM_EXIT_WRITE_CR3 , 19, "Write CR3."),
140 EXIT_REASON(SVM_EXIT_WRITE_CR4 , 20, "Write CR4."),
141 EXIT_REASON(SVM_EXIT_WRITE_CR5 , 21, "Write CR5."),
142 EXIT_REASON(SVM_EXIT_WRITE_CR6 , 22, "Write CR6."),
143 EXIT_REASON(SVM_EXIT_WRITE_CR7 , 23, "Write CR7."),
144 EXIT_REASON(SVM_EXIT_WRITE_CR8 , 24, "Write CR8."),
145 EXIT_REASON(SVM_EXIT_WRITE_CR9 , 25, "Write CR9."),
146 EXIT_REASON(SVM_EXIT_WRITE_CR10 , 26, "Write CR10."),
147 EXIT_REASON(SVM_EXIT_WRITE_CR11 , 27, "Write CR11."),
148 EXIT_REASON(SVM_EXIT_WRITE_CR12 , 28, "Write CR12."),
149 EXIT_REASON(SVM_EXIT_WRITE_CR13 , 29, "Write CR13."),
150 EXIT_REASON(SVM_EXIT_WRITE_CR14 , 30, "Write CR14."),
151 EXIT_REASON(SVM_EXIT_WRITE_CR15 , 31, "Write CR15."),
152 EXIT_REASON(SVM_EXIT_READ_DR0 , 32, "Read DR0."),
153 EXIT_REASON(SVM_EXIT_READ_DR1 , 33, "Read DR1."),
154 EXIT_REASON(SVM_EXIT_READ_DR2 , 34, "Read DR2."),
155 EXIT_REASON(SVM_EXIT_READ_DR3 , 35, "Read DR3."),
156 EXIT_REASON(SVM_EXIT_READ_DR4 , 36, "Read DR4."),
157 EXIT_REASON(SVM_EXIT_READ_DR5 , 37, "Read DR5."),
158 EXIT_REASON(SVM_EXIT_READ_DR6 , 38, "Read DR6."),
159 EXIT_REASON(SVM_EXIT_READ_DR7 , 39, "Read DR7."),
160 EXIT_REASON(SVM_EXIT_READ_DR8 , 40, "Read DR8."),
161 EXIT_REASON(SVM_EXIT_READ_DR9 , 41, "Read DR9."),
162 EXIT_REASON(SVM_EXIT_READ_DR10 , 42, "Read DR10."),
 163 EXIT_REASON(SVM_EXIT_READ_DR11 , 43, "Read DR11."),
164 EXIT_REASON(SVM_EXIT_READ_DR12 , 44, "Read DR12."),
165 EXIT_REASON(SVM_EXIT_READ_DR13 , 45, "Read DR13."),
166 EXIT_REASON(SVM_EXIT_READ_DR14 , 46, "Read DR14."),
167 EXIT_REASON(SVM_EXIT_READ_DR15 , 47, "Read DR15."),
168 EXIT_REASON(SVM_EXIT_WRITE_DR0 , 48, "Write DR0."),
169 EXIT_REASON(SVM_EXIT_WRITE_DR1 , 49, "Write DR1."),
170 EXIT_REASON(SVM_EXIT_WRITE_DR2 , 50, "Write DR2."),
171 EXIT_REASON(SVM_EXIT_WRITE_DR3 , 51, "Write DR3."),
172 EXIT_REASON(SVM_EXIT_WRITE_DR4 , 52, "Write DR4."),
173 EXIT_REASON(SVM_EXIT_WRITE_DR5 , 53, "Write DR5."),
174 EXIT_REASON(SVM_EXIT_WRITE_DR6 , 54, "Write DR6."),
175 EXIT_REASON(SVM_EXIT_WRITE_DR7 , 55, "Write DR7."),
176 EXIT_REASON(SVM_EXIT_WRITE_DR8 , 56, "Write DR8."),
177 EXIT_REASON(SVM_EXIT_WRITE_DR9 , 57, "Write DR9."),
178 EXIT_REASON(SVM_EXIT_WRITE_DR10 , 58, "Write DR10."),
179 EXIT_REASON(SVM_EXIT_WRITE_DR11 , 59, "Write DR11."),
180 EXIT_REASON(SVM_EXIT_WRITE_DR12 , 60, "Write DR12."),
181 EXIT_REASON(SVM_EXIT_WRITE_DR13 , 61, "Write DR13."),
182 EXIT_REASON(SVM_EXIT_WRITE_DR14 , 62, "Write DR14."),
183 EXIT_REASON(SVM_EXIT_WRITE_DR15 , 63, "Write DR15."),
184 EXIT_REASON(SVM_EXIT_EXCEPTION_0 , 64, "Exception Vector 0 (0x0)."),
185 EXIT_REASON(SVM_EXIT_EXCEPTION_1 , 65, "Exception Vector 1 (0x1)."),
186 EXIT_REASON(SVM_EXIT_EXCEPTION_2 , 66, "Exception Vector 2 (0x2)."),
187 EXIT_REASON(SVM_EXIT_EXCEPTION_3 , 67, "Exception Vector 3 (0x3)."),
188 EXIT_REASON(SVM_EXIT_EXCEPTION_4 , 68, "Exception Vector 4 (0x4)."),
189 EXIT_REASON(SVM_EXIT_EXCEPTION_5 , 69, "Exception Vector 5 (0x5)."),
190 EXIT_REASON(SVM_EXIT_EXCEPTION_6 , 70, "Exception Vector 6 (0x6)."),
191 EXIT_REASON(SVM_EXIT_EXCEPTION_7 , 71, "Exception Vector 7 (0x7)."),
192 EXIT_REASON(SVM_EXIT_EXCEPTION_8 , 72, "Exception Vector 8 (0x8)."),
193 EXIT_REASON(SVM_EXIT_EXCEPTION_9 , 73, "Exception Vector 9 (0x9)."),
194 EXIT_REASON(SVM_EXIT_EXCEPTION_A , 74, "Exception Vector 10 (0xA)."),
195 EXIT_REASON(SVM_EXIT_EXCEPTION_B , 75, "Exception Vector 11 (0xB)."),
196 EXIT_REASON(SVM_EXIT_EXCEPTION_C , 76, "Exception Vector 12 (0xC)."),
197 EXIT_REASON(SVM_EXIT_EXCEPTION_D , 77, "Exception Vector 13 (0xD)."),
198 EXIT_REASON(SVM_EXIT_EXCEPTION_E , 78, "Exception Vector 14 (0xE)."),
199 EXIT_REASON(SVM_EXIT_EXCEPTION_F , 79, "Exception Vector 15 (0xF)."),
200 EXIT_REASON(SVM_EXIT_EXCEPTION_10 , 80, "Exception Vector 16 (0x10)."),
201 EXIT_REASON(SVM_EXIT_EXCEPTION_11 , 81, "Exception Vector 17 (0x11)."),
202 EXIT_REASON(SVM_EXIT_EXCEPTION_12 , 82, "Exception Vector 18 (0x12)."),
203 EXIT_REASON(SVM_EXIT_EXCEPTION_13 , 83, "Exception Vector 19 (0x13)."),
204 EXIT_REASON(SVM_EXIT_EXCEPTION_14 , 84, "Exception Vector 20 (0x14)."),
 205 EXIT_REASON(SVM_EXIT_EXCEPTION_15 , 85, "Exception Vector 21 (0x15)."),
206 EXIT_REASON(SVM_EXIT_EXCEPTION_16 , 86, "Exception Vector 22 (0x16)."),
207 EXIT_REASON(SVM_EXIT_EXCEPTION_17 , 87, "Exception Vector 23 (0x17)."),
208 EXIT_REASON(SVM_EXIT_EXCEPTION_18 , 88, "Exception Vector 24 (0x18)."),
209 EXIT_REASON(SVM_EXIT_EXCEPTION_19 , 89, "Exception Vector 25 (0x19)."),
210 EXIT_REASON(SVM_EXIT_EXCEPTION_1A , 90, "Exception Vector 26 (0x1A)."),
211 EXIT_REASON(SVM_EXIT_EXCEPTION_1B , 91, "Exception Vector 27 (0x1B)."),
212 EXIT_REASON(SVM_EXIT_EXCEPTION_1C , 92, "Exception Vector 28 (0x1C)."),
213 EXIT_REASON(SVM_EXIT_EXCEPTION_1D , 93, "Exception Vector 29 (0x1D)."),
214 EXIT_REASON(SVM_EXIT_EXCEPTION_1E , 94, "Exception Vector 30 (0x1E)."),
215 EXIT_REASON(SVM_EXIT_EXCEPTION_1F , 95, "Exception Vector 31 (0x1F)."),
216 EXIT_REASON(SVM_EXIT_EXCEPTION_INTR , 96, "Physical maskable interrupt."),
217 EXIT_REASON(SVM_EXIT_EXCEPTION_NMI , 97, "Physical non-maskable interrupt."),
218 EXIT_REASON(SVM_EXIT_EXCEPTION_SMI , 98, "System management interrupt."),
219 EXIT_REASON(SVM_EXIT_EXCEPTION_INIT , 99, "Physical INIT signal."),
 220 EXIT_REASON(SVM_EXIT_EXCEPTION_VINTR ,100, "Virtual interrupt."),
221 EXIT_REASON(SVM_EXIT_EXCEPTION_CR0_SEL_WRITE ,101, "Write to CR0 that changed any bits other than CR0.TS or CR0.MP."),
 222 EXIT_REASON(SVM_EXIT_EXCEPTION_IDTR_READ ,102, "Read IDTR."),
 223 EXIT_REASON(SVM_EXIT_EXCEPTION_GDTR_READ ,103, "Read GDTR."),
224 EXIT_REASON(SVM_EXIT_EXCEPTION_LDTR_READ ,104, "Read LDTR."),
225 EXIT_REASON(SVM_EXIT_EXCEPTION_TR_READ ,105, "Read TR."),
 226 EXIT_REASON(SVM_EXIT_EXCEPTION_IDTR_WRITE ,106, "Write IDTR."),
 227 EXIT_REASON(SVM_EXIT_EXCEPTION_GDTR_WRITE ,107, "Write GDTR."),
 228 EXIT_REASON(SVM_EXIT_EXCEPTION_LDTR_WRITE ,108, "Write LDTR."),
 229 EXIT_REASON(SVM_EXIT_EXCEPTION_TR_WRITE ,109, "Write TR."),
230 EXIT_REASON(SVM_EXIT_RDTSC ,110, "RDTSC instruction."),
231 EXIT_REASON(SVM_EXIT_RDPMC ,111, "RDPMC instruction."),
232 EXIT_REASON(SVM_EXIT_PUSHF ,112, "PUSHF instruction."),
233 EXIT_REASON(SVM_EXIT_POPF ,113, "POPF instruction."),
234 EXIT_REASON(SVM_EXIT_CPUID ,114, "CPUID instruction."),
235 EXIT_REASON(SVM_EXIT_RSM ,115, "RSM instruction."),
236 EXIT_REASON(SVM_EXIT_IRET ,116, "IRET instruction."),
237 EXIT_REASON(SVM_EXIT_SWINT ,117, "Software interrupt (INTn instructions)."),
238 EXIT_REASON(SVM_EXIT_INVD ,118, "INVD instruction."),
239 EXIT_REASON(SVM_EXIT_PAUSE ,119, "PAUSE instruction."),
240 EXIT_REASON(SVM_EXIT_HLT ,120, "HLT instruction."),
241 EXIT_REASON(SVM_EXIT_INVLPG ,121, "INVLPG instruction."),
242 EXIT_REASON(SVM_EXIT_INVLPGA ,122, "INVLPGA instruction."),
243 EXIT_REASON(SVM_EXIT_IOIO ,123, "IN/OUT accessing protected port (EXITINFO1 field provides more information)."),
244 EXIT_REASON(SVM_EXIT_MSR ,124, "RDMSR or WRMSR access to protected MSR."),
245 EXIT_REASON(SVM_EXIT_TASK_SWITCH ,125, "Task switch."),
 246 EXIT_REASON(SVM_EXIT_FERR_FREEZE ,126, "FP legacy handling enabled, and the processor is frozen in an x87/MMX instruction waiting for an interrupt."),
247 EXIT_REASON(SVM_EXIT_TASK_SHUTDOWN ,127, "Shutdown."),
248 EXIT_REASON(SVM_EXIT_TASK_VMRUN ,128, "VMRUN instruction."),
249 EXIT_REASON(SVM_EXIT_TASK_VMCALL ,129, "VMCALL instruction."),
250 EXIT_REASON(SVM_EXIT_TASK_VMLOAD ,130, "VMLOAD instruction."),
251 EXIT_REASON(SVM_EXIT_TASK_VMSAVE ,131, "VMSAVE instruction."),
252 EXIT_REASON(SVM_EXIT_TASK_STGI ,132, "STGI instruction."),
253 EXIT_REASON(SVM_EXIT_TASK_CLGI ,133, "CLGI instruction."),
254 EXIT_REASON(SVM_EXIT_TASK_SKINIT ,134, "SKINIT instruction."),
255 EXIT_REASON(SVM_EXIT_TASK_RDTSCP ,135, "RDTSCP instruction."),
256 EXIT_REASON(SVM_EXIT_TASK_ICEBP ,136, "ICEBP instruction."),
257 EXIT_REASON(SVM_EXIT_TASK_WBINVD ,137, "WBINVD instruction."),
258 EXIT_REASON(SVM_EXIT_TASK_MONITOR ,138, "MONITOR instruction."),
259 EXIT_REASON(SVM_EXIT_MWAIT_UNCOND ,139, "MWAIT instruction unconditional."),
260 EXIT_REASON(SVM_EXIT_MWAIT_ARMED ,140, "MWAIT instruction when armed."),
 261 EXIT_REASON(SVM_EXIT_NPF ,1024, "Nested paging: host-level page fault occurred (EXITINFO1 contains the fault error code; EXITINFO2 contains the guest physical address causing the fault)."),
262 EXIT_REASON_NIL()
263};
264# undef EXIT_REASON
265# undef EXIT_REASON_NIL
266#endif /* VBOX_WITH_STATISTICS */
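
The two EXIT_REASON macros above rely only on standard preprocessor stringization and adjacent string-literal concatenation. As a quick standalone illustration (not part of the file), one table entry expands like this:

    #include <stdio.h>

    #define EXIT_REASON(def, val, str)  #def " - " #val " - " str
    #define EXIT_REASON_NIL()           NULL

    int main(void)
    {
        /* #def and #val become "VMX_EXIT_CPUID" and "10"; the adjacent string
           literals are then concatenated into a single description string. */
        static const char s_szDesc[] = EXIT_REASON(VMX_EXIT_CPUID, 10, "Guest software attempted to execute CPUID.");
        puts(s_szDesc); /* prints: VMX_EXIT_CPUID - 10 - Guest software attempted to execute CPUID. */
        return 0;
    }
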
267
268/*******************************************************************************
269* Internal Functions *
270*******************************************************************************/
271static DECLCALLBACK(int) hwaccmR3Save(PVM pVM, PSSMHANDLE pSSM);
272static DECLCALLBACK(int) hwaccmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
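
These two callbacks are implemented further down in the file; the following is only a minimal sketch of the general shape such SSM save/load callbacks take, under the assumption of a single uint32_t field. The names exampleR3Save/exampleR3Load and the choice of field are purely illustrative, not the real implementations.

    static DECLCALLBACK(int) exampleR3Save(PVM pVM, PSSMHANDLE pSSM)
    {
        /* Write one per-VM field to the saved state stream. */
        return SSMR3PutU32(pSSM, pVM->hwaccm.s.cMaxResumeLoops);
    }

    static DECLCALLBACK(int) exampleR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
    {
        NOREF(uPass);
        /* Reject saved states written by an incompatible unit version. */
        if (uVersion != HWACCM_SSM_VERSION)
            return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
        return SSMR3GetU32(pSSM, &pVM->hwaccm.s.cMaxResumeLoops);
    }
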
273
274
275/**
276 * Initializes the HWACCM.
277 *
278 * @returns VBox status code.
279 * @param pVM The VM to operate on.
280 */
281VMMR3DECL(int) HWACCMR3Init(PVM pVM)
282{
283 LogFlow(("HWACCMR3Init\n"));
284
285 /*
286 * Assert alignment and sizes.
287 */
288 AssertCompileMemberAlignment(VM, hwaccm.s, 32);
289 AssertCompile(sizeof(pVM->hwaccm.s) <= sizeof(pVM->hwaccm.padding));
290
291 /* Some structure checks. */
292 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, u8Reserved3) == 0xC0, ("u8Reserved3 offset = %x\n", RT_OFFSETOF(SVM_VMCB, u8Reserved3)));
293 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.EventInject) == 0xA8, ("ctrl.EventInject offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.EventInject)));
294 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.ExitIntInfo) == 0x88, ("ctrl.ExitIntInfo offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.ExitIntInfo)));
295 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.TLBCtrl) == 0x58, ("ctrl.TLBCtrl offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.TLBCtrl)));
296
297 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest) == 0x400, ("guest offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest)));
298 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved4) == 0x4A0, ("guest.u8Reserved4 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved4)));
299 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved6) == 0x4D8, ("guest.u8Reserved6 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved6)));
300 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved7) == 0x580, ("guest.u8Reserved7 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved7)));
301 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved9) == 0x648, ("guest.u8Reserved9 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved9)));
302 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64GPAT) == 0x668, ("guest.u64GPAT offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64GPAT)));
 303 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, u8Reserved10) == 0x698, ("u8Reserved10 offset = %x\n", RT_OFFSETOF(SVM_VMCB, u8Reserved10)));
304 AssertReleaseMsg(sizeof(SVM_VMCB) == 0x1000, ("SVM_VMCB size = %x\n", sizeof(SVM_VMCB)));
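
The offsets checked above are dictated by the AMD-V hardware layout of the VMCB. A compile-time version of the same idea in plain standard C could look like the sketch below; EXAMPLEVMCB is a hypothetical cut-down structure used only for illustration, not the real SVM_VMCB.

    #include <stddef.h>     /* offsetof */

    typedef struct EXAMPLEVMCB
    {
        unsigned char au8Ctrl[0x400];    /* control area (hardware-defined size) */
        unsigned char au8Guest[0xC00];   /* guest state save area                */
    } EXAMPLEVMCB;

    /* The build fails if the layout drifts from the hardware-defined offsets,
       which is the same property the runtime AssertReleaseMsg checks enforce. */
    typedef char example_guest_offset_check[offsetof(EXAMPLEVMCB, au8Guest) == 0x400 ? 1 : -1];
    typedef char example_vmcb_size_check  [sizeof(EXAMPLEVMCB) == 0x1000 ? 1 : -1];
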
305
306
307 /*
308 * Register the saved state data unit.
309 */
310 int rc = SSMR3RegisterInternal(pVM, "HWACCM", 0, HWACCM_SSM_VERSION, sizeof(HWACCM),
311 NULL, NULL, NULL,
312 NULL, hwaccmR3Save, NULL,
313 NULL, hwaccmR3Load, NULL);
314 if (RT_FAILURE(rc))
315 return rc;
316
317 /* Misc initialisation. */
318 pVM->hwaccm.s.vmx.fSupported = false;
319 pVM->hwaccm.s.svm.fSupported = false;
320 pVM->hwaccm.s.vmx.fEnabled = false;
321 pVM->hwaccm.s.svm.fEnabled = false;
322
323 pVM->hwaccm.s.fNestedPaging = false;
324 pVM->hwaccm.s.fLargePages = false;
325
326 /* Disabled by default. */
327 pVM->fHWACCMEnabled = false;
328
329 /*
330 * Check CFGM options.
331 */
332 PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
333 PCFGMNODE pHWVirtExt = CFGMR3GetChild(pRoot, "HWVirtExt/");
334 /* Nested paging: disabled by default. */
335 rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableNestedPaging", &pVM->hwaccm.s.fAllowNestedPaging, false);
336 AssertRC(rc);
337
338 /* Large pages: disabled by default. */
339 rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableLargePages", &pVM->hwaccm.s.fLargePages, false);
340 AssertRC(rc);
341
342 /* VT-x VPID: disabled by default. */
343 rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableVPID", &pVM->hwaccm.s.vmx.fAllowVPID, false);
344 AssertRC(rc);
345
 346 /* HWACCM support must be explicitly enabled in the configuration file. */
347 rc = CFGMR3QueryBoolDef(pHWVirtExt, "Enabled", &pVM->hwaccm.s.fAllowed, false);
348 AssertRC(rc);
349
 350 /* TPR patching for 32-bit (Windows) guests with an IO-APIC: disabled by default. */
351 rc = CFGMR3QueryBoolDef(pHWVirtExt, "TPRPatchingEnabled", &pVM->hwaccm.s.fTRPPatchingAllowed, false);
352 AssertRC(rc);
353
354#ifdef RT_OS_DARWIN
355 if (VMMIsHwVirtExtForced(pVM) != pVM->hwaccm.s.fAllowed)
356#else
357 if (VMMIsHwVirtExtForced(pVM) && !pVM->hwaccm.s.fAllowed)
358#endif
359 {
360 AssertLogRelMsgFailed(("VMMIsHwVirtExtForced=%RTbool fAllowed=%RTbool\n",
361 VMMIsHwVirtExtForced(pVM), pVM->hwaccm.s.fAllowed));
362 return VERR_HWACCM_CONFIG_MISMATCH;
363 }
364
365 if (VMMIsHwVirtExtForced(pVM))
366 pVM->fHWACCMEnabled = true;
367
368#if HC_ARCH_BITS == 32
 369 /* 64-bit guest mode is configurable and depends on both the host kernel mode and VT-x.
370 * (To use the default, don't set 64bitEnabled in CFGM.) */
371 rc = CFGMR3QueryBoolDef(pHWVirtExt, "64bitEnabled", &pVM->hwaccm.s.fAllow64BitGuests, false);
372 AssertLogRelRCReturn(rc, rc);
373 if (pVM->hwaccm.s.fAllow64BitGuests)
374 {
375# ifdef RT_OS_DARWIN
376 if (!VMMIsHwVirtExtForced(pVM))
377# else
378 if (!pVM->hwaccm.s.fAllowed)
379# endif
380 return VM_SET_ERROR(pVM, VERR_INVALID_PARAMETER, "64-bit guest support was requested without also enabling HWVirtEx (VT-x/AMD-V).");
381 }
382#else
 383 /* On 64-bit hosts 64-bit guest support is enabled by default, but it can be overridden
384 * via VBoxInternal/HWVirtExt/64bitEnabled=0. (ConsoleImpl2.cpp doesn't set this to false for 64-bit.) */
385 rc = CFGMR3QueryBoolDef(pHWVirtExt, "64bitEnabled", &pVM->hwaccm.s.fAllow64BitGuests, true);
386 AssertLogRelRCReturn(rc, rc);
387#endif
388
389
390 /** Determine the init method for AMD-V and VT-x; either one global init for each host CPU
391 * or local init each time we wish to execute guest code.
392 *
 393 * Defaults to false on Mac OS X and Windows due to the higher risk of conflicts with other hypervisors.
394 */
395 rc = CFGMR3QueryBoolDef(pHWVirtExt, "Exclusive", &pVM->hwaccm.s.fGlobalInit,
396#if defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS)
397 false
398#else
399 true
400#endif
401 );
402
403 /* Max number of resume loops. */
404 rc = CFGMR3QueryU32Def(pHWVirtExt, "MaxResumeLoops", &pVM->hwaccm.s.cMaxResumeLoops, 0 /* set by R0 later */);
405 AssertRC(rc);
406
407 return VINF_SUCCESS;
408}
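
For reference, the HWVirtExt keys queried above live under the VM's CFGM root. The following is a rough sketch of how a front end might populate them; the values are illustrative, error handling is trimmed, and CFGMR3InsertNode/CFGMR3InsertInteger are used under the assumption that they match the CFGM API in this tree.

    PCFGMNODE pHWVirtExt = NULL;
    int rc2 = CFGMR3InsertNode(CFGMR3GetRoot(pVM), "HWVirtExt", &pHWVirtExt);
    if (RT_SUCCESS(rc2))
    {
        CFGMR3InsertInteger(pHWVirtExt, "Enabled",            1);  /* allow VT-x/AMD-V at all         */
        CFGMR3InsertInteger(pHWVirtExt, "EnableNestedPaging", 1);  /* opt into nested paging          */
        CFGMR3InsertInteger(pHWVirtExt, "EnableVPID",         0);  /* leave VT-x VPID off             */
        CFGMR3InsertInteger(pHWVirtExt, "Exclusive",          0);  /* local (per-VM) init, not global */
    }

The same keys can also be overridden externally via the VBoxInternal/HWVirtExt/ extradata path mentioned in the 64-bit comment above.
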
409
410/**
411 * Initializes the per-VCPU HWACCM.
412 *
413 * @returns VBox status code.
414 * @param pVM The VM to operate on.
415 */
416VMMR3DECL(int) HWACCMR3InitCPU(PVM pVM)
417{
418 LogFlow(("HWACCMR3InitCPU\n"));
419
420 for (VMCPUID i = 0; i < pVM->cCpus; i++)
421 {
422 PVMCPU pVCpu = &pVM->aCpus[i];
423
424 pVCpu->hwaccm.s.fActive = false;
425 }
426
427#ifdef VBOX_WITH_STATISTICS
428 STAM_REG(pVM, &pVM->hwaccm.s.StatTPRPatchSuccess, STAMTYPE_COUNTER, "/HWACCM/TPR/Patch/Success", STAMUNIT_OCCURENCES, "Number of times an instruction was successfully patched.");
429 STAM_REG(pVM, &pVM->hwaccm.s.StatTPRPatchFailure, STAMTYPE_COUNTER, "/HWACCM/TPR/Patch/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful patch attempts.");
 430 STAM_REG(pVM, &pVM->hwaccm.s.StatTPRReplaceSuccess, STAMTYPE_COUNTER, "/HWACCM/TPR/Replace/Success",STAMUNIT_OCCURENCES, "Number of times an instruction was successfully replaced.");
 431 STAM_REG(pVM, &pVM->hwaccm.s.StatTPRReplaceFailure, STAMTYPE_COUNTER, "/HWACCM/TPR/Replace/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful replacement attempts.");
432
433 /*
434 * Statistics.
435 */
436 for (VMCPUID i = 0; i < pVM->cCpus; i++)
437 {
438 PVMCPU pVCpu = &pVM->aCpus[i];
439 int rc;
440
441 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of RTMpPokeCpu",
442 "/PROF/HWACCM/CPU%d/Poke", i);
443 AssertRC(rc);
444 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatSpinPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of poke wait",
445 "/PROF/HWACCM/CPU%d/PokeWait", i);
446 AssertRC(rc);
447 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatSpinPokeFailed, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of poke wait when RTMpPokeCpu fails",
448 "/PROF/HWACCM/CPU%d/PokeWaitFailed", i);
449 AssertRC(rc);
450 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatEntry, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode entry",
451 "/PROF/HWACCM/CPU%d/SwitchToGC", i);
452 AssertRC(rc);
453 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode exit part 1",
454 "/PROF/HWACCM/CPU%d/SwitchFromGC_1", i);
455 AssertRC(rc);
456 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode exit part 2",
457 "/PROF/HWACCM/CPU%d/SwitchFromGC_2", i);
458 AssertRC(rc);
459# if 1 /* temporary for tracking down darwin holdup. */
460 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2Sub1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - I/O",
461 "/PROF/HWACCM/CPU%d/SwitchFromGC_2/Sub1", i);
462 AssertRC(rc);
463 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2Sub2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - CRx RWs",
464 "/PROF/HWACCM/CPU%d/SwitchFromGC_2/Sub2", i);
465 AssertRC(rc);
466 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2Sub3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - Exceptions",
467 "/PROF/HWACCM/CPU%d/SwitchFromGC_2/Sub3", i);
468 AssertRC(rc);
469# endif
470 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatInGC, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of vmlaunch",
471 "/PROF/HWACCM/CPU%d/InGC", i);
472 AssertRC(rc);
473
474# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
475 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatWorldSwitch3264, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of the 32/64 switcher",
476 "/PROF/HWACCM/CPU%d/Switcher3264", i);
477 AssertRC(rc);
478# endif
479
480# define HWACCM_REG_COUNTER(a, b) \
 481 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Occurrence counter.", b, i); \
482 AssertRC(rc);
483
484 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitShadowNM, "/HWACCM/CPU%d/Exit/Trap/Shw/#NM");
485 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestNM, "/HWACCM/CPU%d/Exit/Trap/Gst/#NM");
486 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitShadowPF, "/HWACCM/CPU%d/Exit/Trap/Shw/#PF");
487 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestPF, "/HWACCM/CPU%d/Exit/Trap/Gst/#PF");
488 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestUD, "/HWACCM/CPU%d/Exit/Trap/Gst/#UD");
489 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestSS, "/HWACCM/CPU%d/Exit/Trap/Gst/#SS");
490 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestNP, "/HWACCM/CPU%d/Exit/Trap/Gst/#NP");
491 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestGP, "/HWACCM/CPU%d/Exit/Trap/Gst/#GP");
492 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestMF, "/HWACCM/CPU%d/Exit/Trap/Gst/#MF");
493 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestDE, "/HWACCM/CPU%d/Exit/Trap/Gst/#DE");
494 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestDB, "/HWACCM/CPU%d/Exit/Trap/Gst/#DB");
495 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInvpg, "/HWACCM/CPU%d/Exit/Instr/Invlpg");
496 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInvd, "/HWACCM/CPU%d/Exit/Instr/Invd");
497 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCpuid, "/HWACCM/CPU%d/Exit/Instr/Cpuid");
498 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdtsc, "/HWACCM/CPU%d/Exit/Instr/Rdtsc");
499 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdpmc, "/HWACCM/CPU%d/Exit/Instr/Rdpmc");
500 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdmsr, "/HWACCM/CPU%d/Exit/Instr/Rdmsr");
501 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitWrmsr, "/HWACCM/CPU%d/Exit/Instr/Wrmsr");
502 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMwait, "/HWACCM/CPU%d/Exit/Instr/Mwait");
503 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMonitor, "/HWACCM/CPU%d/Exit/Instr/Monitor");
504 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitDRxWrite, "/HWACCM/CPU%d/Exit/Instr/DR/Write");
505 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitDRxRead, "/HWACCM/CPU%d/Exit/Instr/DR/Read");
506 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCLTS, "/HWACCM/CPU%d/Exit/Instr/CLTS");
507 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitLMSW, "/HWACCM/CPU%d/Exit/Instr/LMSW");
508 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCli, "/HWACCM/CPU%d/Exit/Instr/Cli");
509 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitSti, "/HWACCM/CPU%d/Exit/Instr/Sti");
510 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPushf, "/HWACCM/CPU%d/Exit/Instr/Pushf");
511 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPopf, "/HWACCM/CPU%d/Exit/Instr/Popf");
512 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIret, "/HWACCM/CPU%d/Exit/Instr/Iret");
513 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInt, "/HWACCM/CPU%d/Exit/Instr/Int");
514 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitHlt, "/HWACCM/CPU%d/Exit/Instr/Hlt");
515 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOWrite, "/HWACCM/CPU%d/Exit/IO/Write");
516 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIORead, "/HWACCM/CPU%d/Exit/IO/Read");
517 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOStringWrite, "/HWACCM/CPU%d/Exit/IO/WriteString");
518 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOStringRead, "/HWACCM/CPU%d/Exit/IO/ReadString");
519 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIrqWindow, "/HWACCM/CPU%d/Exit/IrqWindow");
520 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMaxResume, "/HWACCM/CPU%d/Exit/MaxResume");
521 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPreemptPending, "/HWACCM/CPU%d/Exit/PreemptPending");
522
523 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatSwitchGuestIrq, "/HWACCM/CPU%d/Switch/IrqPending");
524 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatSwitchToR3, "/HWACCM/CPU%d/Switch/ToR3");
525
526 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatIntInject, "/HWACCM/CPU%d/Irq/Inject");
527 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatIntReinject, "/HWACCM/CPU%d/Irq/Reinject");
528 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatPendingHostIrq, "/HWACCM/CPU%d/Irq/PendingOnHost");
529
530 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPage, "/HWACCM/CPU%d/Flush/Page");
531 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPageManual, "/HWACCM/CPU%d/Flush/Page/Virt");
532 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPhysPageManual, "/HWACCM/CPU%d/Flush/Page/Phys");
533 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLB, "/HWACCM/CPU%d/Flush/TLB");
534 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBManual, "/HWACCM/CPU%d/Flush/TLB/Manual");
535 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBCRxChange, "/HWACCM/CPU%d/Flush/TLB/CRx");
536 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPageInvlpg, "/HWACCM/CPU%d/Flush/Page/Invlpg");
537 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBWorldSwitch, "/HWACCM/CPU%d/Flush/TLB/Switch");
538 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatNoFlushTLBWorldSwitch, "/HWACCM/CPU%d/Flush/TLB/Skipped");
539 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushASID, "/HWACCM/CPU%d/Flush/TLB/ASID");
540 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBInvlpga, "/HWACCM/CPU%d/Flush/TLB/PhysInvl");
541 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTlbShootdown, "/HWACCM/CPU%d/Flush/Shootdown/Page");
542 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTlbShootdownFlush, "/HWACCM/CPU%d/Flush/Shootdown/TLB");
543
544 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCOffset, "/HWACCM/CPU%d/TSC/Offset");
545 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCIntercept, "/HWACCM/CPU%d/TSC/Intercept");
546 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCInterceptOverFlow, "/HWACCM/CPU%d/TSC/InterceptOverflow");
547
548 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxArmed, "/HWACCM/CPU%d/Debug/Armed");
549 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxContextSwitch, "/HWACCM/CPU%d/Debug/ContextSwitch");
550 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxIOCheck, "/HWACCM/CPU%d/Debug/IOCheck");
551
552 for (unsigned j=0;j<RT_ELEMENTS(pVCpu->hwaccm.s.StatExitCRxWrite);j++)
553 {
554 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExitCRxWrite[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Profiling of CRx writes",
555 "/HWACCM/CPU%d/Exit/Instr/CR/Write/%x", i, j);
556 AssertRC(rc);
557 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExitCRxRead[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Profiling of CRx reads",
558 "/HWACCM/CPU%d/Exit/Instr/CR/Read/%x", i, j);
559 AssertRC(rc);
560 }
561
562#undef HWACCM_REG_COUNTER
563
564 pVCpu->hwaccm.s.paStatExitReason = NULL;
565
566 rc = MMHyperAlloc(pVM, MAX_EXITREASON_STAT*sizeof(*pVCpu->hwaccm.s.paStatExitReason), 0, MM_TAG_HWACCM, (void **)&pVCpu->hwaccm.s.paStatExitReason);
567 AssertRC(rc);
568 if (RT_SUCCESS(rc))
569 {
570 const char * const *papszDesc = ASMIsIntelCpu() ? &g_apszVTxExitReasons[0] : &g_apszAmdVExitReasons[0];
571 for (int j=0;j<MAX_EXITREASON_STAT;j++)
572 {
573 if (papszDesc[j])
574 {
575 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.paStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
576 papszDesc[j], "/HWACCM/CPU%d/Exit/Reason/%02x", i, j);
577 AssertRC(rc);
578 }
579 }
580 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExitReasonNPF, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Nested page fault", "/HWACCM/CPU%d/Exit/Reason/#NPF", i);
581 AssertRC(rc);
582 }
583 pVCpu->hwaccm.s.paStatExitReasonR0 = MMHyperR3ToR0(pVM, pVCpu->hwaccm.s.paStatExitReason);
584# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
585 Assert(pVCpu->hwaccm.s.paStatExitReasonR0 != NIL_RTR0PTR || !VMMIsHwVirtExtForced(pVM));
586# else
587 Assert(pVCpu->hwaccm.s.paStatExitReasonR0 != NIL_RTR0PTR);
588# endif
589
590 rc = MMHyperAlloc(pVM, sizeof(STAMCOUNTER) * 256, 8, MM_TAG_HWACCM, (void **)&pVCpu->hwaccm.s.paStatInjectedIrqs);
591 AssertRCReturn(rc, rc);
592 pVCpu->hwaccm.s.paStatInjectedIrqsR0 = MMHyperR3ToR0(pVM, pVCpu->hwaccm.s.paStatInjectedIrqs);
593# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
594 Assert(pVCpu->hwaccm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR || !VMMIsHwVirtExtForced(pVM));
595# else
596 Assert(pVCpu->hwaccm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR);
597# endif
598 for (unsigned j = 0; j < 255; j++)
599 STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.paStatInjectedIrqs[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Forwarded interrupts.",
600 (j < 0x20) ? "/HWACCM/CPU%d/Interrupt/Trap/%02X" : "/HWACCM/CPU%d/Interrupt/IRQ/%02X", i, j);
601
602 }
603#endif /* VBOX_WITH_STATISTICS */
604
605#ifdef VBOX_WITH_CRASHDUMP_MAGIC
606 /* Magic marker for searching in crash dumps. */
607 for (VMCPUID i = 0; i < pVM->cCpus; i++)
608 {
609 PVMCPU pVCpu = &pVM->aCpus[i];
610
611 PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
612 strcpy((char *)pCache->aMagic, "VMCSCACHE Magic");
613 pCache->uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
614 }
615#endif
616 return VINF_SUCCESS;
617}
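
The per-exit-reason arrays registered above are indexed by the raw exit code and bumped from ring-0. A hedged sketch of that consumer side is shown below; exitReason is a hypothetical local holding the current VT-x/AMD-V exit code, and pVCpu is assumed to be in scope.

    #ifdef VBOX_WITH_STATISTICS
        /* paStatExitReasonR0 is the ring-0 mapping of the array registered in
           HWACCMR3InitCPU; out-of-range codes are simply ignored in this sketch. */
        if (exitReason < MAX_EXITREASON_STAT)
            STAM_COUNTER_INC(&pVCpu->hwaccm.s.paStatExitReasonR0[exitReason]);
    #endif
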
618
619/**
 620 * Turns off normal raw mode features.
621 *
622 * @param pVM The VM to operate on.
623 */
624static void hwaccmR3DisableRawMode(PVM pVM)
625{
626 /* Disable PATM & CSAM. */
627 PATMR3AllowPatching(pVM, false);
628 CSAMDisableScanning(pVM);
629
 630 /* Turn off IDT/LDT/GDT and TSS monitoring and syncing. */
631 SELMR3DisableMonitoring(pVM);
632 TRPMR3DisableMonitoring(pVM);
633
634 /* Disable the switcher code (safety precaution). */
635 VMMR3DisableSwitcher(pVM);
636
637 /* Disable mapping of the hypervisor into the shadow page table. */
638 PGMR3MappingsDisable(pVM);
639
640 /* Disable the switcher */
641 VMMR3DisableSwitcher(pVM);
642
643 /* Reinit the paging mode to force the new shadow mode. */
644 for (VMCPUID i = 0; i < pVM->cCpus; i++)
645 {
646 PVMCPU pVCpu = &pVM->aCpus[i];
647
648 PGMR3ChangeMode(pVM, pVCpu, PGMMODE_REAL);
649 }
650}
651
652/**
653 * Initialize VT-x or AMD-V.
654 *
655 * @returns VBox status code.
656 * @param pVM The VM handle.
657 */
658VMMR3DECL(int) HWACCMR3InitFinalizeR0(PVM pVM)
659{
660 int rc;
661
662 /* Hack to allow users to work around broken BIOSes that incorrectly set EFER.SVME, which makes us believe somebody else
663 * is already using AMD-V.
664 */
665 if ( !pVM->hwaccm.s.vmx.fSupported
666 && !pVM->hwaccm.s.svm.fSupported
667 && pVM->hwaccm.s.lLastError == VERR_SVM_IN_USE /* implies functional AMD-V */
668 && RTEnvExist("VBOX_HWVIRTEX_IGNORE_SVM_IN_USE"))
669 {
670 LogRel(("HWACCM: VBOX_HWVIRTEX_IGNORE_SVM_IN_USE active!\n"));
671 pVM->hwaccm.s.svm.fSupported = true;
672 pVM->hwaccm.s.svm.fIgnoreInUseError = true;
673 }
674 else
675 if ( !pVM->hwaccm.s.vmx.fSupported
676 && !pVM->hwaccm.s.svm.fSupported)
677 {
678 LogRel(("HWACCM: No VT-x or AMD-V CPU extension found. Reason %Rrc\n", pVM->hwaccm.s.lLastError));
679 LogRel(("HWACCM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hwaccm.s.vmx.msr.feature_ctrl));
680
681 if (VMMIsHwVirtExtForced(pVM))
682 {
683 switch (pVM->hwaccm.s.lLastError)
684 {
685 case VERR_VMX_NO_VMX:
686 return VM_SET_ERROR(pVM, VERR_VMX_NO_VMX, "VT-x is not available.");
687 case VERR_VMX_IN_VMX_ROOT_MODE:
688 return VM_SET_ERROR(pVM, VERR_VMX_IN_VMX_ROOT_MODE, "VT-x is being used by another hypervisor.");
689 case VERR_SVM_IN_USE:
690 return VM_SET_ERROR(pVM, VERR_SVM_IN_USE, "AMD-V is being used by another hypervisor.");
691 case VERR_SVM_NO_SVM:
692 return VM_SET_ERROR(pVM, VERR_SVM_NO_SVM, "AMD-V is not available.");
693 case VERR_SVM_DISABLED:
694 return VM_SET_ERROR(pVM, VERR_SVM_DISABLED, "AMD-V is disabled in the BIOS.");
695 default:
696 return pVM->hwaccm.s.lLastError;
697 }
698 }
699 return VINF_SUCCESS;
700 }
701
702 if (pVM->hwaccm.s.vmx.fSupported)
703 {
704 rc = SUPR3QueryVTxSupported();
705 if (RT_FAILURE(rc))
706 {
707#ifdef RT_OS_LINUX
708 LogRel(("HWACCM: The host kernel does not support VT-x -- Linux 2.6.13 or newer required!\n"));
709#else
710 LogRel(("HWACCM: The host kernel does not support VT-x!\n"));
711#endif
712 if ( pVM->cCpus > 1
713 || VMMIsHwVirtExtForced(pVM))
714 return rc;
715
716 /* silently fall back to raw mode */
717 return VINF_SUCCESS;
718 }
719 }
720
721 if (!pVM->hwaccm.s.fAllowed)
722 return VINF_SUCCESS; /* nothing to do */
723
724 /* Enable VT-x or AMD-V on all host CPUs. */
725 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HWACC_ENABLE, 0, NULL);
726 if (RT_FAILURE(rc))
727 {
728 LogRel(("HWACCMR3InitFinalize: SUPR3CallVMMR0Ex VMMR0_DO_HWACC_ENABLE failed with %Rrc\n", rc));
729 return rc;
730 }
731 Assert(!pVM->fHWACCMEnabled || VMMIsHwVirtExtForced(pVM));
732
733 pVM->hwaccm.s.fHasIoApic = PDMHasIoApic(pVM);
734 /* No TPR patching is required when the IO-APIC is not enabled for this VM. (Main should have taken care of this already) */
735 if (!pVM->hwaccm.s.fHasIoApic)
736 {
737 Assert(!pVM->hwaccm.s.fTRPPatchingAllowed); /* paranoia */
738 pVM->hwaccm.s.fTRPPatchingAllowed = false;
739 }
740
741 if (pVM->hwaccm.s.vmx.fSupported)
742 {
743 Log(("pVM->hwaccm.s.vmx.fSupported = %d\n", pVM->hwaccm.s.vmx.fSupported));
744
745 if ( pVM->hwaccm.s.fInitialized == false
746 && pVM->hwaccm.s.vmx.msr.feature_ctrl != 0)
747 {
748 uint64_t val;
749 RTGCPHYS GCPhys = 0;
750
751 LogRel(("HWACCM: Host CR4=%08X\n", pVM->hwaccm.s.vmx.hostCR4));
752 LogRel(("HWACCM: MSR_IA32_FEATURE_CONTROL = %RX64\n", pVM->hwaccm.s.vmx.msr.feature_ctrl));
753 LogRel(("HWACCM: MSR_IA32_VMX_BASIC_INFO = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_basic_info));
754 LogRel(("HWACCM: VMCS id = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
755 LogRel(("HWACCM: VMCS size = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
756 LogRel(("HWACCM: VMCS physical address limit = %s\n", MSR_IA32_VMX_BASIC_INFO_VMCS_PHYS_WIDTH(pVM->hwaccm.s.vmx.msr.vmx_basic_info) ? "< 4 GB" : "None"));
757 LogRel(("HWACCM: VMCS memory type = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_MEM_TYPE(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
758 LogRel(("HWACCM: Dual monitor treatment = %d\n", MSR_IA32_VMX_BASIC_INFO_VMCS_DUAL_MON(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
759
760 LogRel(("HWACCM: MSR_IA32_VMX_PINBASED_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.u));
761 val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.allowed1;
762 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT)
763 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT\n"));
764 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT)
765 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT\n"));
766 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI)
767 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI\n"));
768 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER)
769 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER\n"));
770 val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;
771 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT)
772 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT *must* be set\n"));
773 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT)
774 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT *must* be set\n"));
775 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI)
776 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI *must* be set\n"));
777 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER)
778 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER *must* be set\n"));
779
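
Each of these capability MSRs is split into allowed-one bits (features that may be enabled) and disallowed-zero bits (features the CPU forces on). A sketch of how ring-0 code typically folds them into a final control value follows; uDesiredPinCtls is a hypothetical input, and this is not the literal VMXR0 code.

    uint32_t u32PinCtls = uDesiredPinCtls;                            /* what we would like to enable */
    u32PinCtls |= pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;   /* bits that *must* be set      */
    u32PinCtls &= pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.allowed1;      /* drop unsupported bits        */
    /* u32PinCtls is now a value the CPU will accept in the VMCS pin-based controls field. */
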
780 LogRel(("HWACCM: MSR_IA32_VMX_PROCBASED_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.u));
781 val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1;
782 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT)
783 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT\n"));
784 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET)
785 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET\n"));
786 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT)
787 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT\n"));
788 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT)
789 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT\n"));
790 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT)
791 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT\n"));
792 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT)
793 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT\n"));
794 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)
795 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT\n"));
796 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT)
797 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT\n"));
798 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT)
799 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT\n"));
800 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT)
801 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT\n"));
802 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT)
803 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT\n"));
804 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
805 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW\n"));
806 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT)
807 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT\n"));
808 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
809 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT\n"));
810 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT)
811 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT\n"));
812 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS)
813 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS\n"));
814 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
815 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG\n"));
816 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
817 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS\n"));
818 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT)
819 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT\n"));
820 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
821 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT\n"));
822 if (val & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
823 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL\n"));
824
825 val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;
826 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT)
827 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT *must* be set\n"));
828 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET)
829 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET *must* be set\n"));
830 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT)
831 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT *must* be set\n"));
832 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT)
833 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT *must* be set\n"));
834 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT)
835 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT *must* be set\n"));
836 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT)
837 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT *must* be set\n"));
838 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)
839 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT *must* be set\n"));
840 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT)
841 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT *must* be set\n"));
842 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT)
843 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT *must* be set\n"));
844 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT)
845 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT *must* be set\n"));
846 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT)
847 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT *must* be set\n"));
848 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
849 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW *must* be set\n"));
850 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT)
851 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT *must* be set\n"));
852 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
853 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT *must* be set\n"));
854 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT)
855 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT *must* be set\n"));
856 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS)
857 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS *must* be set\n"));
858 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
859 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG *must* be set\n"));
860 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
861 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS *must* be set\n"));
862 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT)
863 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT *must* be set\n"));
864 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
865 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT *must* be set\n"));
866 if (val & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
867 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL *must* be set\n"));
868
869 if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
870 {
871 LogRel(("HWACCM: MSR_IA32_VMX_PROCBASED_CTLS2 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.u));
872 val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1;
873 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
874 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC\n"));
875 if (val & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
876 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_EPT\n"));
877 if (val & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT)
878 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT\n"));
879 if (val & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP_EXIT)
880 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP_EXIT\n"));
881 if (val & VMX_VMCS_CTRL_PROC_EXEC2_X2APIC)
882 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_X2APIC\n"));
883 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
884 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_VPID\n"));
885 if (val & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
886 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT\n"));
887 if (val & VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE)
888 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE\n"));
889 if (val & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT)
890 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT\n"));
891
892 val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0;
893 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
894 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC *must* be set\n"));
895 if (val & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT)
896 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT *must* be set\n"));
897 if (val & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP_EXIT)
898 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP_EXIT *must* be set\n"));
899 if (val & VMX_VMCS_CTRL_PROC_EXEC2_X2APIC)
900 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_X2APIC *must* be set\n"));
901 if (val & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
902 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_EPT *must* be set\n"));
903 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
904 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_VPID *must* be set\n"));
905 if (val & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
906 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT *must* be set\n"));
907 if (val & VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE)
908 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE *must* be set\n"));
909 if (val & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT)
910 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT *must* be set\n"));
911 }
912
913 LogRel(("HWACCM: MSR_IA32_VMX_ENTRY_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_entry.u));
914 val = pVM->hwaccm.s.vmx.msr.vmx_entry.n.allowed1;
915 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
916 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG\n"));
917 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE)
918 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE\n"));
919 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM)
920 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM\n"));
921 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON)
922 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON\n"));
923 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR)
924 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR\n"));
925 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR)
926 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR\n"));
927 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR)
928 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR\n"));
929 val = pVM->hwaccm.s.vmx.msr.vmx_entry.n.disallowed0;
930 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
931 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG *must* be set\n"));
932 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE)
933 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE *must* be set\n"));
934 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM)
935 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM *must* be set\n"));
936 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON)
937 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON *must* be set\n"));
938 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR)
939 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR *must* be set\n"));
940 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR)
941 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR *must* be set\n"));
942 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR)
943 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR *must* be set\n"));
944
945 LogRel(("HWACCM: MSR_IA32_VMX_EXIT_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_exit.u));
946 val = pVM->hwaccm.s.vmx.msr.vmx_exit.n.allowed1;
947 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG)
948 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG\n"));
949 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64)
950 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64\n"));
951 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ)
952 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ\n"));
953 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR)
954 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR\n"));
955 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR)
956 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR\n"));
957 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR)
958 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR\n"));
959 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR)
960 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR\n"));
961 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
962 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER\n"));
963 val = pVM->hwaccm.s.vmx.msr.vmx_exit.n.disallowed0;
964 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG)
965 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG *must* be set\n"));
966 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64)
967 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64 *must* be set\n"));
968 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ)
969 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ *must* be set\n"));
970 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR)
971 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR *must* be set\n"));
972 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR)
973 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR *must* be set\n"));
974 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR)
975 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR *must* be set\n"));
976 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR)
977 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR *must* be set\n"));
978 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
979 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER *must* be set\n"));
980
981 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps)
982 {
983 LogRel(("HWACCM: MSR_IA32_VMX_EPT_VPID_CAPS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_eptcaps));
984
985 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_X_ONLY)
986 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_RWX_X_ONLY\n"));
987 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_W_ONLY)
988 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_RWX_W_ONLY\n"));
989 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_WX_ONLY)
990 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_RWX_WX_ONLY\n"));
991 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_21_BITS)
992 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_21_BITS\n"));
993 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_30_BITS)
994 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_30_BITS\n"));
995 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_39_BITS)
996 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_39_BITS\n"));
997 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_48_BITS)
998 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_48_BITS\n"));
999 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_57_BITS)
1000 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_57_BITS\n"));
1001 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_UC)
1002 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_UC\n"));
1003 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WC)
1004 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WC\n"));
1005 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WT)
1006 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WT\n"));
1007 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WP)
1008 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WP\n"));
1009 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WB)
1010 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WB\n"));
1011 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_21_BITS)
1012 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_SP_21_BITS\n"));
1013 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_30_BITS)
1014 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_SP_30_BITS\n"));
1015 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_39_BITS)
1016 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_SP_39_BITS\n"));
1017 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_48_BITS)
1018 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_SP_48_BITS\n"));
1019 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT)
1020 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVEPT\n"));
1021 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_INDIV)
1022 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_INDIV\n"));
1023 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_CONTEXT)
1024 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_CONTEXT\n"));
1025 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL)
1026 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL\n"));
1027 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID)
1028 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID\n"));
1029 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV)
1030 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV\n"));
1031 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_CONTEXT)
1032 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_CONTEXT\n"));
1033 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL)
1034 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL\n"));
1035 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_CONTEXT_GLOBAL)
1036 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_CONTEXT_GLOBAL\n"));
1037 }
1038
1039 LogRel(("HWACCM: MSR_IA32_VMX_MISC = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_misc));
1040 LogRel(("HWACCM: MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT %x\n", MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hwaccm.s.vmx.msr.vmx_misc)));
1041 LogRel(("HWACCM: MSR_IA32_VMX_MISC_ACTIVITY_STATES %x\n", MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hwaccm.s.vmx.msr.vmx_misc)));
1042 LogRel(("HWACCM: MSR_IA32_VMX_MISC_CR3_TARGET %x\n", MSR_IA32_VMX_MISC_CR3_TARGET(pVM->hwaccm.s.vmx.msr.vmx_misc)));
1043 LogRel(("HWACCM: MSR_IA32_VMX_MISC_MAX_MSR %x\n", MSR_IA32_VMX_MISC_MAX_MSR(pVM->hwaccm.s.vmx.msr.vmx_misc)));
1044 LogRel(("HWACCM: MSR_IA32_VMX_MISC_MSEG_ID %x\n", MSR_IA32_VMX_MISC_MSEG_ID(pVM->hwaccm.s.vmx.msr.vmx_misc)));
1045
1046 LogRel(("HWACCM: MSR_IA32_VMX_CR0_FIXED0 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0));
1047 LogRel(("HWACCM: MSR_IA32_VMX_CR0_FIXED1 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1));
1048 LogRel(("HWACCM: MSR_IA32_VMX_CR4_FIXED0 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0));
1049 LogRel(("HWACCM: MSR_IA32_VMX_CR4_FIXED1 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1));
1050 LogRel(("HWACCM: MSR_IA32_VMX_VMCS_ENUM = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum));
1051
1052 LogRel(("HWACCM: TPR shadow physaddr = %RHp\n", pVM->hwaccm.s.vmx.pAPICPhys));
1053
1054 /* Paranoia */
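/* MSR_IA32_VMX_MISC_MAX_MSR presumably yields the recommended maximum number of entries in the VM-entry/exit MSR-load/store areas; assert there is room for at least 512 of them. */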
1055 AssertRelease(MSR_IA32_VMX_MISC_MAX_MSR(pVM->hwaccm.s.vmx.msr.vmx_misc) >= 512);
1056
1057 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1058 {
1059 LogRel(("HWACCM: VCPU%d: MSR bitmap physaddr = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pMSRBitmapPhys));
1060 LogRel(("HWACCM: VCPU%d: VMCS physaddr = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pVMCSPhys));
1061 }
1062
1063#ifdef HWACCM_VTX_WITH_EPT
1064 if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
1065 pVM->hwaccm.s.fNestedPaging = pVM->hwaccm.s.fAllowNestedPaging;
1066#endif /* HWACCM_VTX_WITH_EPT */
1067#ifdef HWACCM_VTX_WITH_VPID
1068 if ( (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
1069 && !pVM->hwaccm.s.fNestedPaging) /* VPID and EPT are mutually exclusive. */
1070 pVM->hwaccm.s.vmx.fVPID = pVM->hwaccm.s.vmx.fAllowVPID;
1071#endif /* HWACCM_VTX_WITH_VPID */
1072
1073 /* Unrestricted guest execution relies on EPT. */
1074 if ( pVM->hwaccm.s.fNestedPaging
1075 && (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE))
1076 {
1077 pVM->hwaccm.s.vmx.fUnrestrictedGuest = true;
1078 }
1079
1080 /* Only try once. */
1081 pVM->hwaccm.s.fInitialized = true;
1082
1083 if (!pVM->hwaccm.s.vmx.fUnrestrictedGuest)
1084 {
1085 /* Allocate three pages for the TSS we need for real mode emulation. (2 pages for the IO bitmap) */
1086 rc = PDMR3VMMDevHeapAlloc(pVM, HWACCM_VTX_TOTAL_DEVHEAP_MEM, (RTR3PTR *)&pVM->hwaccm.s.vmx.pRealModeTSS);
1087 if (RT_SUCCESS(rc))
1088 {
1089 /* The I/O bitmap starts right after the virtual interrupt redirection bitmap. */
1090 ASMMemZero32(pVM->hwaccm.s.vmx.pRealModeTSS, sizeof(*pVM->hwaccm.s.vmx.pRealModeTSS));
1091 pVM->hwaccm.s.vmx.pRealModeTSS->offIoBitmap = sizeof(*pVM->hwaccm.s.vmx.pRealModeTSS);
1092 /* Bit set to 0 means redirection enabled. */
1093 memset(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap, 0x0, sizeof(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap));
1094 /* Allow all port IO, so the VT-x IO intercepts do their job. */
1095 memset(pVM->hwaccm.s.vmx.pRealModeTSS + 1, 0, PAGE_SIZE*2);
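/* Per the TSS layout, the I/O permission bitmap must be terminated by a byte with all bits set; the write below provides that terminator. */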
1096 *((unsigned char *)pVM->hwaccm.s.vmx.pRealModeTSS + HWACCM_VTX_TSS_SIZE - 2) = 0xff;
1097
1098 /* Construct a 1024-entry page directory with 4 MB pages, identity mapping the first 4 GB; it is used as
1099 * the guest page-table root when running real or protected mode without paging while EPT is active.
1100 */
1101 pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable = (PX86PD)((char *)pVM->hwaccm.s.vmx.pRealModeTSS + PAGE_SIZE * 3);
1102 for (unsigned i=0;i<X86_PG_ENTRIES;i++)
1103 {
1104 pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable->a[i].u = _4M * i;
1105 pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable->a[i].u |= X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_A | X86_PDE4M_D | X86_PDE4M_PS | X86_PDE4M_G;
1106 }
1107
1108 /* We convert it here every time as pci regions could be reconfigured. */
1109 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hwaccm.s.vmx.pRealModeTSS, &GCPhys);
1110 AssertRC(rc);
1111 LogRel(("HWACCM: Real Mode TSS guest physaddr = %RGp\n", GCPhys));
1112
1113 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
1114 AssertRC(rc);
1115 LogRel(("HWACCM: Non-Paging Mode EPT CR3 = %RGp\n", GCPhys));
1116 }
1117 else
1118 {
1119 LogRel(("HWACCM: No real mode VT-x support (PDMR3VMMDevHeapAlloc returned %Rrc)\n", rc));
1120 pVM->hwaccm.s.vmx.pRealModeTSS = NULL;
1121 pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable = NULL;
1122 }
1123 }
1124
1125 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
1126 AssertRC(rc);
1127 if (rc == VINF_SUCCESS)
1128 {
1129 pVM->fHWACCMEnabled = true;
1130 pVM->hwaccm.s.vmx.fEnabled = true;
1131 hwaccmR3DisableRawMode(pVM);
1132
1133 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
1134#ifdef VBOX_ENABLE_64_BITS_GUESTS
1135 if (pVM->hwaccm.s.fAllow64BitGuests)
1136 {
1137 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
1138 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
1139 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL); /* 64 bits only on Intel CPUs */
1140 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
1141 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
1142 }
1143 else
1144 /* Turn on NXE if PAE has been enabled *and* the host has turned on NXE (we reuse the host EFER in the switcher) */
1145 /* Todo: this needs to be fixed properly!! */
1146 if ( CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE)
1147 && (pVM->hwaccm.s.vmx.hostEFER & MSR_K6_EFER_NXE))
1148 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
1149
1150 LogRel((pVM->hwaccm.s.fAllow64BitGuests
1151 ? "HWACCM: 32-bit and 64-bit guests supported.\n"
1152 : "HWACCM: 32-bit guests supported.\n"));
1153#else
1154 LogRel(("HWACCM: 32-bit guests supported.\n"));
1155#endif
1156 LogRel(("HWACCM: VMX enabled!\n"));
1157 if (pVM->hwaccm.s.fNestedPaging)
1158 {
1159 LogRel(("HWACCM: Enabled nested paging\n"));
1160 LogRel(("HWACCM: EPT root page = %RHp\n", PGMGetHyperCR3(VMMGetCpu(pVM))));
1161 if (pVM->hwaccm.s.vmx.fUnrestrictedGuest)
1162 LogRel(("HWACCM: Unrestricted guest execution enabled!\n"));
1163
1164#if HC_ARCH_BITS == 64
1165 if (pVM->hwaccm.s.fLargePages)
1166 {
1167 /* Use large (2 MB) pages for our EPT PDEs where possible. */
1168 PGMSetLargePageUsage(pVM, true);
1169 LogRel(("HWACCM: Large page support enabled!\n"));
1170 }
1171#endif
1172 }
1173 else
1174 Assert(!pVM->hwaccm.s.vmx.fUnrestrictedGuest);
1175
1176 if (pVM->hwaccm.s.vmx.fVPID)
1177 LogRel(("HWACCM: Enabled VPID\n"));
1178
1179 if ( pVM->hwaccm.s.fNestedPaging
1180 || pVM->hwaccm.s.vmx.fVPID)
1181 {
1182 LogRel(("HWACCM: enmFlushPage %d\n", pVM->hwaccm.s.vmx.enmFlushPage));
1183 LogRel(("HWACCM: enmFlushContext %d\n", pVM->hwaccm.s.vmx.enmFlushContext));
1184 }
1185
1186 /* TPR patching status logging. */
1187 if (pVM->hwaccm.s.fTRPPatchingAllowed)
1188 {
1189 if ( (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1190 && (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
1191 {
1192 pVM->hwaccm.s.fTRPPatchingAllowed = false; /* not necessary as we have a hardware solution. */
1193 LogRel(("HWACCM: TPR Patching not required (VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC).\n"));
1194 }
1195 else
1196 {
1197 uint32_t u32Eax, u32Dummy;
1198
1199 /* TPR patching needs access to the MSR_K8_LSTAR msr. */
1200 ASMCpuId(0x80000000, &u32Eax, &u32Dummy, &u32Dummy, &u32Dummy);
1201 if ( u32Eax < 0x80000001
1202 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
1203 {
1204 pVM->hwaccm.s.fTRPPatchingAllowed = false;
1205 LogRel(("HWACCM: TPR patching disabled (long mode not supported).\n"));
1206 }
1207 }
1208 }
1209 LogRel(("HWACCM: TPR Patching %s.\n", (pVM->hwaccm.s.fTRPPatchingAllowed) ? "enabled" : "disabled"));
1210 }
1211 else
1212 {
1213 LogRel(("HWACCM: VMX setup failed with rc=%Rrc!\n", rc));
1214 LogRel(("HWACCM: Last instruction error %x\n", pVM->aCpus[0].hwaccm.s.vmx.lasterror.ulInstrError));
1215 pVM->fHWACCMEnabled = false;
1216 }
1217 }
1218 }
1219 else
1220 if (pVM->hwaccm.s.svm.fSupported)
1221 {
1222 Log(("pVM->hwaccm.s.svm.fSupported = %d\n", pVM->hwaccm.s.svm.fSupported));
1223
1224 if (pVM->hwaccm.s.fInitialized == false)
1225 {
1226 /* Erratum 170 which requires a forced TLB flush for each world switch:
1227 * See http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/33610.pdf
1228 *
1229 * All BH-G1/2 and DH-G1/2 models include a fix:
1230 * Athlon X2: 0x6b 1/2
1231 * 0x68 1/2
1232 * Athlon 64: 0x7f 1
1233 * 0x6f 2
1234 * Sempron: 0x7f 1/2
1235 * 0x6f 2
1236 * 0x6c 2
1237 * 0x7c 2
1238 * Turion 64: 0x68 2
1239 *
1240 */
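/* CPUID leaf 1 EAX layout: [3:0] stepping, [7:4] model, [11:8] family, [19:16] extended model, [27:20] extended family; the extended fields only contribute when the base family is 0xf. */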
1241 uint32_t u32Dummy;
1242 uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
1243 ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
1244 u32BaseFamily= (u32Version >> 8) & 0xf;
1245 u32Family = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
1246 u32Model = ((u32Version >> 4) & 0xf);
1247 u32Model = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
1248 u32Stepping = u32Version & 0xf;
1249 if ( u32Family == 0xf
1250 && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
1251 && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
1252 {
1253 LogRel(("HWACCM: AMD CPU with erratum 170 family %x model %x stepping %x\n", u32Family, u32Model, u32Stepping));
1254 }
1255
1256 LogRel(("HWACCM: cpuid 0x80000001.u32AMDFeatureECX = %RX32\n", pVM->hwaccm.s.cpuid.u32AMDFeatureECX));
1257 LogRel(("HWACCM: cpuid 0x80000001.u32AMDFeatureEDX = %RX32\n", pVM->hwaccm.s.cpuid.u32AMDFeatureEDX));
1258 LogRel(("HWACCM: AMD HWCR MSR = %RX64\n", pVM->hwaccm.s.svm.msrHWCR));
1259 LogRel(("HWACCM: AMD-V revision = %X\n", pVM->hwaccm.s.svm.u32Rev));
1260 LogRel(("HWACCM: AMD-V max ASID = %d\n", pVM->hwaccm.s.uMaxASID));
1261 LogRel(("HWACCM: AMD-V features = %X\n", pVM->hwaccm.s.svm.u32Features));
1262
1263 if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
1264 LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING\n"));
1265 if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_LBR_VIRT)
1266 LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_LBR_VIRT\n"));
1267 if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_SVM_LOCK)
1268 LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_SVM_LOCK\n"));
1269 if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
1270 LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE\n"));
1271 if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_SSE_3_5_DISABLE)
1272 LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_SSE_3_5_DISABLE\n"));
1273 if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER)
1274 LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER\n"));
1275
1276 /* Only try once. */
1277 pVM->hwaccm.s.fInitialized = true;
1278
1279 if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
1280 pVM->hwaccm.s.fNestedPaging = pVM->hwaccm.s.fAllowNestedPaging;
1281
1282 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
1283 AssertRC(rc);
1284 if (rc == VINF_SUCCESS)
1285 {
1286 pVM->fHWACCMEnabled = true;
1287 pVM->hwaccm.s.svm.fEnabled = true;
1288
1289 if (pVM->hwaccm.s.fNestedPaging)
1290 {
1291 LogRel(("HWACCM: Enabled nested paging\n"));
1292#if HC_ARCH_BITS == 64
1293 if (pVM->hwaccm.s.fLargePages)
1294 {
1295 /* Use large (2 MB) pages for our nested paging PDEs where possible. */
1296 PGMSetLargePageUsage(pVM, true);
1297 LogRel(("HWACCM: Large page support enabled!\n"));
1298 }
1299#endif
1300 }
1301
1302 hwaccmR3DisableRawMode(pVM);
1303 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
1304 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL);
1305 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP);
1306#ifdef VBOX_ENABLE_64_BITS_GUESTS
1307 if (pVM->hwaccm.s.fAllow64BitGuests)
1308 {
1309 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
1310 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
1311 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
1312 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
1313 }
1314 else
1315 /* Turn on NXE if PAE has been enabled. */
1316 if (CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE))
1317 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
1318#endif
1319
1320 LogRel((pVM->hwaccm.s.fAllow64BitGuests
1321 ? "HWACCM: 32-bit and 64-bit guests supported.\n"
1322 : "HWACCM: 32-bit guests supported.\n"));
1323
1324 LogRel(("HWACCM: TPR Patching %s.\n", (pVM->hwaccm.s.fTRPPatchingAllowed) ? "enabled" : "disabled"));
1325 }
1326 else
1327 {
1328 pVM->fHWACCMEnabled = false;
1329 }
1330 }
1331 }
1332 if (pVM->fHWACCMEnabled)
1333 LogRel(("HWACCM: VT-x/AMD-V init method: %s\n", (pVM->hwaccm.s.fGlobalInit) ? "GLOBAL" : "LOCAL"));
1334 return VINF_SUCCESS;
1335}
1336
1337/**
1338 * Applies relocations to data and code managed by this
1339 * component. This function will be called at init and
1340 * whenever the VMM needs to relocate itself inside the GC.
1341 *
1342 * @param pVM The VM.
1343 */
1344VMMR3DECL(void) HWACCMR3Relocate(PVM pVM)
1345{
1346 Log(("HWACCMR3Relocate to %RGv\n", MMHyperGetArea(pVM, 0)));
1347
1348 /* Fetch the current paging mode during the relocate callback during state loading. */
1349 if (VMR3GetState(pVM) == VMSTATE_LOADING)
1350 {
1351 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1352 {
1353 PVMCPU pVCpu = &pVM->aCpus[i];
1354
1355 pVCpu->hwaccm.s.enmShadowMode = PGMGetShadowMode(pVCpu);
1356 Assert(pVCpu->hwaccm.s.vmx.enmCurrGuestMode == PGMGetGuestMode(pVCpu));
1357 pVCpu->hwaccm.s.vmx.enmCurrGuestMode = PGMGetGuestMode(pVCpu);
1358 }
1359 }
1360#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1361 if (pVM->fHWACCMEnabled)
1362 {
1363 int rc;
1364
1365 switch(PGMGetHostMode(pVM))
1366 {
1367 case PGMMODE_32_BIT:
1368 pVM->hwaccm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_32_TO_AMD64);
1369 break;
1370
1371 case PGMMODE_PAE:
1372 case PGMMODE_PAE_NX:
1373 pVM->hwaccm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_PAE_TO_AMD64);
1374 break;
1375
1376 default:
1377 AssertFailed();
1378 break;
1379 }
1380 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "VMXGCStartVM64", &pVM->hwaccm.s.pfnVMXGCStartVM64);
1381 AssertReleaseMsgRC(rc, ("VMXGCStartVM64 -> rc=%Rrc\n", rc));
1382
1383 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "SVMGCVMRun64", &pVM->hwaccm.s.pfnSVMGCVMRun64);
1384 AssertReleaseMsgRC(rc, ("SVMGCVMRun64 -> rc=%Rrc\n", rc));
1385
1386 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HWACCMSaveGuestFPU64", &pVM->hwaccm.s.pfnSaveGuestFPU64);
1387 AssertReleaseMsgRC(rc, ("HWACCMSaveGuestFPU64 -> rc=%Rrc\n", rc));
1388
1389 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HWACCMSaveGuestDebug64", &pVM->hwaccm.s.pfnSaveGuestDebug64);
1390 AssertReleaseMsgRC(rc, ("HWACCMSaveGuestDebug64 -> rc=%Rrc\n", rc));
1391
1392# ifdef DEBUG
1393 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HWACCMTestSwitcher64", &pVM->hwaccm.s.pfnTest64);
1394 AssertReleaseMsgRC(rc, ("HWACCMTestSwitcher64 -> rc=%Rrc\n", rc));
1395# endif
1396 }
1397#endif
1398 return;
1399}
1400
1401/**
1402 * Checks whether hardware accelerated raw mode is allowed.
1403 *
1404 * @returns boolean
1405 * @param pVM The VM to operate on.
1406 */
1407VMMR3DECL(bool) HWACCMR3IsAllowed(PVM pVM)
1408{
1409 return pVM->hwaccm.s.fAllowed;
1410}
1411
1412/**
1413 * Notification callback which is called whenever there is a chance that a CR3
1414 * value might have changed.
1415 *
1416 * This is called by PGM.
1417 *
1418 * @param pVM The VM to operate on.
1419 * @param pVCpu The VMCPU to operate on.
1420 * @param enmShadowMode New shadow paging mode.
1421 * @param enmGuestMode New guest paging mode.
1422 */
1423VMMR3DECL(void) HWACCMR3PagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode)
1424{
1425 /* Ignore page mode changes during state loading. */
1426 if (VMR3GetState(pVCpu->pVMR3) == VMSTATE_LOADING)
1427 return;
1428
1429 pVCpu->hwaccm.s.enmShadowMode = enmShadowMode;
1430
1431 if ( pVM->hwaccm.s.vmx.fEnabled
1432 && pVM->fHWACCMEnabled)
1433 {
1434 if ( pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
1435 && enmGuestMode >= PGMMODE_PROTECTED)
1436 {
1437 PCPUMCTX pCtx;
1438
1439 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1440
1441 /* After a real mode switch to protected mode we must force
1442 * CPL to 0. Our real mode emulation had to set it to 3.
1443 */
1444 pCtx->ssHid.Attr.n.u2Dpl = 0;
1445 }
1446 }
1447
1448 if (pVCpu->hwaccm.s.vmx.enmCurrGuestMode != enmGuestMode)
1449 {
1450 /* Keep track of paging mode changes. */
1451 pVCpu->hwaccm.s.vmx.enmPrevGuestMode = pVCpu->hwaccm.s.vmx.enmCurrGuestMode;
1452 pVCpu->hwaccm.s.vmx.enmCurrGuestMode = enmGuestMode;
1453
1454 /* Did we miss a change, because all code was executed in the recompiler? */
1455 if (pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == enmGuestMode)
1456 {
1457 Log(("HWACCMR3PagingModeChanged missed %s->%s transition (prev %s)\n", PGMGetModeName(pVCpu->hwaccm.s.vmx.enmPrevGuestMode), PGMGetModeName(pVCpu->hwaccm.s.vmx.enmCurrGuestMode), PGMGetModeName(pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode)));
1458 pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode = pVCpu->hwaccm.s.vmx.enmPrevGuestMode;
1459 }
1460 }
1461
1462 /* Reset the contents of the read cache. */
1463 PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
1464 for (unsigned j=0;j<pCache->Read.cValidEntries;j++)
1465 pCache->Read.aFieldVal[j] = 0;
1466}
1467
1468/**
1469 * Terminates the HWACCM.
1470 *
1471 * Termination means cleaning up and freeing all resources;
1472 * the VM itself is at this point powered off or suspended.
1473 *
1474 * @returns VBox status code.
1475 * @param pVM The VM to operate on.
1476 */
1477VMMR3DECL(int) HWACCMR3Term(PVM pVM)
1478{
1479 if (pVM->hwaccm.s.vmx.pRealModeTSS)
1480 {
1481 PDMR3VMMDevHeapFree(pVM, pVM->hwaccm.s.vmx.pRealModeTSS);
1482 pVM->hwaccm.s.vmx.pRealModeTSS = 0;
1483 }
1484 HWACCMR3TermCPU(pVM);
1485 return 0;
1486}
1487
1488/**
1489 * Terminates the per-VCPU HWACCM.
1490 *
1491 * Termination means cleaning up and freeing all resources;
1492 * the VM itself is at this point powered off or suspended.
1493 *
1494 * @returns VBox status code.
1495 * @param pVM The VM to operate on.
1496 */
1497VMMR3DECL(int) HWACCMR3TermCPU(PVM pVM)
1498{
1499 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1500 {
1501 PVMCPU pVCpu = &pVM->aCpus[i];
1502
1503#ifdef VBOX_WITH_STATISTICS
1504 if (pVCpu->hwaccm.s.paStatExitReason)
1505 {
1506 MMHyperFree(pVM, pVCpu->hwaccm.s.paStatExitReason);
1507 pVCpu->hwaccm.s.paStatExitReason = NULL;
1508 pVCpu->hwaccm.s.paStatExitReasonR0 = NIL_RTR0PTR;
1509 }
1510 if (pVCpu->hwaccm.s.paStatInjectedIrqs)
1511 {
1512 MMHyperFree(pVM, pVCpu->hwaccm.s.paStatInjectedIrqs);
1513 pVCpu->hwaccm.s.paStatInjectedIrqs = NULL;
1514 pVCpu->hwaccm.s.paStatInjectedIrqsR0 = NIL_RTR0PTR;
1515 }
1516#endif
1517
1518#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1519 memset(pVCpu->hwaccm.s.vmx.VMCSCache.aMagic, 0, sizeof(pVCpu->hwaccm.s.vmx.VMCSCache.aMagic));
1520 pVCpu->hwaccm.s.vmx.VMCSCache.uMagic = 0;
1521 pVCpu->hwaccm.s.vmx.VMCSCache.uPos = 0xffffffff;
1522#endif
1523 }
1524 return 0;
1525}
1526
1527/**
1528 * Resets a virtual CPU.
1529 *
1530 * Used by HWACCMR3Reset and CPU hot plugging.
1531 *
1532 * @param pVCpu The CPU to reset.
1533 */
1534VMMR3DECL(void) HWACCMR3ResetCpu(PVMCPU pVCpu)
1535{
1536 /* On first entry we'll sync everything. */
1537 pVCpu->hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;
1538
1539 pVCpu->hwaccm.s.vmx.cr0_mask = 0;
1540 pVCpu->hwaccm.s.vmx.cr4_mask = 0;
1541
1542 pVCpu->hwaccm.s.fActive = false;
1543 pVCpu->hwaccm.s.Event.fPending = false;
1544
1545 /* Reset state information for real-mode emulation in VT-x. */
1546 pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode = PGMMODE_REAL;
1547 pVCpu->hwaccm.s.vmx.enmPrevGuestMode = PGMMODE_REAL;
1548 pVCpu->hwaccm.s.vmx.enmCurrGuestMode = PGMMODE_REAL;
1549
1550 /* Reset the contents of the read cache. */
1551 PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
1552 for (unsigned j=0;j<pCache->Read.cValidEntries;j++)
1553 pCache->Read.aFieldVal[j] = 0;
1554
1555#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1556 /* Magic marker for searching in crash dumps. */
1557 strcpy((char *)pCache->aMagic, "VMCSCACHE Magic");
1558 pCache->uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
1559#endif
1560}
1561
1562/**
1563 * The VM is being reset.
1564 *
1565 * For the HWACCM component this means that any GDT/LDT/TSS monitors
1566 * need to be removed.
1567 *
1568 * @param pVM VM handle.
1569 */
1570VMMR3DECL(void) HWACCMR3Reset(PVM pVM)
1571{
1572 LogFlow(("HWACCMR3Reset:\n"));
1573
1574 if (pVM->fHWACCMEnabled)
1575 hwaccmR3DisableRawMode(pVM);
1576
1577 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1578 {
1579 PVMCPU pVCpu = &pVM->aCpus[i];
1580
1581 HWACCMR3ResetCpu(pVCpu);
1582 }
1583
1584 /* Clear all patch information. */
1585 pVM->hwaccm.s.pGuestPatchMem = 0;
1586 pVM->hwaccm.s.pFreeGuestPatchMem = 0;
1587 pVM->hwaccm.s.cbGuestPatchMem = 0;
1588 pVM->hwaccm.s.cPatches = 0;
1589 pVM->hwaccm.s.PatchTree = 0;
1590 pVM->hwaccm.s.fTPRPatchingActive = false;
1591 ASMMemZero32(pVM->hwaccm.s.aPatches, sizeof(pVM->hwaccm.s.aPatches));
1592}
1593
1594/**
1595 * Callback to remove all TPR instruction patches and restore the original code.
1596 *
1597 * @returns VBox strict status code.
1598 * @param pVM The VM handle.
1599 * @param pVCpu The VMCPU for the EMT we're being called on.
1600 * @param pvUser The VCPU id (as uintptr_t) of the EMT the original patch request was issued on.
1601 *
1602 */
1603DECLCALLBACK(VBOXSTRICTRC) hwaccmR3RemovePatches(PVM pVM, PVMCPU pVCpu, void *pvUser)
1604{
1605 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
1606
1607 /* Only execute the handler on the VCPU on which the original patch request was issued. */
1608 if (pVCpu->idCpu != idCpu)
1609 return VINF_SUCCESS;
1610
1611 Log(("hwaccmR3RemovePatches\n"));
1612 for (unsigned i = 0; i < pVM->hwaccm.s.cPatches; i++)
1613 {
1614 uint8_t szInstr[15];
1615 PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i];
1616 RTGCPTR pInstrGC = (RTGCPTR)pPatch->Core.Key;
1617 int rc;
1618
1619#ifdef LOG_ENABLED
1620 char szOutput[256];
1621
1622 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, 0, szOutput, sizeof(szOutput), 0);
1623 if (RT_SUCCESS(rc))
1624 Log(("Patched instr: %s\n", szOutput));
1625#endif
1626
1627 /* Check if the instruction is still the same. */
1628 rc = PGMPhysSimpleReadGCPtr(pVCpu, szInstr, pInstrGC, pPatch->cbNewOp);
1629 if (rc != VINF_SUCCESS)
1630 {
1631 Log(("Patched code removed? (rc=%Rrc)\n", rc));
1632 continue; /* swapped out or otherwise removed; skip it. */
1633 }
1634
1635 if (memcmp(szInstr, pPatch->aNewOpcode, pPatch->cbNewOp))
1636 {
1637 Log(("Patched instruction was changed! (rc=%Rrc)\n", rc));
1638 continue; /* skip it. */
1639 }
1640
1641 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pInstrGC, pPatch->aOpcode, pPatch->cbOp);
1642 AssertRC(rc);
1643
1644#ifdef LOG_ENABLED
1645 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, 0, szOutput, sizeof(szOutput), 0);
1646 if (RT_SUCCESS(rc))
1647 Log(("Original instr: %s\n", szOutput));
1648#endif
1649 }
1650 pVM->hwaccm.s.cPatches = 0;
1651 pVM->hwaccm.s.PatchTree = 0;
1652 pVM->hwaccm.s.pFreeGuestPatchMem = pVM->hwaccm.s.pGuestPatchMem;
1653 pVM->hwaccm.s.fTPRPatchingActive = false;
1654 return VINF_SUCCESS;
1655}
1656
1657/**
1658 * Enable patching in a VT-x/AMD-V guest
1659 *
1660 * @returns VBox status code.
1661 * @param pVM The VM to operate on.
1662 * @param idCpu VCPU to execute hwaccmR3RemovePatches on
1663 * @param pPatchMem Patch memory range
1664 * @param cbPatchMem Size of the memory range
1665 */
1666int hwaccmR3EnablePatching(PVM pVM, VMCPUID idCpu, RTRCPTR pPatchMem, unsigned cbPatchMem)
1667{
1668 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hwaccmR3RemovePatches, (void *)idCpu);
1669 AssertRC(rc);
1670
1671 pVM->hwaccm.s.pGuestPatchMem = pPatchMem;
1672 pVM->hwaccm.s.pFreeGuestPatchMem = pPatchMem;
1673 pVM->hwaccm.s.cbGuestPatchMem = cbPatchMem;
1674 return VINF_SUCCESS;
1675}
1676
1677/**
1678 * Enable patching in a VT-x/AMD-V guest
1679 *
1680 * @returns VBox status code.
1681 * @param pVM The VM to operate on.
1682 * @param pPatchMem Patch memory range
1683 * @param cbPatchMem Size of the memory range
1684 */
1685VMMR3DECL(int) HWACMMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1686{
1687 Log(("HWACMMR3EnablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
1688 if (pVM->cCpus > 1)
1689 {
1690 /* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */
1691 int rc = VMR3ReqCallNoWaitU(pVM->pUVM, VMCPUID_ANY_QUEUE,
1692 (PFNRT)hwaccmR3EnablePatching, 4, pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
1693 AssertRC(rc);
1694 return rc;
1695 }
1696 return hwaccmR3EnablePatching(pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
1697}
1698
1699/**
1700 * Disable patching in a VT-x/AMD-V guest
1701 *
1702 * @returns VBox status code.
1703 * @param pVM The VM to operate on.
1704 * @param pPatchMem Patch memory range
1705 * @param cbPatchMem Size of the memory range
1706 */
1707VMMR3DECL(int) HWACMMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1708{
1709 Log(("HWACMMR3DisablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
1710
1711 Assert(pVM->hwaccm.s.pGuestPatchMem == pPatchMem);
1712 Assert(pVM->hwaccm.s.cbGuestPatchMem == cbPatchMem);
1713
1714 /* @todo Potential deadlock when other VCPUs are waiting on the IOM lock (we own it)!! */
1715 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hwaccmR3RemovePatches, (void *)VMMGetCpuId(pVM));
1716 AssertRC(rc);
1717
1718 pVM->hwaccm.s.pGuestPatchMem = 0;
1719 pVM->hwaccm.s.pFreeGuestPatchMem = 0;
1720 pVM->hwaccm.s.cbGuestPatchMem = 0;
1721 pVM->hwaccm.s.fTPRPatchingActive = false;
1722 return VINF_SUCCESS;
1723}
1724
1725
1726/**
1727 * Callback to patch a TPR instruction (vmmcall or mov cr8)
1728 *
1729 * @returns VBox strict status code.
1730 * @param pVM The VM handle.
1731 * @param pVCpu The VMCPU for the EMT we're being called on.
1732 * @param pvUser The VCPU id (as uintptr_t) of the EMT the original patch request was issued on.
1733 *
1734 */
1735DECLCALLBACK(VBOXSTRICTRC) hwaccmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
1736{
1737 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
1738 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1739 PDISCPUSTATE pDis = &pVCpu->hwaccm.s.DisState;
1740 unsigned cbOp;
1741
1742 /* Only execute the handler on the VCPU on which the original patch request was issued (the other CPU(s) might not yet have switched to protected mode). */
1743 if (pVCpu->idCpu != idCpu)
1744 return VINF_SUCCESS;
1745
1746 Log(("hwaccmR3ReplaceTprInstr: %RGv\n", pCtx->rip));
1747
1748 /* Two or more VCPUs were racing to patch this instruction. */
1749 PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
1750 if (pPatch)
1751 return VINF_SUCCESS;
1752
1753 Assert(pVM->hwaccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches));
1754
1755 int rc = EMInterpretDisasOne(pVM, pVCpu, CPUMCTX2CORE(pCtx), pDis, &cbOp);
1756 AssertRC(rc);
1757 if ( rc == VINF_SUCCESS
1758 && pDis->pCurInstr->opcode == OP_MOV
1759 && cbOp >= 3)
1760 {
1761 uint8_t aVMMCall[3] = { 0xf, 0x1, 0xd9}; /* VMMCALL (0F 01 D9) */
1762 uint32_t idx = pVM->hwaccm.s.cPatches;
1763
1764 pPatch = &pVM->hwaccm.s.aPatches[idx];
1765
1766 rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
1767 AssertRC(rc);
1768
1769 pPatch->cbOp = cbOp;
1770
1771 if (pDis->param1.flags == USE_DISPLACEMENT32)
1772 {
1773 /* write. */
1774 if (pDis->param2.flags == USE_REG_GEN32)
1775 {
1776 pPatch->enmType = HWACCMTPRINSTR_WRITE_REG;
1777 pPatch->uSrcOperand = pDis->param2.base.reg_gen;
1778 }
1779 else
1780 {
1781 Assert(pDis->param2.flags == USE_IMMEDIATE32);
1782 pPatch->enmType = HWACCMTPRINSTR_WRITE_IMM;
1783 pPatch->uSrcOperand = pDis->param2.parval;
1784 }
1785 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, aVMMCall, sizeof(aVMMCall));
1786 AssertRC(rc);
1787
1788 memcpy(pPatch->aNewOpcode, aVMMCall, sizeof(aVMMCall));
1789 pPatch->cbNewOp = sizeof(aVMMCall);
1790 }
1791 else
1792 {
1793 RTGCPTR oldrip = pCtx->rip;
1794 uint32_t oldcbOp = cbOp;
1795 uint32_t uMmioReg = pDis->param1.base.reg_gen;
1796
1797 /* read */
1798 Assert(pDis->param1.flags == USE_REG_GEN32);
1799
1800 /* Found:
1801 * mov eax, dword [fffe0080] (5 bytes)
1802 * Check if next instruction is:
1803 * shr eax, 4
1804 */
1805 pCtx->rip += cbOp;
1806 rc = EMInterpretDisasOne(pVM, pVCpu, CPUMCTX2CORE(pCtx), pDis, &cbOp);
1807 pCtx->rip = oldrip;
1808 if ( rc == VINF_SUCCESS
1809 && pDis->pCurInstr->opcode == OP_SHR
1810 && pDis->param1.flags == USE_REG_GEN32
1811 && pDis->param1.base.reg_gen == uMmioReg
1812 && pDis->param2.flags == USE_IMMEDIATE8
1813 && pDis->param2.parval == 4
1814 && oldcbOp + cbOp < sizeof(pVM->hwaccm.s.aPatches[idx].aOpcode))
1815 {
1816 uint8_t szInstr[15];
1817
1818 /* Replacing two instructions now. */
1819 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pPatch->aOpcode, pCtx->rip, oldcbOp + cbOp);
1820 AssertRC(rc);
1821
1822 pPatch->cbOp = oldcbOp + cbOp;
1823
1824 /* 0xF0, 0x0F, 0x20, 0xC0 = mov eax, cr8 */
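/* On AMD CPUs the LOCK prefix selects the alternative CR8 encoding, so the TPR read presumably gets handled through the virtual TPR instead of causing an MMIO exit. */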
1825 szInstr[0] = 0xF0;
1826 szInstr[1] = 0x0F;
1827 szInstr[2] = 0x20;
1828 szInstr[3] = 0xC0 | pDis->param1.base.reg_gen;
1829 for (unsigned i = 4; i < pPatch->cbOp; i++)
1830 szInstr[i] = 0x90; /* nop */
1831
1832 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, szInstr, pPatch->cbOp);
1833 AssertRC(rc);
1834
1835 memcpy(pPatch->aNewOpcode, szInstr, pPatch->cbOp);
1836 pPatch->cbNewOp = pPatch->cbOp;
1837
1838 Log(("Acceptable read/shr candidate!\n"));
1839 pPatch->enmType = HWACCMTPRINSTR_READ_SHR4;
1840 }
1841 else
1842 {
1843 pPatch->enmType = HWACCMTPRINSTR_READ;
1844 pPatch->uDstOperand = pDis->param1.base.reg_gen;
1845
1846 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, aVMMCall, sizeof(aVMMCall));
1847 AssertRC(rc);
1848
1849 memcpy(pPatch->aNewOpcode, aVMMCall, sizeof(aVMMCall));
1850 pPatch->cbNewOp = sizeof(aVMMCall);
1851 }
1852 }
1853
1854 pPatch->Core.Key = pCtx->eip;
1855 rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
1856 AssertRC(rc);
1857
1858 pVM->hwaccm.s.cPatches++;
1859 STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRReplaceSuccess);
1860 return VINF_SUCCESS;
1861 }
1862
1863 /* Save invalid patch, so we will not try again. */
1864 uint32_t idx = pVM->hwaccm.s.cPatches;
1865
1866#ifdef LOG_ENABLED
1867 char szOutput[256];
1868 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip, 0, szOutput, sizeof(szOutput), 0);
1869 if (RT_SUCCESS(rc))
1870 Log(("Failed to patch instr: %s\n", szOutput));
1871#endif
1872
1873 pPatch = &pVM->hwaccm.s.aPatches[idx];
1874 pPatch->Core.Key = pCtx->eip;
1875 pPatch->enmType = HWACCMTPRINSTR_INVALID;
1876 rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
1877 AssertRC(rc);
1878 pVM->hwaccm.s.cPatches++;
1879 STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRReplaceFailure);
1880 return VINF_SUCCESS;
1881}
1882
1883/**
1884 * Callback to patch a TPR instruction (jump to generated code)
1885 *
1886 * @returns VBox strict status code.
1887 * @param pVM The VM handle.
1888 * @param pVCpu The VMCPU for the EMT we're being called on.
1889 * @param pvUser The VCPU id (as uintptr_t) of the EMT the original patch request was issued on.
1890 *
1891 */
1892DECLCALLBACK(VBOXSTRICTRC) hwaccmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
1893{
1894 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
1895 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1896 PDISCPUSTATE pDis = &pVCpu->hwaccm.s.DisState;
1897 unsigned cbOp;
1898 int rc;
1899#ifdef LOG_ENABLED
1900 RTGCPTR pInstr;
1901 char szOutput[256];
1902#endif
1903
1904 /* Only execute the handler on the VCPU on which the original patch request was issued (the other CPU(s) might not yet have switched to protected mode). */
1905 if (pVCpu->idCpu != idCpu)
1906 return VINF_SUCCESS;
1907
1908 Assert(pVM->hwaccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches));
1909
1910 /* Two or more VCPUs were racing to patch this instruction. */
1911 PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
1912 if (pPatch)
1913 {
1914 Log(("hwaccmR3PatchTprInstr: already patched %RGv\n", pCtx->rip));
1915 return VINF_SUCCESS;
1916 }
1917
1918 Log(("hwaccmR3PatchTprInstr %RGv\n", pCtx->rip));
1919
1920 rc = EMInterpretDisasOne(pVM, pVCpu, CPUMCTX2CORE(pCtx), pDis, &cbOp);
1921 AssertRC(rc);
1922 if ( rc == VINF_SUCCESS
1923 && pDis->pCurInstr->opcode == OP_MOV
1924 && cbOp >= 5)
1925 {
1926 uint32_t idx = pVM->hwaccm.s.cPatches;
1927 uint8_t aPatch[64];
1928 uint32_t off = 0;
1929
1930 pPatch = &pVM->hwaccm.s.aPatches[idx];
1931
1932#ifdef LOG_ENABLED
1933 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip, 0, szOutput, sizeof(szOutput), 0);
1934 if (RT_SUCCESS(rc))
1935 Log(("Original instr: %s\n", szOutput));
1936#endif
1937
1938 rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
1939 AssertRC(rc);
1940
1941 pPatch->cbOp = cbOp;
1942 pPatch->enmType = HWACCMTPRINSTR_JUMP_REPLACEMENT;
1943
1944 if (pDis->param1.flags == USE_DISPLACEMENT32)
1945 {
1946 /*
1947 * TPR write:
1948 *
1949 * push ECX [51]
1950 * push EDX [52]
1951 * push EAX [50]
1952 * xor EDX,EDX [31 D2]
1953 * mov EAX,EAX [89 C0]
1954 * or
1955 * mov EAX,0000000CCh [B8 CC 00 00 00]
1956 * mov ECX,0C0000082h [B9 82 00 00 C0]
1957 * wrmsr [0F 30]
1958 * pop EAX [58]
1959 * pop EDX [5A]
1960 * pop ECX [59]
1961 * jmp return_address [E9 return_address]
1962 *
1963 */
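/* Note: MSR_K8_LSTAR is reused here as a TPR shadow MSR which the ring-0 code is expected to intercept while TPR patching is active, so the real MSR is never clobbered by the guest. */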
1964 bool fUsesEax = (pDis->param2.flags == USE_REG_GEN32 && pDis->param2.base.reg_gen == USE_REG_EAX);
1965
1966 aPatch[off++] = 0x51; /* push ecx */
1967 aPatch[off++] = 0x52; /* push edx */
1968 if (!fUsesEax)
1969 aPatch[off++] = 0x50; /* push eax */
1970 aPatch[off++] = 0x31; /* xor edx, edx */
1971 aPatch[off++] = 0xD2;
1972 if (pDis->param2.flags == USE_REG_GEN32)
1973 {
1974 if (!fUsesEax)
1975 {
1976 aPatch[off++] = 0x89; /* mov eax, src_reg */
1977 aPatch[off++] = MAKE_MODRM(3, pDis->param2.base.reg_gen, USE_REG_EAX);
1978 }
1979 }
1980 else
1981 {
1982 Assert(pDis->param2.flags == USE_IMMEDIATE32);
1983 aPatch[off++] = 0xB8; /* mov eax, immediate */
1984 *(uint32_t *)&aPatch[off] = pDis->param2.parval;
1985 off += sizeof(uint32_t);
1986 }
1987 aPatch[off++] = 0xB9; /* mov ecx, 0xc0000082 */
1988 *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
1989 off += sizeof(uint32_t);
1990
1991 aPatch[off++] = 0x0F; /* wrmsr */
1992 aPatch[off++] = 0x30;
1993 if (!fUsesEax)
1994 aPatch[off++] = 0x58; /* pop eax */
1995 aPatch[off++] = 0x5A; /* pop edx */
1996 aPatch[off++] = 0x59; /* pop ecx */
1997 }
1998 else
1999 {
2000 /*
2001 * TPR read:
2002 *
2003 * push ECX [51]
2004 * push EDX [52]
2005 * push EAX [50]
2006 * mov ECX,0C0000082h [B9 82 00 00 C0]
2007 * rdmsr [0F 32]
2008 * mov EAX,EAX [89 C0]
2009 * pop EAX [58]
2010 * pop EDX [5A]
2011 * pop ECX [59]
2012 * jmp return_address [E9 return_address]
2013 *
2014 */
2015 Assert(pDis->param1.flags == USE_REG_GEN32);
2016
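/* Preserve the scratch registers around the RDMSR; the destination register itself need not be saved since it receives the TPR value anyway. */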
2017 if (pDis->param1.base.reg_gen != USE_REG_ECX)
2018 aPatch[off++] = 0x51; /* push ecx */
2019 if (pDis->param1.base.reg_gen != USE_REG_EDX)
2020 aPatch[off++] = 0x52; /* push edx */
2021 if (pDis->param1.base.reg_gen != USE_REG_EAX)
2022 aPatch[off++] = 0x50; /* push eax */
2023
2024 aPatch[off++] = 0x31; /* xor edx, edx */
2025 aPatch[off++] = 0xD2;
2026
2027 aPatch[off++] = 0xB9; /* mov ecx, 0xc0000082 */
2028 *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
2029 off += sizeof(uint32_t);
2030
2031 aPatch[off++] = 0x0F; /* rdmsr */
2032 aPatch[off++] = 0x32;
2033
2034 if (pDis->param1.base.reg_gen != USE_REG_EAX)
2035 {
2036 aPatch[off++] = 0x89; /* mov dst_reg, eax */
2037 aPatch[off++] = MAKE_MODRM(3, USE_REG_EAX, pDis->param1.base.reg_gen);
2038 }
2039
2040 if (pDis->param1.base.reg_gen != USE_REG_EAX)
2041 aPatch[off++] = 0x58; /* pop eax */
2042 if (pDis->param1.base.reg_gen != USE_REG_EDX)
2043 aPatch[off++] = 0x5A; /* pop edx */
2044 if (pDis->param1.base.reg_gen != USE_REG_ECX)
2045 aPatch[off++] = 0x59; /* pop ecx */
2046 }
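/* Emit a rel32 JMP back to the instruction following the replaced one; the displacement is relative to the byte after the 5-byte jump in the patch buffer. */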
2047 aPatch[off++] = 0xE9; /* jmp return_address */
2048 *(RTRCUINTPTR *)&aPatch[off] = ((RTRCUINTPTR)pCtx->eip + cbOp) - ((RTRCUINTPTR)pVM->hwaccm.s.pFreeGuestPatchMem + off + 4);
2049 off += sizeof(RTRCUINTPTR);
2050
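/* Only commit the patch if the generated code still fits in the remaining guest patch memory. */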
2051 if (pVM->hwaccm.s.pFreeGuestPatchMem + off <= pVM->hwaccm.s.pGuestPatchMem + pVM->hwaccm.s.cbGuestPatchMem)
2052 {
2053 /* Write new code to the patch buffer. */
2054 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pVM->hwaccm.s.pFreeGuestPatchMem, aPatch, off);
2055 AssertRC(rc);
2056
2057#ifdef LOG_ENABLED
2058 pInstr = pVM->hwaccm.s.pFreeGuestPatchMem;
2059 while (true)
2060 {
2061 uint32_t cb;
2062
2063 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pInstr, 0, szOutput, sizeof(szOutput), &cb);
2064 if (RT_SUCCESS(rc))
2065 Log(("Patch instr %s\n", szOutput));
2066
2067 pInstr += cb;
2068
2069 if (pInstr >= pVM->hwaccm.s.pFreeGuestPatchMem + off)
2070 break;
2071 }
2072#endif
2073
2074 pPatch->aNewOpcode[0] = 0xE9;
2075 *(RTRCUINTPTR *)&pPatch->aNewOpcode[1] = ((RTRCUINTPTR)pVM->hwaccm.s.pFreeGuestPatchMem) - ((RTRCUINTPTR)pCtx->eip + 5);
2076
2077 /* Overwrite the TPR instruction with a jump. */
2078 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->eip, pPatch->aNewOpcode, 5);
2079 AssertRC(rc);
2080
2081#ifdef LOG_ENABLED
2082 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip, 0, szOutput, sizeof(szOutput), 0);
2083 if (RT_SUCCESS(rc))
2084 Log(("Jump: %s\n", szOutput));
2085#endif
2086 pVM->hwaccm.s.pFreeGuestPatchMem += off;
2087 pPatch->cbNewOp = 5;
2088
2089 pPatch->Core.Key = pCtx->eip;
2090 rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
2091 AssertRC(rc);
2092
2093 pVM->hwaccm.s.cPatches++;
2094 pVM->hwaccm.s.fTPRPatchingActive = true;
2095 STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRPatchSuccess);
2096 return VINF_SUCCESS;
2097 }
2098 else
2099 Log(("Ran out of space in our patch buffer!\n"));
2100 }
2101
2102 /* Save invalid patch, so we will not try again. */
2103 uint32_t idx = pVM->hwaccm.s.cPatches;
2104
2105#ifdef LOG_ENABLED
2106 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip, 0, szOutput, sizeof(szOutput), 0);
2107 if (RT_SUCCESS(rc))
2108 Log(("Failed to patch instr: %s\n", szOutput));
2109#endif
2110
2111 pPatch = &pVM->hwaccm.s.aPatches[idx];
2112 pPatch->Core.Key = pCtx->eip;
2113 pPatch->enmType = HWACCMTPRINSTR_INVALID;
2114 rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
2115 AssertRC(rc);
2116 pVM->hwaccm.s.cPatches++;
2117 STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRPatchFailure);
2118 return VINF_SUCCESS;
2119}
2120
2121/**
2122 * Attempt to patch TPR mmio instructions
2123 *
2124 * @returns VBox status code.
2125 * @param pVM The VM to operate on.
2126 * @param pVCpu The VM CPU to operate on.
2127 * @param pCtx CPU context
2128 */
2129VMMR3DECL(int) HWACCMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2130{
2131 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, (pVM->hwaccm.s.pGuestPatchMem) ? hwaccmR3PatchTprInstr : hwaccmR3ReplaceTprInstr, (void *)pVCpu->idCpu);
2132 AssertRC(rc);
2133 return rc;
2134}
2135
2136/**
2137 * Force execution of the current IO code in the recompiler
2138 *
2139 * @returns VBox status code.
2140 * @param pVM The VM to operate on.
2141 * @param pCtx Partial VM execution context
2142 */
2143VMMR3DECL(int) HWACCMR3EmulateIoBlock(PVM pVM, PCPUMCTX pCtx)
2144{
2145 PVMCPU pVCpu = VMMGetCpu(pVM);
2146
2147 Assert(pVM->fHWACCMEnabled);
2148 Log(("HWACCMR3EmulateIoBlock\n"));
2149
2150 /* This is primarily intended to speed up Grub, so we don't care about paged protected mode. */
2151 if (HWACCMCanEmulateIoBlockEx(pCtx))
2152 {
2153 Log(("HWACCMR3EmulateIoBlock -> enabled\n"));
2154 pVCpu->hwaccm.s.EmulateIoBlock.fEnabled = true;
2155 pVCpu->hwaccm.s.EmulateIoBlock.GCPtrFunctionEip = pCtx->rip;
2156 pVCpu->hwaccm.s.EmulateIoBlock.cr0 = pCtx->cr0;
2157 return VINF_EM_RESCHEDULE_REM;
2158 }
2159 return VINF_SUCCESS;
2160}
2161
2162/**
2163 * Checks if we can currently use hardware accelerated raw mode.
2164 *
2165 * @returns boolean
2166 * @param pVM The VM to operate on.
2167 * @param pCtx Partial VM execution context
2168 */
2169VMMR3DECL(bool) HWACCMR3CanExecuteGuest(PVM pVM, PCPUMCTX pCtx)
2170{
2171 PVMCPU pVCpu = VMMGetCpu(pVM);
2172
2173 Assert(pVM->fHWACCMEnabled);
2174
2175 /* If we're still executing the IO code, then return false. */
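/* Heuristic: while RIP stays within +/- 0x200 bytes of the instruction that started the I/O block emulation and CR0 is unchanged, assume we are still inside that I/O routine. */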
2176 if ( RT_UNLIKELY(pVCpu->hwaccm.s.EmulateIoBlock.fEnabled)
2177 && pCtx->rip < pVCpu->hwaccm.s.EmulateIoBlock.GCPtrFunctionEip + 0x200
2178 && pCtx->rip > pVCpu->hwaccm.s.EmulateIoBlock.GCPtrFunctionEip - 0x200
2179 && pCtx->cr0 == pVCpu->hwaccm.s.EmulateIoBlock.cr0)
2180 return false;
2181
2182 pVCpu->hwaccm.s.EmulateIoBlock.fEnabled = false;
2183
2184 /* AMD-V supports real & protected mode with or without paging. */
2185 if (pVM->hwaccm.s.svm.fEnabled)
2186 {
2187 pVCpu->hwaccm.s.fActive = true;
2188 return true;
2189 }
2190
2191 pVCpu->hwaccm.s.fActive = false;
2192
2193 /* Note! The context supplied by REM is partial. If we add more checks here, be sure to verify that REM provides this info! */
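/* Exactly one of the two must hold: either unrestricted guest execution is available (no real-mode TSS set up) or the real-mode TSS was allocated during init. */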
2194 Assert((pVM->hwaccm.s.vmx.fUnrestrictedGuest && !pVM->hwaccm.s.vmx.pRealModeTSS) || (!pVM->hwaccm.s.vmx.fUnrestrictedGuest && pVM->hwaccm.s.vmx.pRealModeTSS));
2195
2196 bool fSupportsRealMode = pVM->hwaccm.s.vmx.fUnrestrictedGuest || PDMVMMDevHeapIsEnabled(pVM);
2197 if (!pVM->hwaccm.s.vmx.fUnrestrictedGuest)
2198 {
2199 /** The VMM device heap is a requirement for emulating real mode or protected mode without paging when the unrestricted guest execution feature is missing. */
2200 if (fSupportsRealMode)
2201 {
2202 if (CPUMIsGuestInRealModeEx(pCtx))
2203 {
2204 /* VT-x will not allow high selector bases in v86 mode; fall back to the recompiler in that case.
2205 * The base must also be equal to (sel << 4).
2206 */
2207 if ( ( pCtx->cs != (pCtx->csHid.u64Base >> 4)
2208 && pCtx->csHid.u64Base != 0xffff0000 /* we can deal with the BIOS code as it's also mapped into the lower region. */)
2209 || pCtx->ds != (pCtx->dsHid.u64Base >> 4)
2210 || pCtx->es != (pCtx->esHid.u64Base >> 4)
2211 || pCtx->fs != (pCtx->fsHid.u64Base >> 4)
2212 || pCtx->gs != (pCtx->gsHid.u64Base >> 4)
2213 || pCtx->ss != (pCtx->ssHid.u64Base >> 4))
2214 {
2215 return false;
2216 }
2217 }
2218 else
2219 {
2220 PGMMODE enmGuestMode = PGMGetGuestMode(pVCpu);
2221 /* Verify the requirements for executing code in protected mode. VT-x can't handle the CPU state right after a switch
2222 * from real to protected mode. (all sorts of RPL & DPL assumptions)
2223 */
2224 if ( pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
2225 && enmGuestMode >= PGMMODE_PROTECTED)
2226 {
2227 if ( (pCtx->cs & X86_SEL_RPL)
2228 || (pCtx->ds & X86_SEL_RPL)
2229 || (pCtx->es & X86_SEL_RPL)
2230 || (pCtx->fs & X86_SEL_RPL)
2231 || (pCtx->gs & X86_SEL_RPL)
2232 || (pCtx->ss & X86_SEL_RPL))
2233 {
2234 return false;
2235 }
2236 }
2237 }
2238 }
2239 else
2240 {
2241 if ( !CPUMIsGuestInLongModeEx(pCtx)
2242 && !pVM->hwaccm.s.vmx.fUnrestrictedGuest)
2243 {
2244 /** @todo This should (probably) be set on every excursion to the REM,
2245 * however it's too risky right now. So, only apply it when we go
2246 * back to REM for real mode execution. (The XP hack below doesn't
2247 * work reliably without this.)
2248 * Update: Implemented in EM.cpp, see #ifdef EM_NOTIFY_HWACCM. */
2249 pVM->aCpus[0].hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;
2250
2251 if ( !pVM->hwaccm.s.fNestedPaging /* requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap*/
2252 || CPUMIsGuestInRealModeEx(pCtx)) /* requires a fake TSS for real mode - stored in the VMM device heap */
2253 return false;
2254
2255 /* Too early for VT-x; Solaris guests will fail with a guru meditation otherwise; same for XP. */
2256 if (pCtx->idtr.pIdt == 0 || pCtx->idtr.cbIdt == 0 || pCtx->tr == 0)
2257 return false;
2258
2259 /* The guest is about to complete the switch to protected mode. Wait a bit longer. */
2260 /* Windows XP; switch to protected mode; all selectors are marked not present in the
2261 * hidden registers (possible recompiler bug; see load_seg_vm) */
2262 if (pCtx->csHid.Attr.n.u1Present == 0)
2263 return false;
2264 if (pCtx->ssHid.Attr.n.u1Present == 0)
2265 return false;
2266
2267 /* Windows XP: possible same as above, but new recompiler requires new heuristics?
2268 VT-x doesn't seem to like something about the guest state and this stuff avoids it. */
2269 /** @todo This check is actually wrong, it doesn't take the direction of the
2270 * stack segment into account. But, it does the job for now. */
2271 if (pCtx->rsp >= pCtx->ssHid.u32Limit)
2272 return false;
2273 #if 0
2274 if ( pCtx->cs >= pCtx->gdtr.cbGdt
2275 || pCtx->ss >= pCtx->gdtr.cbGdt
2276 || pCtx->ds >= pCtx->gdtr.cbGdt
2277 || pCtx->es >= pCtx->gdtr.cbGdt
2278 || pCtx->fs >= pCtx->gdtr.cbGdt
2279 || pCtx->gs >= pCtx->gdtr.cbGdt)
2280 return false;
2281 #endif
2282 }
2283 }
2284 }
2285
2286 if (pVM->hwaccm.s.vmx.fEnabled)
2287 {
2288 uint32_t mask;
2289
2290 /* if bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
2291 mask = (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0;
2292 /* Note: We ignore the NE bit here on purpose; see vmmr0\hwaccmr0.cpp for details. */
2293 mask &= ~X86_CR0_NE;
2294
2295 if (fSupportsRealMode)
2296 {
2297 /* Note: We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
2298 mask &= ~(X86_CR0_PG|X86_CR0_PE);
2299 }
2300 else
2301 {
2302 /* We support protected mode without paging using identity mapping. */
2303 mask &= ~X86_CR0_PG;
2304 }
2305 if ((pCtx->cr0 & mask) != mask)
2306 return false;
2307
2308 /* if bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
2309 mask = (uint32_t)~pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1;
2310 if ((pCtx->cr0 & mask) != 0)
2311 return false;
2312
2313 /* if bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
2314 mask = (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0;
2315 mask &= ~X86_CR4_VMXE;
2316 if ((pCtx->cr4 & mask) != mask)
2317 return false;
2318
2319 /* if bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
2320 mask = (uint32_t)~pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1;
2321 if ((pCtx->cr4 & mask) != 0)
2322 return false;
2323
2324 pVCpu->hwaccm.s.fActive = true;
2325 return true;
2326 }
2327
2328 return false;
2329}
2330
2331/**
2332 * Checks if we need to reschedule due to VMM device heap changes
2333 *
2334 * @returns boolean
2335 * @param pVM The VM to operate on.
2336 * @param pCtx VM execution context
2337 */
2338VMMR3DECL(bool) HWACCMR3IsRescheduleRequired(PVM pVM, PCPUMCTX pCtx)
2339{
2340 /** The VMM device heap is a requirement for emulating real mode or protected mode without paging when the unrestricted guest execution feature is missing. (VT-x only) */
2341 if ( pVM->hwaccm.s.vmx.fEnabled
2342 && !CPUMIsGuestInPagedProtectedModeEx(pCtx)
2343 && !PDMVMMDevHeapIsEnabled(pVM)
2344 && (pVM->hwaccm.s.fNestedPaging || CPUMIsGuestInRealModeEx(pCtx)))
2345 return true;
2346
2347 return false;
2348}
2349
2350
2351/**
2352 * Notification from EM about a rescheduling into hardware assisted execution
2353 * mode.
2354 *
2355 * @param pVCpu Pointer to the current virtual cpu structure.
2356 */
2357VMMR3DECL(void) HWACCMR3NotifyScheduled(PVMCPU pVCpu)
2358{
2359 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;
2360}
2361
2362/**
2363 * Notification from EM about returning from instruction emulation (REM / EM).
2364 *
2365 * @param pVCpu Pointer to the current virtual cpu structure.
2366 */
2367VMMR3DECL(void) HWACCMR3NotifyEmulated(PVMCPU pVCpu)
2368{
2369 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;
2370}
2371
2372/**
2373 * Checks if we are currently using hardware accelerated raw mode.
2374 *
2375 * @returns boolean
2376 * @param pVCpu The VMCPU to operate on.
2377 */
2378VMMR3DECL(bool) HWACCMR3IsActive(PVMCPU pVCpu)
2379{
2380 return pVCpu->hwaccm.s.fActive;
2381}
2382
2383/**
2384 * Checks if we are currently using nested paging.
2385 *
2386 * @returns boolean
2387 * @param pVM The VM to operate on.
2388 */
2389VMMR3DECL(bool) HWACCMR3IsNestedPagingActive(PVM pVM)
2390{
2391 return pVM->hwaccm.s.fNestedPaging;
2392}
2393
2394/**
2395 * Checks if we are currently using VPID in VT-x mode.
2396 *
2397 * @returns boolean
2398 * @param pVM The VM to operate on.
2399 */
2400VMMR3DECL(bool) HWACCMR3IsVPIDActive(PVM pVM)
2401{
2402 return pVM->hwaccm.s.vmx.fVPID;
2403}
2404
2405
2406/**
2407 * Checks if internal events are pending. In that case we are not allowed to dispatch interrupts.
2408 *
2409 * @returns boolean
2410 * @param pVM The VM to operate on.
2411 */
2412VMMR3DECL(bool) HWACCMR3IsEventPending(PVMCPU pVCpu)
2413{
2414 return HWACCMIsEnabled(pVCpu->pVMR3) && pVCpu->hwaccm.s.Event.fPending;
2415}
2416
2417/**
2418 * Restart an I/O instruction that was refused in ring-0
2419 *
2420 * @returns Strict VBox status code. Informational status codes other than the one documented
2421 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2422 * @retval VINF_SUCCESS Success.
2423 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2424 * status code must be passed on to EM.
2425 * @retval VERR_NOT_FOUND if no pending I/O instruction.
2426 *
2427 * @param pVM The VM to operate on.
2428 * @param pVCpu The VMCPU to operate on.
2429 * @param pCtx VCPU register context
2430 */
2431VMMR3DECL(VBOXSTRICTRC) HWACCMR3RestartPendingIOInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2432{
2433 HWACCMPENDINGIO enmType = pVCpu->hwaccm.s.PendingIO.enmType;
2434
2435 pVCpu->hwaccm.s.PendingIO.enmType = HWACCMPENDINGIO_INVALID;
2436
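/* Only restart the I/O instruction if the guest RIP still points at the instruction that was refused in ring-0. */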
2437 if ( pVCpu->hwaccm.s.PendingIO.GCPtrRip != pCtx->rip
2438 || enmType == HWACCMPENDINGIO_INVALID)
2439 return VERR_NOT_FOUND;
2440
2441 VBOXSTRICTRC rcStrict;
2442 switch (enmType)
2443 {
2444 case HWACCMPENDINGIO_PORT_READ:
2445 {
2446 uint32_t uAndVal = pVCpu->hwaccm.s.PendingIO.s.Port.uAndVal;
2447 uint32_t u32Val = 0;
2448
2449 rcStrict = IOMIOPortRead(pVM, pVCpu->hwaccm.s.PendingIO.s.Port.uPort,
2450 &u32Val,
2451 pVCpu->hwaccm.s.PendingIO.s.Port.cbSize);
2452 if (IOM_SUCCESS(rcStrict))
2453 {
2454 /* Write back to the EAX register. */
2455 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
2456 pCtx->rip = pVCpu->hwaccm.s.PendingIO.GCPtrRipNext;
2457 }
2458 break;
2459 }
2460
2461 case HWACCMPENDINGIO_PORT_WRITE:
2462 rcStrict = IOMIOPortWrite(pVM, pVCpu->hwaccm.s.PendingIO.s.Port.uPort,
2463 pCtx->eax & pVCpu->hwaccm.s.PendingIO.s.Port.uAndVal,
2464 pVCpu->hwaccm.s.PendingIO.s.Port.cbSize);
2465 if (IOM_SUCCESS(rcStrict))
2466 pCtx->rip = pVCpu->hwaccm.s.PendingIO.GCPtrRipNext;
2467 break;
2468
2469 default:
2470 AssertFailed();
2471 return VERR_INTERNAL_ERROR;
2472 }
2473
2474 return rcStrict;
2475}
2476
2477/**
2478 * Inject an NMI into a running VM (only VCPU 0!)
2479 *
2480 * @returns VBox status code.
2481 * @param pVM The VM to operate on.
2482 */
2483VMMR3DECL(int) HWACCMR3InjectNMI(PVM pVM)
2484{
2485 VMCPU_FF_SET(&pVM->aCpus[0], VMCPU_FF_INTERRUPT_NMI);
2486 return VINF_SUCCESS;
2487}
2488
2489/**
2490 * Check a fatal VT-x/AMD-V error and produce a meaningful
2491 * release log message.
2492 *
2493 * @param pVM The VM to operate on.
2494 * @param iStatusCode VBox status code
2495 */
2496VMMR3DECL(void) HWACCMR3CheckError(PVM pVM, int iStatusCode)
2497{
2498 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2499 {
2500 switch(iStatusCode)
2501 {
2502 case VERR_VMX_INVALID_VMCS_FIELD:
2503 break;
2504
2505 case VERR_VMX_INVALID_VMCS_PTR:
2506 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current pointer %RGp vs %RGp\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.u64VMCSPhys, pVM->aCpus[i].hwaccm.s.vmx.pVMCSPhys));
2507 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current VMCS version %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulVMCSRevision));
2508 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Entered Cpu %d\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.idEnteredCpu));
2509 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current Cpu %d\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.idCurrentCpu));
2510 break;
2511
2512 case VERR_VMX_UNABLE_TO_START_VM:
2513 LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulInstrError));
2514 LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d exit reason %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulExitReason));
2515#if 0 /* @todo dump the current control fields to the release log */
2516 if (pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulInstrError == VMX_ERROR_VMENTRY_INVALID_CONTROL_FIELDS)
2517 {
2518
2519 }
2520#endif
2521 break;
2522
2523 case VERR_VMX_UNABLE_TO_RESUME_VM:
2524 LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulInstrError));
2525 LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d exit reason %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulExitReason));
2526 break;
2527
2528 case VERR_VMX_INVALID_VMXON_PTR:
2529 break;
2530 }
2531 }
2532}
2533
2534/**
2535 * Execute state save operation.
2536 *
2537 * @returns VBox status code.
2538 * @param pVM VM Handle.
2539 * @param pSSM SSM operation handle.
2540 */
2541static DECLCALLBACK(int) hwaccmR3Save(PVM pVM, PSSMHANDLE pSSM)
2542{
2543 int rc;
2544
2545 Log(("hwaccmR3Save:\n"));
2546
2547 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2548 {
2549 /*
2550 * Save the basic bits - fortunately all the other things can be resynced on load.
2551 */
2552 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.Event.fPending);
2553 AssertRCReturn(rc, rc);
2554 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.Event.errCode);
2555 AssertRCReturn(rc, rc);
2556 rc = SSMR3PutU64(pSSM, pVM->aCpus[i].hwaccm.s.Event.intInfo);
2557 AssertRCReturn(rc, rc);
2558
2559 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.vmx.enmLastSeenGuestMode);
2560 AssertRCReturn(rc, rc);
2561 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.vmx.enmCurrGuestMode);
2562 AssertRCReturn(rc, rc);
2563 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.vmx.enmPrevGuestMode);
2564 AssertRCReturn(rc, rc);
2565 }
2566#ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
    rc = SSMR3PutGCPtr(pSSM, pVM->hwaccm.s.pGuestPatchMem);
    AssertRCReturn(rc, rc);
    rc = SSMR3PutGCPtr(pSSM, pVM->hwaccm.s.pFreeGuestPatchMem);
    AssertRCReturn(rc, rc);
    rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.cbGuestPatchMem);
    AssertRCReturn(rc, rc);

    /* Store all the guest patch records too. */
    rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.cPatches);
    AssertRCReturn(rc, rc);

    for (unsigned i = 0; i < pVM->hwaccm.s.cPatches; i++)
    {
        PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i];

        rc = SSMR3PutU32(pSSM, pPatch->Core.Key);
        AssertRCReturn(rc, rc);

        rc = SSMR3PutMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
        AssertRCReturn(rc, rc);

        rc = SSMR3PutU32(pSSM, pPatch->cbOp);
        AssertRCReturn(rc, rc);

        rc = SSMR3PutMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
        AssertRCReturn(rc, rc);

        rc = SSMR3PutU32(pSSM, pPatch->cbNewOp);
        AssertRCReturn(rc, rc);

        AssertCompileSize(HWACCMTPRINSTR, 4);
        rc = SSMR3PutU32(pSSM, (uint32_t)pPatch->enmType);
        AssertRCReturn(rc, rc);

        rc = SSMR3PutU32(pSSM, pPatch->uSrcOperand);
        AssertRCReturn(rc, rc);

        rc = SSMR3PutU32(pSSM, pPatch->uDstOperand);
        AssertRCReturn(rc, rc);

        rc = SSMR3PutU32(pSSM, pPatch->pJumpTarget);
        AssertRCReturn(rc, rc);

        rc = SSMR3PutU32(pSSM, pPatch->cFaults);
        AssertRCReturn(rc, rc);
    }
#endif
    return VINF_SUCCESS;
}

/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
static DECLCALLBACK(int) hwaccmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    int rc;

    Log(("hwaccmR3Load:\n"));
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Validate version.
     */
    if (   uVersion != HWACCM_SSM_VERSION
        && uVersion != HWACCM_SSM_VERSION_NO_PATCHING
        && uVersion != HWACCM_SSM_VERSION_2_0_X)
    {
        AssertMsgFailed(("hwaccmR3Load: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }
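    /* The per-VCPU pending event state is present in all supported versions; the
       guest mode tracking fields below were added with HWACCM_SSM_VERSION_NO_PATCHING. */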
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hwaccm.s.Event.fPending);
        AssertRCReturn(rc, rc);
        rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hwaccm.s.Event.errCode);
        AssertRCReturn(rc, rc);
        rc = SSMR3GetU64(pSSM, &pVM->aCpus[i].hwaccm.s.Event.intInfo);
        AssertRCReturn(rc, rc);

        if (uVersion >= HWACCM_SSM_VERSION_NO_PATCHING)
        {
            uint32_t val;

            rc = SSMR3GetU32(pSSM, &val);
            AssertRCReturn(rc, rc);
            pVM->aCpus[i].hwaccm.s.vmx.enmLastSeenGuestMode = (PGMMODE)val;

            rc = SSMR3GetU32(pSSM, &val);
            AssertRCReturn(rc, rc);
            pVM->aCpus[i].hwaccm.s.vmx.enmCurrGuestMode = (PGMMODE)val;

            rc = SSMR3GetU32(pSSM, &val);
            AssertRCReturn(rc, rc);
            pVM->aCpus[i].hwaccm.s.vmx.enmPrevGuestMode = (PGMMODE)val;
        }
    }
#ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
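    /* Restore the guest TPR patching state; it is only present in saved states
       newer than HWACCM_SSM_VERSION_NO_PATCHING. */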
    if (uVersion > HWACCM_SSM_VERSION_NO_PATCHING)
    {
        rc = SSMR3GetGCPtr(pSSM, &pVM->hwaccm.s.pGuestPatchMem);
        AssertRCReturn(rc, rc);
        rc = SSMR3GetGCPtr(pSSM, &pVM->hwaccm.s.pFreeGuestPatchMem);
        AssertRCReturn(rc, rc);
        rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.cbGuestPatchMem);
        AssertRCReturn(rc, rc);

        /* Fetch all TPR patch records. */
        rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.cPatches);
        AssertRCReturn(rc, rc);

        for (unsigned i = 0; i < pVM->hwaccm.s.cPatches; i++)
        {
            PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i];

            rc = SSMR3GetU32(pSSM, &pPatch->Core.Key);
            AssertRCReturn(rc, rc);

            rc = SSMR3GetMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
            AssertRCReturn(rc, rc);

            rc = SSMR3GetU32(pSSM, &pPatch->cbOp);
            AssertRCReturn(rc, rc);

            rc = SSMR3GetMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
            AssertRCReturn(rc, rc);

            rc = SSMR3GetU32(pSSM, &pPatch->cbNewOp);
            AssertRCReturn(rc, rc);

            rc = SSMR3GetU32(pSSM, (uint32_t *)&pPatch->enmType);
            AssertRCReturn(rc, rc);

            if (pPatch->enmType == HWACCMTPRINSTR_JUMP_REPLACEMENT)
                pVM->hwaccm.s.fTPRPatchingActive = true;

            Assert(pPatch->enmType == HWACCMTPRINSTR_JUMP_REPLACEMENT || pVM->hwaccm.s.fTPRPatchingActive == false);

            rc = SSMR3GetU32(pSSM, &pPatch->uSrcOperand);
            AssertRCReturn(rc, rc);

            rc = SSMR3GetU32(pSSM, &pPatch->uDstOperand);
            AssertRCReturn(rc, rc);

            rc = SSMR3GetU32(pSSM, &pPatch->cFaults);
            AssertRCReturn(rc, rc);

            rc = SSMR3GetU32(pSSM, &pPatch->pJumpTarget);
            AssertRCReturn(rc, rc);

            Log(("hwaccmR3Load: patch %d\n", i));
            Log(("Key       = %x\n", pPatch->Core.Key));
            Log(("cbOp      = %d\n", pPatch->cbOp));
            Log(("cbNewOp   = %d\n", pPatch->cbNewOp));
            Log(("type      = %d\n", pPatch->enmType));
            Log(("srcop     = %d\n", pPatch->uSrcOperand));
            Log(("dstop     = %d\n", pPatch->uDstOperand));
            Log(("cFaults   = %d\n", pPatch->cFaults));
            Log(("target    = %x\n", pPatch->pJumpTarget));
            rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
            AssertRC(rc);
        }
    }
#endif

    /* Recheck all VCPUs if we can go straight into hwaccm execution mode. */
    if (HWACCMIsEnabled(pVM))
    {
        for (VMCPUID i = 0; i < pVM->cCpus; i++)
        {
            PVMCPU pVCpu = &pVM->aCpus[i];

            HWACCMR3CanExecuteGuest(pVM, CPUMQueryGuestCtxPtr(pVCpu));
        }
    }
    return VINF_SUCCESS;
}