VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@ 100594

Last change on this file since 100594 was 100184, checked in by vboxsync, 18 months ago

VMM: Add a CPUMGetGuestArch() method and PDM device helper to make it easier to determine the guest architecture and not having to deal with the massive CPUMMICROARCH enum, bugref:10385

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 99.5 KB
Line 
1/* $Id: CPUMAllRegs.cpp 100184 2023-06-16 06:51:39Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_CPUM
33#include <VBox/vmm/cpum.h>
34#include <VBox/vmm/dbgf.h>
35#include <VBox/vmm/apic.h>
36#include <VBox/vmm/pgm.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/em.h>
39#include <VBox/vmm/nem.h>
40#include <VBox/vmm/hm.h>
41#include "CPUMInternal.h"
42#include <VBox/vmm/vmcc.h>
43#include <VBox/err.h>
44#include <VBox/dis.h>
45#include <VBox/log.h>
46#include <VBox/vmm/hm.h>
47#include <VBox/vmm/tm.h>
48#include <iprt/assert.h>
49#include <iprt/asm.h>
50#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
51# include <iprt/asm-amd64-x86.h>
52#endif
53#ifdef IN_RING3
54# include <iprt/thread.h>
55#endif
56
57/** Disable stack frame pointer generation here. */
58#if defined(_MSC_VER) && !defined(DEBUG) && defined(RT_ARCH_X86)
59# pragma optimize("y", off)
60#endif
61
62AssertCompile2MemberOffsets(VM, cpum.s.GuestFeatures, cpum.ro.GuestFeatures);
63
64
65/*********************************************************************************************************************************
66* Defined Constants And Macros *
67*********************************************************************************************************************************/
68/**
69 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
70 *
71 * @returns Pointer to the Virtual CPU.
72 * @param a_pGuestCtx Pointer to the guest context.
73 */
74#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
75
76/**
77 * Lazily loads the hidden parts of a selector register when using raw-mode.
78 */
79#define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
80 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg))
81
82/** @def CPUM_INT_ASSERT_NOT_EXTRN
83 * Macro for asserting that @a a_fNotExtrn are present.
84 *
85 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
86 * @param a_fNotExtrn Mask of CPUMCTX_EXTRN_XXX bits to check.
87 */
88#define CPUM_INT_ASSERT_NOT_EXTRN(a_pVCpu, a_fNotExtrn) \
89 AssertMsg(!((a_pVCpu)->cpum.s.Guest.fExtrn & (a_fNotExtrn)), \
90 ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.s.Guest.fExtrn, (a_fNotExtrn)))
91
92
93VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
94{
95 pVCpu->cpum.s.Hyper.cr3 = cr3;
96}
97
98VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
99{
100 return pVCpu->cpum.s.Hyper.cr3;
101}
102
103
104/** @def MAYBE_LOAD_DRx
105 * Macro for updating DRx values in raw-mode and ring-0 contexts.
106 */
107#ifdef IN_RING0
108# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { a_fnLoad(a_uValue); } while (0)
109#else
110# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
111#endif
112
113VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
114{
115 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
116 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
117}
118
119
120VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
121{
122 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
123 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
124}
125
126
127VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
128{
129 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
130 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
131}
132
133
134VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
135{
136 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
137 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
138}
139
140
141VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
142{
143 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
144}
145
146
147VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
148{
149 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
150}
151
152
153VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
154{
155 return pVCpu->cpum.s.Hyper.dr[0];
156}
157
158
159VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
160{
161 return pVCpu->cpum.s.Hyper.dr[1];
162}
163
164
165VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
166{
167 return pVCpu->cpum.s.Hyper.dr[2];
168}
169
170
171VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
172{
173 return pVCpu->cpum.s.Hyper.dr[3];
174}
175
176
177VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
178{
179 return pVCpu->cpum.s.Hyper.dr[6];
180}
181
182
183VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
184{
185 return pVCpu->cpum.s.Hyper.dr[7];
186}
187
188
189/**
190 * Checks that the special cookie stored in unused reserved RFLAGS bits
191 *
192 * @retval true if cookie is ok.
193 * @retval false if cookie is not ok.
194 * @param pVM The cross context VM structure.
195 * @param pVCpu The cross context virtual CPU structure.
196 */
197VMM_INT_DECL(bool) CPUMAssertGuestRFlagsCookie(PVM pVM, PVMCPU pVCpu)
198{
199 AssertLogRelMsgReturn( ( pVCpu->cpum.s.Guest.rflags.uBoth
200 & ~(uint64_t)(CPUMX86EFLAGS_HW_MASK_64 | CPUMX86EFLAGS_INT_MASK_64))
201 == pVM->cpum.s.fReservedRFlagsCookie
202 && (pVCpu->cpum.s.Guest.rflags.uBoth & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK
203 && (pVCpu->cpum.s.Guest.rflags.uBoth & X86_EFL_RAZ_MASK & CPUMX86EFLAGS_HW_MASK_64) == 0,
204 ("rflags=%#RX64 vs fReservedRFlagsCookie=%#RX64\n",
205 pVCpu->cpum.s.Guest.rflags.uBoth, pVM->cpum.s.fReservedRFlagsCookie),
206 false);
207 return true;
208}
209
210
211/**
212 * Queries the pointer to the internal CPUMCTX structure.
213 *
214 * @returns The CPUMCTX pointer.
215 * @param pVCpu The cross context virtual CPU structure.
216 */
217VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
218{
219 return &pVCpu->cpum.s.Guest;
220}
221
222
223/**
224 * Queries the pointer to the internal CPUMCTXMSRS structure.
225 *
226 * This is for NEM only.
227 *
228 * @returns The CPUMCTX pointer.
229 * @param pVCpu The cross context virtual CPU structure.
230 */
231VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu)
232{
233 return &pVCpu->cpum.s.GuestMsrs;
234}
235
236
237VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
238{
239 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
240 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
241 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_GDTR;
242 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
243 return VINF_SUCCESS; /* formality, consider it void. */
244}
245
246
247VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
248{
249 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
250 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
251 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_IDTR;
252 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
253 return VINF_SUCCESS; /* formality, consider it void. */
254}
255
256
257VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
258{
259 pVCpu->cpum.s.Guest.tr.Sel = tr;
260 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
261 return VINF_SUCCESS; /* formality, consider it void. */
262}
263
264
265VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
266{
267 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
268 /* The caller will set more hidden bits if it has them. */
269 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
270 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
271 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
272 return VINF_SUCCESS; /* formality, consider it void. */
273}
274
275
276/**
277 * Set the guest CR0.
278 *
279 * When called in GC, the hyper CR0 may be updated if that is
280 * required. The caller only has to take special action if AM,
281 * WP, PG or PE changes.
282 *
283 * @returns VINF_SUCCESS (consider it void).
284 * @param pVCpu The cross context virtual CPU structure.
285 * @param cr0 The new CR0 value.
286 */
287VMMDECL(int) CPUMSetGuestCR0(PVMCPUCC pVCpu, uint64_t cr0)
288{
289 /*
290 * Check for changes causing TLB flushes (for REM).
291 * The caller is responsible for calling PGM when appropriate.
292 */
293 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
294 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
295 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
296 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
297
298 /*
299 * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack)
300 */
301 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
302 PGMCr0WpEnabled(pVCpu);
303
304 /* The ET flag is settable on a 386 and hardwired on 486+. */
305 if ( !(cr0 & X86_CR0_ET)
306 && pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386)
307 cr0 |= X86_CR0_ET;
308
309 pVCpu->cpum.s.Guest.cr0 = cr0;
310 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR0;
311 return VINF_SUCCESS;
312}
313
314
315VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
316{
317 pVCpu->cpum.s.Guest.cr2 = cr2;
318 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR2;
319 return VINF_SUCCESS;
320}
321
322
323VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
324{
325 pVCpu->cpum.s.Guest.cr3 = cr3;
326 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
327 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR3;
328 return VINF_SUCCESS;
329}
330
331
332VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
333{
334 /* Note! We don't bother with OSXSAVE and legacy CPUID patches. */
335
336 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
337 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
338 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
339
340 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
341 pVCpu->cpum.s.Guest.cr4 = cr4;
342 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR4;
343 return VINF_SUCCESS;
344}
345
346
347VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
348{
349 pVCpu->cpum.s.Guest.eflags.u = eflags;
350 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
351 return VINF_SUCCESS;
352}
353
354
355VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
356{
357 pVCpu->cpum.s.Guest.eip = eip;
358 return VINF_SUCCESS;
359}
360
361
362VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
363{
364 pVCpu->cpum.s.Guest.eax = eax;
365 return VINF_SUCCESS;
366}
367
368
369VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
370{
371 pVCpu->cpum.s.Guest.ebx = ebx;
372 return VINF_SUCCESS;
373}
374
375
376VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
377{
378 pVCpu->cpum.s.Guest.ecx = ecx;
379 return VINF_SUCCESS;
380}
381
382
383VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
384{
385 pVCpu->cpum.s.Guest.edx = edx;
386 return VINF_SUCCESS;
387}
388
389
390VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
391{
392 pVCpu->cpum.s.Guest.esp = esp;
393 return VINF_SUCCESS;
394}
395
396
397VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
398{
399 pVCpu->cpum.s.Guest.ebp = ebp;
400 return VINF_SUCCESS;
401}
402
403
404VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
405{
406 pVCpu->cpum.s.Guest.esi = esi;
407 return VINF_SUCCESS;
408}
409
410
411VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
412{
413 pVCpu->cpum.s.Guest.edi = edi;
414 return VINF_SUCCESS;
415}
416
417
418VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
419{
420 pVCpu->cpum.s.Guest.ss.Sel = ss;
421 return VINF_SUCCESS;
422}
423
424
425VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
426{
427 pVCpu->cpum.s.Guest.cs.Sel = cs;
428 return VINF_SUCCESS;
429}
430
431
432VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
433{
434 pVCpu->cpum.s.Guest.ds.Sel = ds;
435 return VINF_SUCCESS;
436}
437
438
439VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
440{
441 pVCpu->cpum.s.Guest.es.Sel = es;
442 return VINF_SUCCESS;
443}
444
445
446VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
447{
448 pVCpu->cpum.s.Guest.fs.Sel = fs;
449 return VINF_SUCCESS;
450}
451
452
453VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
454{
455 pVCpu->cpum.s.Guest.gs.Sel = gs;
456 return VINF_SUCCESS;
457}
458
459
460VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
461{
462 pVCpu->cpum.s.Guest.msrEFER = val;
463 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_EFER;
464}
465
466
467VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PCVMCPU pVCpu, uint16_t *pcbLimit)
468{
469 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_IDTR);
470 if (pcbLimit)
471 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
472 return pVCpu->cpum.s.Guest.idtr.pIdt;
473}
474
475
476VMMDECL(RTSEL) CPUMGetGuestTR(PCVMCPU pVCpu, PCPUMSELREGHID pHidden)
477{
478 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_TR);
479 if (pHidden)
480 *pHidden = pVCpu->cpum.s.Guest.tr;
481 return pVCpu->cpum.s.Guest.tr.Sel;
482}
483
484
485VMMDECL(RTSEL) CPUMGetGuestCS(PCVMCPU pVCpu)
486{
487 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CS);
488 return pVCpu->cpum.s.Guest.cs.Sel;
489}
490
491
492VMMDECL(RTSEL) CPUMGetGuestDS(PCVMCPU pVCpu)
493{
494 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DS);
495 return pVCpu->cpum.s.Guest.ds.Sel;
496}
497
498
499VMMDECL(RTSEL) CPUMGetGuestES(PCVMCPU pVCpu)
500{
501 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_ES);
502 return pVCpu->cpum.s.Guest.es.Sel;
503}
504
505
506VMMDECL(RTSEL) CPUMGetGuestFS(PCVMCPU pVCpu)
507{
508 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_FS);
509 return pVCpu->cpum.s.Guest.fs.Sel;
510}
511
512
513VMMDECL(RTSEL) CPUMGetGuestGS(PCVMCPU pVCpu)
514{
515 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_GS);
516 return pVCpu->cpum.s.Guest.gs.Sel;
517}
518
519
520VMMDECL(RTSEL) CPUMGetGuestSS(PCVMCPU pVCpu)
521{
522 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SS);
523 return pVCpu->cpum.s.Guest.ss.Sel;
524}
525
526
527VMMDECL(uint64_t) CPUMGetGuestFlatPC(PVMCPU pVCpu)
528{
529 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
530 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
531 if ( !CPUMIsGuestInLongMode(pVCpu)
532 || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
533 return pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.cs.u64Base;
534 return pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.cs.u64Base;
535}
536
537
538VMMDECL(uint64_t) CPUMGetGuestFlatSP(PVMCPU pVCpu)
539{
540 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
541 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
542 if ( !CPUMIsGuestInLongMode(pVCpu)
543 || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
544 return pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.ss.u64Base;
545 return pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.ss.u64Base;
546}
547
548
549VMMDECL(RTSEL) CPUMGetGuestLDTR(PCVMCPU pVCpu)
550{
551 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_LDTR);
552 return pVCpu->cpum.s.Guest.ldtr.Sel;
553}
554
555
556VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PCVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
557{
558 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_LDTR);
559 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
560 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
561 return pVCpu->cpum.s.Guest.ldtr.Sel;
562}
563
564
565VMMDECL(uint64_t) CPUMGetGuestCR0(PCVMCPU pVCpu)
566{
567 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
568 return pVCpu->cpum.s.Guest.cr0;
569}
570
571
572VMMDECL(uint64_t) CPUMGetGuestCR2(PCVMCPU pVCpu)
573{
574 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR2);
575 return pVCpu->cpum.s.Guest.cr2;
576}
577
578
579VMMDECL(uint64_t) CPUMGetGuestCR3(PCVMCPU pVCpu)
580{
581 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
582 return pVCpu->cpum.s.Guest.cr3;
583}
584
585
586VMMDECL(uint64_t) CPUMGetGuestCR4(PCVMCPU pVCpu)
587{
588 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
589 return pVCpu->cpum.s.Guest.cr4;
590}
591
592
593VMMDECL(uint64_t) CPUMGetGuestCR8(PCVMCPUCC pVCpu)
594{
595 uint64_t u64;
596 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
597 if (RT_FAILURE(rc))
598 u64 = 0;
599 return u64;
600}
601
602
603VMMDECL(void) CPUMGetGuestGDTR(PCVMCPU pVCpu, PVBOXGDTR pGDTR)
604{
605 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_GDTR);
606 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
607}
608
609
610VMMDECL(uint32_t) CPUMGetGuestEIP(PCVMCPU pVCpu)
611{
612 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
613 return pVCpu->cpum.s.Guest.eip;
614}
615
616
617VMMDECL(uint64_t) CPUMGetGuestRIP(PCVMCPU pVCpu)
618{
619 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
620 return pVCpu->cpum.s.Guest.rip;
621}
622
623
624VMMDECL(uint32_t) CPUMGetGuestEAX(PCVMCPU pVCpu)
625{
626 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RAX);
627 return pVCpu->cpum.s.Guest.eax;
628}
629
630
631VMMDECL(uint32_t) CPUMGetGuestEBX(PCVMCPU pVCpu)
632{
633 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RBX);
634 return pVCpu->cpum.s.Guest.ebx;
635}
636
637
638VMMDECL(uint32_t) CPUMGetGuestECX(PCVMCPU pVCpu)
639{
640 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RCX);
641 return pVCpu->cpum.s.Guest.ecx;
642}
643
644
645VMMDECL(uint32_t) CPUMGetGuestEDX(PCVMCPU pVCpu)
646{
647 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RDX);
648 return pVCpu->cpum.s.Guest.edx;
649}
650
651
652VMMDECL(uint32_t) CPUMGetGuestESI(PCVMCPU pVCpu)
653{
654 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSI);
655 return pVCpu->cpum.s.Guest.esi;
656}
657
658
659VMMDECL(uint32_t) CPUMGetGuestEDI(PCVMCPU pVCpu)
660{
661 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RDI);
662 return pVCpu->cpum.s.Guest.edi;
663}
664
665
666VMMDECL(uint32_t) CPUMGetGuestESP(PCVMCPU pVCpu)
667{
668 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSP);
669 return pVCpu->cpum.s.Guest.esp;
670}
671
672
673VMMDECL(uint32_t) CPUMGetGuestEBP(PCVMCPU pVCpu)
674{
675 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RBP);
676 return pVCpu->cpum.s.Guest.ebp;
677}
678
679
680VMMDECL(uint32_t) CPUMGetGuestEFlags(PCVMCPU pVCpu)
681{
682 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RFLAGS);
683 return pVCpu->cpum.s.Guest.eflags.u;
684}
685
686
687VMMDECL(int) CPUMGetGuestCRx(PCVMCPUCC pVCpu, unsigned iReg, uint64_t *pValue)
688{
689 switch (iReg)
690 {
691 case DISCREG_CR0:
692 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
693 *pValue = pVCpu->cpum.s.Guest.cr0;
694 break;
695
696 case DISCREG_CR2:
697 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR2);
698 *pValue = pVCpu->cpum.s.Guest.cr2;
699 break;
700
701 case DISCREG_CR3:
702 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
703 *pValue = pVCpu->cpum.s.Guest.cr3;
704 break;
705
706 case DISCREG_CR4:
707 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
708 *pValue = pVCpu->cpum.s.Guest.cr4;
709 break;
710
711 case DISCREG_CR8:
712 {
713 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
714 uint8_t u8Tpr;
715 int rc = APICGetTpr(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
716 if (RT_FAILURE(rc))
717 {
718 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
719 *pValue = 0;
720 return rc;
721 }
722 *pValue = u8Tpr >> 4; /* bits 7-4 contain the task priority that go in cr8, bits 3-0 */
723 break;
724 }
725
726 default:
727 return VERR_INVALID_PARAMETER;
728 }
729 return VINF_SUCCESS;
730}
731
732
733VMMDECL(uint64_t) CPUMGetGuestDR0(PCVMCPU pVCpu)
734{
735 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
736 return pVCpu->cpum.s.Guest.dr[0];
737}
738
739
740VMMDECL(uint64_t) CPUMGetGuestDR1(PCVMCPU pVCpu)
741{
742 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
743 return pVCpu->cpum.s.Guest.dr[1];
744}
745
746
747VMMDECL(uint64_t) CPUMGetGuestDR2(PCVMCPU pVCpu)
748{
749 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
750 return pVCpu->cpum.s.Guest.dr[2];
751}
752
753
754VMMDECL(uint64_t) CPUMGetGuestDR3(PCVMCPU pVCpu)
755{
756 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
757 return pVCpu->cpum.s.Guest.dr[3];
758}
759
760
761VMMDECL(uint64_t) CPUMGetGuestDR6(PCVMCPU pVCpu)
762{
763 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR6);
764 return pVCpu->cpum.s.Guest.dr[6];
765}
766
767
768VMMDECL(uint64_t) CPUMGetGuestDR7(PCVMCPU pVCpu)
769{
770 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR7);
771 return pVCpu->cpum.s.Guest.dr[7];
772}
773
774
775VMMDECL(int) CPUMGetGuestDRx(PCVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
776{
777 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR_MASK);
778 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
779 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
780 if (iReg == 4 || iReg == 5)
781 iReg += 2;
782 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
783 return VINF_SUCCESS;
784}
785
786
787VMMDECL(uint64_t) CPUMGetGuestEFER(PCVMCPU pVCpu)
788{
789 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
790 return pVCpu->cpum.s.Guest.msrEFER;
791}
792
793
794/**
795 * Looks up a CPUID leaf in the CPUID leaf array, no subleaf.
796 *
797 * @returns Pointer to the leaf if found, NULL if not.
798 *
799 * @param pVM The cross context VM structure.
800 * @param uLeaf The leaf to get.
801 */
802PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf)
803{
804 unsigned iEnd = RT_MIN(pVM->cpum.s.GuestInfo.cCpuIdLeaves, RT_ELEMENTS(pVM->cpum.s.GuestInfo.aCpuIdLeaves));
805 if (iEnd)
806 {
807 unsigned iStart = 0;
808 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.aCpuIdLeaves;
809 for (;;)
810 {
811 unsigned i = iStart + (iEnd - iStart) / 2U;
812 if (uLeaf < paLeaves[i].uLeaf)
813 {
814 if (i <= iStart)
815 return NULL;
816 iEnd = i;
817 }
818 else if (uLeaf > paLeaves[i].uLeaf)
819 {
820 i += 1;
821 if (i >= iEnd)
822 return NULL;
823 iStart = i;
824 }
825 else
826 {
827 if (RT_LIKELY(paLeaves[i].fSubLeafMask == 0 && paLeaves[i].uSubLeaf == 0))
828 return &paLeaves[i];
829
830 /* This shouldn't normally happen. But in case the it does due
831 to user configuration overrids or something, just return the
832 first sub-leaf. */
833 AssertMsgFailed(("uLeaf=%#x fSubLeafMask=%#x uSubLeaf=%#x\n",
834 uLeaf, paLeaves[i].fSubLeafMask, paLeaves[i].uSubLeaf));
835 while ( paLeaves[i].uSubLeaf != 0
836 && i > 0
837 && uLeaf == paLeaves[i - 1].uLeaf)
838 i--;
839 return &paLeaves[i];
840 }
841 }
842 }
843
844 return NULL;
845}
846
847
848/**
849 * Looks up a CPUID leaf in the CPUID leaf array.
850 *
851 * @returns Pointer to the leaf if found, NULL if not.
852 *
853 * @param pVM The cross context VM structure.
854 * @param uLeaf The leaf to get.
855 * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it
856 * isn't.
857 * @param pfExactSubLeafHit Whether we've got an exact subleaf hit or not.
858 */
859PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit)
860{
861 unsigned iEnd = RT_MIN(pVM->cpum.s.GuestInfo.cCpuIdLeaves, RT_ELEMENTS(pVM->cpum.s.GuestInfo.aCpuIdLeaves));
862 if (iEnd)
863 {
864 unsigned iStart = 0;
865 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.aCpuIdLeaves;
866 for (;;)
867 {
868 unsigned i = iStart + (iEnd - iStart) / 2U;
869 if (uLeaf < paLeaves[i].uLeaf)
870 {
871 if (i <= iStart)
872 return NULL;
873 iEnd = i;
874 }
875 else if (uLeaf > paLeaves[i].uLeaf)
876 {
877 i += 1;
878 if (i >= iEnd)
879 return NULL;
880 iStart = i;
881 }
882 else
883 {
884 uSubLeaf &= paLeaves[i].fSubLeafMask;
885 if (uSubLeaf == paLeaves[i].uSubLeaf)
886 *pfExactSubLeafHit = true;
887 else
888 {
889 /* Find the right subleaf. We return the last one before
890 uSubLeaf if we don't find an exact match. */
891 if (uSubLeaf < paLeaves[i].uSubLeaf)
892 while ( i > 0
893 && uLeaf == paLeaves[i - 1].uLeaf
894 && uSubLeaf <= paLeaves[i - 1].uSubLeaf)
895 i--;
896 else
897 while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
898 && uLeaf == paLeaves[i + 1].uLeaf
899 && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
900 i++;
901 *pfExactSubLeafHit = uSubLeaf == paLeaves[i].uSubLeaf;
902 }
903 return &paLeaves[i];
904 }
905 }
906 }
907
908 *pfExactSubLeafHit = false;
909 return NULL;
910}
911
912
913/**
914 * Gets a CPUID leaf.
915 *
916 * @param pVCpu The cross context virtual CPU structure.
917 * @param uLeaf The CPUID leaf to get.
918 * @param uSubLeaf The CPUID sub-leaf to get, if applicable.
919 * @param f64BitMode A tristate indicate if the caller is in 64-bit mode or
920 * not: 1=true, 0=false, 1=whatever. This affect how the
921 * X86_CPUID_EXT_FEATURE_EDX_SYSCALL flag is returned on
922 * Intel CPUs, where it's only returned in 64-bit mode.
923 * @param pEax Where to store the EAX value.
924 * @param pEbx Where to store the EBX value.
925 * @param pEcx Where to store the ECX value.
926 * @param pEdx Where to store the EDX value.
927 */
928VMMDECL(void) CPUMGetGuestCpuId(PVMCPUCC pVCpu, uint32_t uLeaf, uint32_t uSubLeaf, int f64BitMode,
929 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
930{
931 bool fExactSubLeafHit;
932 PVM pVM = pVCpu->CTX_SUFF(pVM);
933 PCCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafEx(pVM, uLeaf, uSubLeaf, &fExactSubLeafHit);
934 if (pLeaf)
935 {
936 AssertMsg(pLeaf->uLeaf == uLeaf, ("%#x %#x\n", pLeaf->uLeaf, uLeaf));
937 if (fExactSubLeafHit)
938 {
939 *pEax = pLeaf->uEax;
940 *pEbx = pLeaf->uEbx;
941 *pEcx = pLeaf->uEcx;
942 *pEdx = pLeaf->uEdx;
943
944 /*
945 * Deal with CPU specific information.
946 */
947 if (pLeaf->fFlags & ( CPUMCPUIDLEAF_F_CONTAINS_APIC_ID
948 | CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE
949 | CPUMCPUIDLEAF_F_CONTAINS_APIC ))
950 {
951 if (uLeaf == 1)
952 {
953 /* EBX: Bits 31-24: Initial APIC ID. */
954 Assert(pVCpu->idCpu <= 255);
955 AssertMsg((pLeaf->uEbx >> 24) == 0, ("%#x\n", pLeaf->uEbx)); /* raw-mode assumption */
956 *pEbx = (pLeaf->uEbx & UINT32_C(0x00ffffff)) | (pVCpu->idCpu << 24);
957
958 /* EDX: Bit 9: AND with APICBASE.EN. */
959 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
960 *pEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
961
962 /* ECX: Bit 27: CR4.OSXSAVE mirror. */
963 *pEcx = (pLeaf->uEcx & ~X86_CPUID_FEATURE_ECX_OSXSAVE)
964 | (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE ? X86_CPUID_FEATURE_ECX_OSXSAVE : 0);
965 }
966 else if (uLeaf == 0xb)
967 {
968 /* EDX: Initial extended APIC ID. */
969 AssertMsg(pLeaf->uEdx == 0, ("%#x\n", pLeaf->uEdx)); /* raw-mode assumption */
970 *pEdx = pVCpu->idCpu;
971 Assert(!(pLeaf->fFlags & ~(CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)));
972 }
973 else if (uLeaf == UINT32_C(0x8000001e))
974 {
975 /* EAX: Initial extended APIC ID. */
976 AssertMsg(pLeaf->uEax == 0, ("%#x\n", pLeaf->uEax)); /* raw-mode assumption */
977 *pEax = pVCpu->idCpu;
978 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC_ID));
979 }
980 else if (uLeaf == UINT32_C(0x80000001))
981 {
982 /* EDX: Bit 9: AND with APICBASE.EN. */
983 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible)
984 *pEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
985 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC));
986 }
987 else
988 AssertMsgFailed(("uLeaf=%#x\n", uLeaf));
989 }
990
991 /* Intel CPUs supresses the SYSCALL bit when not executing in 64-bit mode: */
992 if ( uLeaf == UINT32_C(0x80000001)
993 && f64BitMode == false
994 && (*pEdx & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
995 && ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL
996 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_VIA /*?*/
997 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_SHANGHAI /*?*/ ) )
998 *pEdx &= ~X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
999
1000 }
1001 /*
1002 * Out of range sub-leaves aren't quite as easy and pretty as we emulate
1003 * them here, but we do the best we can here...
1004 */
1005 else
1006 {
1007 *pEax = *pEbx = *pEcx = *pEdx = 0;
1008 if (pLeaf->fFlags & CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)
1009 {
1010 *pEcx = uSubLeaf & 0xff;
1011 *pEdx = pVCpu->idCpu;
1012 }
1013 }
1014 }
1015 else
1016 {
1017 /*
1018 * Different CPUs have different ways of dealing with unknown CPUID leaves.
1019 */
1020 switch (pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod)
1021 {
1022 default:
1023 AssertFailed();
1024 RT_FALL_THRU();
1025 case CPUMUNKNOWNCPUID_DEFAULTS:
1026 case CPUMUNKNOWNCPUID_LAST_STD_LEAF: /* ASSUME this is executed */
1027 case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: /** @todo Implement CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX */
1028 *pEax = pVM->cpum.s.GuestInfo.DefCpuId.uEax;
1029 *pEbx = pVM->cpum.s.GuestInfo.DefCpuId.uEbx;
1030 *pEcx = pVM->cpum.s.GuestInfo.DefCpuId.uEcx;
1031 *pEdx = pVM->cpum.s.GuestInfo.DefCpuId.uEdx;
1032 break;
1033 case CPUMUNKNOWNCPUID_PASSTHRU:
1034 *pEax = uLeaf;
1035 *pEbx = 0;
1036 *pEcx = uSubLeaf;
1037 *pEdx = 0;
1038 break;
1039 }
1040 }
1041 Log2(("CPUMGetGuestCpuId: uLeaf=%#010x/%#010x %RX32 %RX32 %RX32 %RX32\n", uLeaf, uSubLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1042}
1043
1044
1045/**
1046 * Sets the visibility of the X86_CPUID_FEATURE_EDX_APIC and
1047 * X86_CPUID_AMD_FEATURE_EDX_APIC CPUID bits.
1048 *
1049 * @returns Previous value.
1050 * @param pVCpu The cross context virtual CPU structure to make the
1051 * change on. Usually the calling EMT.
1052 * @param fVisible Whether to make it visible (true) or hide it (false).
1053 *
1054 * @remarks This is "VMMDECL" so that it still links with
1055 * the old APIC code which is in VBoxDD2 and not in
1056 * the VMM module.
1057 */
1058VMMDECL(bool) CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible)
1059{
1060 bool fOld = pVCpu->cpum.s.fCpuIdApicFeatureVisible;
1061 pVCpu->cpum.s.fCpuIdApicFeatureVisible = fVisible;
1062 return fOld;
1063}
1064
1065
1066/**
1067 * Gets the host CPU vendor.
1068 *
1069 * @returns CPU vendor.
1070 * @param pVM The cross context VM structure.
1071 */
1072VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1073{
1074 return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
1075}
1076
1077
1078/**
1079 * Gets the host CPU microarchitecture.
1080 *
1081 * @returns CPU microarchitecture.
1082 * @param pVM The cross context VM structure.
1083 */
1084VMMDECL(CPUMMICROARCH) CPUMGetHostMicroarch(PCVM pVM)
1085{
1086 return pVM->cpum.s.HostFeatures.enmMicroarch;
1087}
1088
1089
1090/**
1091 * Gets the guest CPU vendor.
1092 *
1093 * @returns CPU vendor.
1094 * @param pVM The cross context VM structure.
1095 */
1096VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1097{
1098 return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
1099}
1100
1101
1102/**
1103 * Gets the guest CPU architecture.
1104 *
1105 * @returns CPU architecture.
1106 * @param pVM The cross context VM structure.
1107 */
1108VMMDECL(CPUMARCH) CPUMGetGuestArch(PCVM pVM)
1109{
1110 RT_NOREF(pVM);
1111 return kCpumArch_X86; /* Static as we are in the x86 VMM module here. */
1112}
1113
1114
1115/**
1116 * Gets the guest CPU microarchitecture.
1117 *
1118 * @returns CPU microarchitecture.
1119 * @param pVM The cross context VM structure.
1120 */
1121VMMDECL(CPUMMICROARCH) CPUMGetGuestMicroarch(PCVM pVM)
1122{
1123 return pVM->cpum.s.GuestFeatures.enmMicroarch;
1124}
1125
1126
1127/**
1128 * Gets the maximum number of physical and linear address bits supported by the
1129 * guest.
1130 *
1131 * @param pVM The cross context VM structure.
1132 * @param pcPhysAddrWidth Where to store the physical address width.
1133 * @param pcLinearAddrWidth Where to store the linear address width.
1134 */
1135VMMDECL(void) CPUMGetGuestAddrWidths(PCVM pVM, uint8_t *pcPhysAddrWidth, uint8_t *pcLinearAddrWidth)
1136{
1137 AssertPtr(pVM);
1138 AssertReturnVoid(pcPhysAddrWidth);
1139 AssertReturnVoid(pcLinearAddrWidth);
1140 *pcPhysAddrWidth = pVM->cpum.s.GuestFeatures.cMaxPhysAddrWidth;
1141 *pcLinearAddrWidth = pVM->cpum.s.GuestFeatures.cMaxLinearAddrWidth;
1142}
1143
1144
1145VMMDECL(int) CPUMSetGuestDR0(PVMCPUCC pVCpu, uint64_t uDr0)
1146{
1147 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1148 return CPUMRecalcHyperDRx(pVCpu, 0);
1149}
1150
1151
1152VMMDECL(int) CPUMSetGuestDR1(PVMCPUCC pVCpu, uint64_t uDr1)
1153{
1154 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1155 return CPUMRecalcHyperDRx(pVCpu, 1);
1156}
1157
1158
1159VMMDECL(int) CPUMSetGuestDR2(PVMCPUCC pVCpu, uint64_t uDr2)
1160{
1161 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1162 return CPUMRecalcHyperDRx(pVCpu, 2);
1163}
1164
1165
1166VMMDECL(int) CPUMSetGuestDR3(PVMCPUCC pVCpu, uint64_t uDr3)
1167{
1168 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1169 return CPUMRecalcHyperDRx(pVCpu, 3);
1170}
1171
1172
1173VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1174{
1175 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1176 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_DR6;
1177 return VINF_SUCCESS; /* No need to recalc. */
1178}
1179
1180
1181VMMDECL(int) CPUMSetGuestDR7(PVMCPUCC pVCpu, uint64_t uDr7)
1182{
1183 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1184 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_DR7;
1185 return CPUMRecalcHyperDRx(pVCpu, 7);
1186}
1187
1188
1189VMMDECL(int) CPUMSetGuestDRx(PVMCPUCC pVCpu, uint32_t iReg, uint64_t Value)
1190{
1191 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1192 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1193 if (iReg == 4 || iReg == 5)
1194 iReg += 2;
1195 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1196 return CPUMRecalcHyperDRx(pVCpu, iReg);
1197}
1198
1199
1200/**
1201 * Recalculates the hypervisor DRx register values based on current guest
1202 * registers and DBGF breakpoints, updating changed registers depending on the
1203 * context.
1204 *
1205 * This is called whenever a guest DRx register is modified (any context) and
1206 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
1207 *
1208 * In raw-mode context this function will reload any (hyper) DRx registers which
1209 * comes out with a different value. It may also have to save the host debug
1210 * registers if that haven't been done already. In this context though, we'll
1211 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
1212 * are only important when breakpoints are actually enabled.
1213 *
1214 * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
1215 * reloaded by the HM code if it changes. Further more, we will only use the
1216 * combined register set when the VBox debugger is actually using hardware BPs,
1217 * when it isn't we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
1218 * concern us here).
1219 *
1220 * In ring-3 we won't be loading anything, so well calculate hypervisor values
1221 * all the time.
1222 *
1223 * @returns VINF_SUCCESS.
1224 * @param pVCpu The cross context virtual CPU structure.
1225 * @param iGstReg The guest debug register number that was modified.
1226 * UINT8_MAX if not guest register.
1227 */
1228VMMDECL(int) CPUMRecalcHyperDRx(PVMCPUCC pVCpu, uint8_t iGstReg)
1229{
1230 PVM pVM = pVCpu->CTX_SUFF(pVM);
1231#ifndef IN_RING0
1232 RT_NOREF_PV(iGstReg);
1233#endif
1234
1235 /*
1236 * Compare the DR7s first.
1237 *
1238 * We only care about the enabled flags. GD is virtualized when we
1239 * dispatch the #DB, we never enable it. The DBGF DR7 value is will
1240 * always have the LE and GE bits set, so no need to check and disable
1241 * stuff if they're cleared like we have to for the guest DR7.
1242 */
1243 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1244 /** @todo This isn't correct. BPs work without setting LE and GE under AMD-V. They are also documented as unsupported by P6+. */
1245 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
1246 uGstDr7 = 0;
1247 else if (!(uGstDr7 & X86_DR7_LE))
1248 uGstDr7 &= ~X86_DR7_LE_ALL;
1249 else if (!(uGstDr7 & X86_DR7_GE))
1250 uGstDr7 &= ~X86_DR7_GE_ALL;
1251
1252 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1253 if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
1254 {
1255 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1256
1257 /*
1258 * Ok, something is enabled. Recalc each of the breakpoints, taking
1259 * the VM debugger ones of the guest ones. In raw-mode context we will
1260 * not allow breakpoints with values inside the hypervisor area.
1261 */
1262 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
1263
1264 /* bp 0 */
1265 RTGCUINTREG uNewDr0;
1266 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1267 {
1268 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1269 uNewDr0 = DBGFBpGetDR0(pVM);
1270 }
1271 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1272 {
1273 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1274 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1275 }
1276 else
1277 uNewDr0 = 0;
1278
1279 /* bp 1 */
1280 RTGCUINTREG uNewDr1;
1281 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1282 {
1283 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1284 uNewDr1 = DBGFBpGetDR1(pVM);
1285 }
1286 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1287 {
1288 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1289 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1290 }
1291 else
1292 uNewDr1 = 0;
1293
1294 /* bp 2 */
1295 RTGCUINTREG uNewDr2;
1296 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1297 {
1298 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1299 uNewDr2 = DBGFBpGetDR2(pVM);
1300 }
1301 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1302 {
1303 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1304 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1305 }
1306 else
1307 uNewDr2 = 0;
1308
1309 /* bp 3 */
1310 RTGCUINTREG uNewDr3;
1311 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1312 {
1313 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1314 uNewDr3 = DBGFBpGetDR3(pVM);
1315 }
1316 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1317 {
1318 uNewDr3 = CPUMGetGuestDR3(pVCpu);
1319 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1320 }
1321 else
1322 uNewDr3 = 0;
1323
1324 /*
1325 * Apply the updates.
1326 */
1327 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
1328 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
1329 CPUMSetHyperDR3(pVCpu, uNewDr3);
1330 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
1331 CPUMSetHyperDR2(pVCpu, uNewDr2);
1332 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
1333 CPUMSetHyperDR1(pVCpu, uNewDr1);
1334 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
1335 CPUMSetHyperDR0(pVCpu, uNewDr0);
1336 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
1337 CPUMSetHyperDR7(pVCpu, uNewDr7);
1338 }
1339#ifdef IN_RING0
1340 else if (CPUMIsGuestDebugStateActive(pVCpu))
1341 {
1342 /*
1343 * Reload the register that was modified. Normally this won't happen
1344 * as we won't intercept DRx writes when not having the hyper debug
1345 * state loaded, but in case we do for some reason we'll simply deal
1346 * with it.
1347 */
1348 switch (iGstReg)
1349 {
1350 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
1351 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
1352 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
1353 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
1354 default:
1355 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
1356 }
1357 }
1358#endif
1359 else
1360 {
1361 /*
1362 * No active debug state any more. In raw-mode this means we have to
1363 * make sure DR7 has everything disabled now, if we armed it already.
1364 * In ring-0 we might end up here when just single stepping.
1365 */
1366#ifdef IN_RING0
1367 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
1368 {
1369 if (pVCpu->cpum.s.Hyper.dr[0])
1370 ASMSetDR0(0);
1371 if (pVCpu->cpum.s.Hyper.dr[1])
1372 ASMSetDR1(0);
1373 if (pVCpu->cpum.s.Hyper.dr[2])
1374 ASMSetDR2(0);
1375 if (pVCpu->cpum.s.Hyper.dr[3])
1376 ASMSetDR3(0);
1377 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
1378 }
1379#endif
1380 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
1381
1382 /* Clear all the registers. */
1383 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
1384 pVCpu->cpum.s.Hyper.dr[3] = 0;
1385 pVCpu->cpum.s.Hyper.dr[2] = 0;
1386 pVCpu->cpum.s.Hyper.dr[1] = 0;
1387 pVCpu->cpum.s.Hyper.dr[0] = 0;
1388
1389 }
1390 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
1391 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
1392 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
1393 pVCpu->cpum.s.Hyper.dr[7]));
1394
1395 return VINF_SUCCESS;
1396}
1397
1398
1399/**
1400 * Set the guest XCR0 register.
1401 *
1402 * Will load additional state if the FPU state is already loaded (in ring-0 &
1403 * raw-mode context).
1404 *
1405 * @returns VINF_SUCCESS on success, VERR_CPUM_RAISE_GP_0 on invalid input
1406 * value.
1407 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1408 * @param uNewValue The new value.
1409 * @thread EMT(pVCpu)
1410 */
1411VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPUCC pVCpu, uint64_t uNewValue)
1412{
1413 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_XCRx);
1414 if ( (uNewValue & ~pVCpu->CTX_SUFF(pVM)->cpum.s.fXStateGuestMask) == 0
1415 /* The X87 bit cannot be cleared. */
1416 && (uNewValue & XSAVE_C_X87)
1417 /* AVX requires SSE. */
1418 && (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM)) != XSAVE_C_YMM
1419 /* AVX-512 requires YMM, SSE and all of its three components to be enabled. */
1420 && ( (uNewValue & (XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI)) == 0
1421 || (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI))
1422 == (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI) )
1423 )
1424 {
1425 pVCpu->cpum.s.Guest.aXcr[0] = uNewValue;
1426
1427 /* If more state components are enabled, we need to take care to load
1428 them if the FPU/SSE state is already loaded. May otherwise leak
1429 host state to the guest. */
1430 uint64_t fNewComponents = ~pVCpu->cpum.s.Guest.fXStateMask & uNewValue;
1431 if (fNewComponents)
1432 {
1433#ifdef IN_RING0
1434 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
1435 {
1436 if (pVCpu->cpum.s.Guest.fXStateMask != 0)
1437 /* Adding more components. */
1438 ASMXRstor(&pVCpu->cpum.s.Guest.XState, fNewComponents);
1439 else
1440 {
1441 /* We're switching from FXSAVE/FXRSTOR to XSAVE/XRSTOR. */
1442 pVCpu->cpum.s.Guest.fXStateMask |= XSAVE_C_X87 | XSAVE_C_SSE;
1443 if (uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE))
1444 ASMXRstor(&pVCpu->cpum.s.Guest.XState, uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE));
1445 }
1446 }
1447#endif
1448 pVCpu->cpum.s.Guest.fXStateMask |= uNewValue;
1449 }
1450 return VINF_SUCCESS;
1451 }
1452 return VERR_CPUM_RAISE_GP_0;
1453}
1454
1455
1456/**
1457 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
1458 *
1459 * @returns true if in real mode, otherwise false.
1460 * @param pVCpu The cross context virtual CPU structure.
1461 */
1462VMMDECL(bool) CPUMIsGuestNXEnabled(PCVMCPU pVCpu)
1463{
1464 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
1465 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
1466}
1467
1468
1469/**
1470 * Tests if the guest has the Page Size Extension enabled (PSE).
1471 *
1472 * @returns true if in real mode, otherwise false.
1473 * @param pVCpu The cross context virtual CPU structure.
1474 */
1475VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PCVMCPU pVCpu)
1476{
1477 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
1478 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
1479 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
1480}
1481
1482
1483/**
1484 * Tests if the guest has the paging enabled (PG).
1485 *
1486 * @returns true if in real mode, otherwise false.
1487 * @param pVCpu The cross context virtual CPU structure.
1488 */
1489VMMDECL(bool) CPUMIsGuestPagingEnabled(PCVMCPU pVCpu)
1490{
1491 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1492 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
1493}
1494
1495
1496/**
1497 * Tests if the guest has the paging enabled (PG).
1498 *
1499 * @returns true if in real mode, otherwise false.
1500 * @param pVCpu The cross context virtual CPU structure.
1501 */
1502VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PCVMCPU pVCpu)
1503{
1504 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1505 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
1506}
1507
1508
1509/**
1510 * Tests if the guest is running in real mode or not.
1511 *
1512 * @returns true if in real mode, otherwise false.
1513 * @param pVCpu The cross context virtual CPU structure.
1514 */
1515VMMDECL(bool) CPUMIsGuestInRealMode(PCVMCPU pVCpu)
1516{
1517 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1518 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1519}
1520
1521
1522/**
1523 * Tests if the guest is running in real or virtual 8086 mode.
1524 *
1525 * @returns @c true if it is, @c false if not.
1526 * @param pVCpu The cross context virtual CPU structure.
1527 */
1528VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PCVMCPU pVCpu)
1529{
1530 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS);
1531 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
1532 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
1533}
1534
1535
1536/**
1537 * Tests if the guest is running in protected or not.
1538 *
1539 * @returns true if in protected mode, otherwise false.
1540 * @param pVCpu The cross context virtual CPU structure.
1541 */
1542VMMDECL(bool) CPUMIsGuestInProtectedMode(PCVMCPU pVCpu)
1543{
1544 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1545 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1546}
1547
1548
1549/**
1550 * Tests if the guest is running in paged protected or not.
1551 *
1552 * @returns true if in paged protected mode, otherwise false.
1553 * @param pVCpu The cross context virtual CPU structure.
1554 */
1555VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PCVMCPU pVCpu)
1556{
1557 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
1558 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1559}
1560
1561
1562/**
1563 * Tests if the guest is running in long mode or not.
1564 *
1565 * @returns true if in long mode, otherwise false.
1566 * @param pVCpu The cross context virtual CPU structure.
1567 */
1568VMMDECL(bool) CPUMIsGuestInLongMode(PCVMCPU pVCpu)
1569{
1570 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
1571 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1572}
1573
1574
1575/**
1576 * Tests if the guest is running in PAE mode or not.
1577 *
1578 * @returns true if in PAE mode, otherwise false.
1579 * @param pVCpu The cross context virtual CPU structure.
1580 */
1581VMMDECL(bool) CPUMIsGuestInPAEMode(PCVMCPU pVCpu)
1582{
1583 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
1584 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
1585 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
1586 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
1587 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
1588 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
1589}
1590
1591
1592/**
1593 * Tests if the guest is running in 64 bits mode or not.
1594 *
1595 * @returns true if in 64 bits protected mode, otherwise false.
1596 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1597 */
1598VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
1599{
1600 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
1601 if (!CPUMIsGuestInLongMode(pVCpu))
1602 return false;
1603 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
1604 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
1605}
1606
1607
1608/**
1609 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
1610 * registers.
1611 *
1612 * @returns true if in 64 bits protected mode, otherwise false.
1613 * @param pCtx Pointer to the current guest CPU context.
1614 */
1615VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
1616{
1617 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
1618}
1619
1620
1621/**
1622 * Sets the specified changed flags (CPUM_CHANGED_*).
1623 *
1624 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1625 * @param fChangedAdd The changed flags to add.
1626 */
1627VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd)
1628{
1629 pVCpu->cpum.s.fChanged |= fChangedAdd;
1630}
1631
1632
1633/**
1634 * Checks if the CPU supports the XSAVE and XRSTOR instruction.
1635 *
1636 * @returns true if supported.
1637 * @returns false if not supported.
1638 * @param pVM The cross context VM structure.
1639 */
1640VMMDECL(bool) CPUMSupportsXSave(PVM pVM)
1641{
1642 return pVM->cpum.s.HostFeatures.fXSaveRstor != 0;
1643}
1644
1645
1646/**
1647 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
1648 * @returns true if used.
1649 * @returns false if not used.
1650 * @param pVM The cross context VM structure.
1651 */
1652VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
1653{
1654 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
1655}
1656
1657
1658/**
1659 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
1660 * @returns true if used.
1661 * @returns false if not used.
1662 * @param pVM The cross context VM structure.
1663 */
1664VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
1665{
1666 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
1667}
1668
1669
1670/**
1671 * Checks if we activated the FPU/XMM state of the guest OS.
1672 *
1673 * Obsolete: This differs from CPUMIsGuestFPUStateLoaded() in that it refers to
1674 * the next time we'll be executing guest code, so it may return true for
1675 * 64-on-32 when we still haven't actually loaded the FPU status, just scheduled
1676 * it to be loaded the next time we go thru the world switcher
1677 * (CPUM_SYNC_FPU_STATE).
1678 *
1679 * @returns true / false.
1680 * @param pVCpu The cross context virtual CPU structure.
1681 */
1682VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
1683{
1684 bool fRet = RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
1685 AssertMsg(fRet == pVCpu->cpum.s.Guest.fUsedFpuGuest, ("fRet=%d\n", fRet));
1686 return fRet;
1687}
1688
1689
1690/**
1691 * Checks if we've really loaded the FPU/XMM state of the guest OS.
1692 *
1693 * @returns true / false.
1694 * @param pVCpu The cross context virtual CPU structure.
1695 */
1696VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu)
1697{
1698 bool fRet = RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
1699 AssertMsg(fRet == pVCpu->cpum.s.Guest.fUsedFpuGuest, ("fRet=%d\n", fRet));
1700 return fRet;
1701}
1702
1703
1704/**
1705 * Checks if we saved the FPU/XMM state of the host OS.
1706 *
1707 * @returns true / false.
1708 * @param pVCpu The cross context virtual CPU structure.
1709 */
1710VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu)
1711{
1712 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_HOST);
1713}
1714
1715
1716/**
1717 * Checks if the guest debug state is active.
1718 *
1719 * @returns boolean
1720 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1721 */
1722VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
1723{
1724 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
1725}
1726
1727
1728/**
1729 * Checks if the hyper debug state is active.
1730 *
1731 * @returns boolean
1732 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1733 */
1734VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
1735{
1736 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
1737}
1738
1739
1740/**
1741 * Mark the guest's debug state as inactive.
1742 *
1743 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1744 * @todo This API doesn't make sense any more.
1745 */
1746VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
1747{
1748 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
1749 NOREF(pVCpu);
1750}
1751
1752
1753/**
1754 * Get the current privilege level of the guest.
1755 *
1756 * @returns CPL
1757 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1758 */
1759VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
1760{
1761 /*
1762 * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
1763 *
1764 * Note! We used to check CS.DPL here, assuming it was always equal to
1765 * CPL even if a conforming segment was loaded. But this turned out to
1766 * only apply to older AMD-V. With VT-x we had an ACP2 regression
1767 * during install after a far call to ring 2 with VT-x. Then on newer
1768 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
1769 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
1770 *
1771 * So, forget CS.DPL, always use SS.DPL.
1772 *
1773 * Note! The SS RPL is always equal to the CPL, while the CS RPL
1774 * isn't necessarily equal if the segment is conforming.
1775 * See section 4.11.1 in the AMD manual.
1776 *
1777 * Update: Where the heck does it say CS.RPL can differ from CPL other than
1778 * right after real->prot mode switch and when in V8086 mode? That
1779 * section says the RPL specified in a direct transfere (call, jmp,
1780 * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
1781 * it would be impossible for an exception handle or the iret
1782 * instruction to figure out whether SS:ESP are part of the frame
1783 * or not. VBox or qemu bug must've lead to this misconception.
1784 *
1785 * Update2: On an AMD bulldozer system here, I've no trouble loading a null
1786 * selector into SS with an RPL other than the CPL when CPL != 3 and
1787 * we're in 64-bit mode. The intel dev box doesn't allow this, on
1788 * RPL = CPL. Weird.
1789 */
1790 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
1791 uint32_t uCpl;
1792 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
1793 {
1794 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
1795 {
1796 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
1797 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
1798 else
1799 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
1800 }
1801 else
1802 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
1803 }
1804 else
1805 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
1806 return uCpl;
1807}
1808
1809
1810/**
1811 * Gets the current guest CPU mode.
1812 *
1813 * If paging mode is what you need, check out PGMGetGuestMode().
1814 *
1815 * @returns The CPU mode.
1816 * @param pVCpu The cross context virtual CPU structure.
1817 */
1818VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
1819{
1820 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
1821 CPUMMODE enmMode;
1822 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
1823 enmMode = CPUMMODE_REAL;
1824 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
1825 enmMode = CPUMMODE_PROTECTED;
1826 else
1827 enmMode = CPUMMODE_LONG;
1828
1829 return enmMode;
1830}
1831
1832
1833/**
1834 * Figure whether the CPU is currently executing 16, 32 or 64 bit code.
1835 *
1836 * @returns 16, 32 or 64.
1837 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1838 */
1839VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
1840{
1841 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS);
1842
1843 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
1844 return 16;
1845
1846 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
1847 {
1848 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
1849 return 16;
1850 }
1851
1852 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
1853 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
1854 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
1855 return 64;
1856
1857 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
1858 return 32;
1859
1860 return 16;
1861}
1862
1863
1864VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
1865{
1866 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS);
1867
1868 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
1869 return DISCPUMODE_16BIT;
1870
1871 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
1872 {
1873 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
1874 return DISCPUMODE_16BIT;
1875 }
1876
1877 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
1878 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
1879 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
1880 return DISCPUMODE_64BIT;
1881
1882 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
1883 return DISCPUMODE_32BIT;
1884
1885 return DISCPUMODE_16BIT;
1886}
1887
1888
1889/**
1890 * Gets the guest MXCSR_MASK value.
1891 *
1892 * This does not access the x87 state, but the value we determined at VM
1893 * initialization.
1894 *
1895 * @returns MXCSR mask.
1896 * @param pVM The cross context VM structure.
1897 */
1898VMMDECL(uint32_t) CPUMGetGuestMxCsrMask(PVM pVM)
1899{
1900 return pVM->cpum.s.GuestInfo.fMxCsrMask;
1901}
1902
1903
1904/**
1905 * Returns whether the guest has physical interrupts enabled.
1906 *
1907 * @returns @c true if interrupts are enabled, @c false otherwise.
1908 * @param pVCpu The cross context virtual CPU structure.
1909 *
1910 * @remarks Warning! This function does -not- take into account the global-interrupt
1911 * flag (GIF).
1912 */
1913VMM_INT_DECL(bool) CPUMIsGuestPhysIntrEnabled(PVMCPU pVCpu)
1914{
1915 switch (CPUMGetGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest))
1916 {
1917 case CPUMHWVIRT_NONE:
1918 default:
1919 return pVCpu->cpum.s.Guest.eflags.Bits.u1IF;
1920 case CPUMHWVIRT_VMX:
1921 return CPUMIsGuestVmxPhysIntrEnabled(&pVCpu->cpum.s.Guest);
1922 case CPUMHWVIRT_SVM:
1923 return CPUMIsGuestSvmPhysIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);
1924 }
1925}
1926
1927
1928/**
1929 * Returns whether the nested-guest has virtual interrupts enabled.
1930 *
1931 * @returns @c true if interrupts are enabled, @c false otherwise.
1932 * @param pVCpu The cross context virtual CPU structure.
1933 *
1934 * @remarks Warning! This function does -not- take into account the global-interrupt
1935 * flag (GIF).
1936 */
1937VMM_INT_DECL(bool) CPUMIsGuestVirtIntrEnabled(PVMCPU pVCpu)
1938{
1939 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
1940 Assert(CPUMIsGuestInNestedHwvirtMode(pCtx));
1941
1942 if (CPUMIsGuestInVmxNonRootMode(pCtx))
1943 return CPUMIsGuestVmxVirtIntrEnabled(pCtx);
1944
1945 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
1946 return CPUMIsGuestSvmVirtIntrEnabled(pVCpu, pCtx);
1947}
1948
1949
1950/**
1951 * Calculates the interruptiblity of the guest.
1952 *
1953 * @returns Interruptibility level.
1954 * @param pVCpu The cross context virtual CPU structure.
1955 */
1956VMM_INT_DECL(CPUMINTERRUPTIBILITY) CPUMGetGuestInterruptibility(PVMCPU pVCpu)
1957{
1958#if 1
1959 /* Global-interrupt flag blocks pretty much everything we care about here. */
1960 if (CPUMGetGuestGif(&pVCpu->cpum.s.Guest))
1961 {
1962 /*
1963 * Physical interrupts are primarily blocked using EFLAGS. However, we cannot access
1964 * it directly here. If and how EFLAGS are used depends on the context (nested-guest
1965 * or raw-mode). Hence we use the function below which handles the details.
1966 */
1967 if ( !(pVCpu->cpum.s.Guest.eflags.uBoth & CPUMCTX_INHIBIT_ALL_MASK)
1968 || ( !(pVCpu->cpum.s.Guest.eflags.uBoth & CPUMCTX_INHIBIT_NMI)
1969 && pVCpu->cpum.s.Guest.uRipInhibitInt != pVCpu->cpum.s.Guest.rip))
1970 {
1971 /** @todo OPT: this next call should be inlined! */
1972 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
1973 {
1974 /** @todo OPT: type this out as it repeats tests. */
1975 if ( !CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest)
1976 || CPUMIsGuestVirtIntrEnabled(pVCpu))
1977 return CPUMINTERRUPTIBILITY_UNRESTRAINED;
1978
1979 /* Physical interrupts are enabled, but nested-guest virtual interrupts are disabled. */
1980 return CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED;
1981 }
1982 return CPUMINTERRUPTIBILITY_INT_DISABLED;
1983 }
1984
1985 /*
1986 * Blocking the delivery of NMIs during an interrupt shadow is CPU implementation
1987 * specific. Therefore, in practice, we can't deliver an NMI in an interrupt shadow.
1988 * However, there is some uncertainity regarding the converse, i.e. whether
1989 * NMI-blocking until IRET blocks delivery of physical interrupts.
1990 *
1991 * See Intel spec. 25.4.1 "Event Blocking".
1992 */
1993 /** @todo r=bird: The above comment mixes up VMX root-mode and non-root. Section
1994 * 25.4.1 is only applicable to VMX non-root mode. In root mode /
1995 * non-VMX mode, I have not see any evidence in the intel manuals that
1996 * NMIs are not blocked when in an interrupt shadow. Section "6.7
1997 * NONMASKABLE INTERRUPT (NMI)" in SDM 3A seems pretty clear to me.
1998 */
1999 if (!(pVCpu->cpum.s.Guest.eflags.uBoth & CPUMCTX_INHIBIT_NMI))
2000 return CPUMINTERRUPTIBILITY_INT_INHIBITED;
2001 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
2002 }
2003 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
2004#else
2005 if (pVCpu->cpum.s.Guest.rflags.Bits.u1IF)
2006 {
2007 if (pVCpu->cpum.s.Guest.hwvirt.fGif)
2008 {
2009 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS | VMCPU_FF_INHIBIT_INTERRUPTS))
2010 return CPUMINTERRUPTIBILITY_UNRESTRAINED;
2011
2012 /** @todo does blocking NMIs mean interrupts are also inhibited? */
2013 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2014 {
2015 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2016 return CPUMINTERRUPTIBILITY_INT_INHIBITED;
2017 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
2018 }
2019 AssertFailed();
2020 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
2021 }
2022 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
2023 }
2024 else
2025 {
2026 if (pVCpu->cpum.s.Guest.hwvirt.fGif)
2027 {
2028 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2029 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
2030 return CPUMINTERRUPTIBILITY_INT_DISABLED;
2031 }
2032 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
2033 }
2034#endif
2035}
2036
2037
2038/**
2039 * Checks whether the SVM nested-guest has physical interrupts enabled.
2040 *
2041 * @returns true if interrupts are enabled, false otherwise.
2042 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2043 * @param pCtx The guest-CPU context.
2044 *
2045 * @remarks This does -not- take into account the global-interrupt flag.
2046 */
2047VMM_INT_DECL(bool) CPUMIsGuestSvmPhysIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2048{
2049 /** @todo Optimization: Avoid this function call and use a pointer to the
2050 * relevant eflags instead (setup during VMRUN instruction emulation). */
2051 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2052
2053 X86EFLAGS fEFlags;
2054 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, pCtx))
2055 fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
2056 else
2057 fEFlags.u = pCtx->eflags.u;
2058
2059 return fEFlags.Bits.u1IF;
2060}
2061
2062
2063/**
2064 * Checks whether the SVM nested-guest is in a state to receive virtual (setup
2065 * for injection by VMRUN instruction) interrupts.
2066 *
2067 * @returns VBox status code.
2068 * @retval true if it's ready, false otherwise.
2069 *
2070 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2071 * @param pCtx The guest-CPU context.
2072 */
2073VMM_INT_DECL(bool) CPUMIsGuestSvmVirtIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2074{
2075 RT_NOREF(pVCpu);
2076 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2077
2078 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.Vmcb.ctrl;
2079 PCSVMINTCTRL pVmcbIntCtrl = &pVmcbCtrl->IntCtrl;
2080 Assert(!pVmcbIntCtrl->n.u1VGifEnable); /* We don't support passing virtual-GIF feature to the guest yet. */
2081 if ( !pVmcbIntCtrl->n.u1IgnoreTPR
2082 && pVmcbIntCtrl->n.u4VIntrPrio <= pVmcbIntCtrl->n.u8VTPR)
2083 return false;
2084
2085 return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
2086}
2087
2088
2089/**
2090 * Gets the pending SVM nested-guest interruptvector.
2091 *
2092 * @returns The nested-guest interrupt to inject.
2093 * @param pCtx The guest-CPU context.
2094 */
2095VMM_INT_DECL(uint8_t) CPUMGetGuestSvmVirtIntrVector(PCCPUMCTX pCtx)
2096{
2097 return pCtx->hwvirt.svm.Vmcb.ctrl.IntCtrl.n.u8VIntrVector;
2098}
2099
2100
2101/**
2102 * Restores the host-state from the host-state save area as part of a \#VMEXIT.
2103 *
2104 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2105 * @param pCtx The guest-CPU context.
2106 */
2107VMM_INT_DECL(void) CPUMSvmVmExitRestoreHostState(PVMCPUCC pVCpu, PCPUMCTX pCtx)
2108{
2109 /*
2110 * Reload the guest's "host state".
2111 */
2112 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2113 pCtx->es = pHostState->es;
2114 pCtx->cs = pHostState->cs;
2115 pCtx->ss = pHostState->ss;
2116 pCtx->ds = pHostState->ds;
2117 pCtx->gdtr = pHostState->gdtr;
2118 pCtx->idtr = pHostState->idtr;
2119 CPUMSetGuestEferMsrNoChecks(pVCpu, pCtx->msrEFER, pHostState->uEferMsr);
2120 CPUMSetGuestCR0(pVCpu, pHostState->uCr0 | X86_CR0_PE);
2121 pCtx->cr3 = pHostState->uCr3;
2122 CPUMSetGuestCR4(pVCpu, pHostState->uCr4);
2123 pCtx->rflags.u = pHostState->rflags.u;
2124 pCtx->rflags.Bits.u1VM = 0;
2125 pCtx->rip = pHostState->uRip;
2126 pCtx->rsp = pHostState->uRsp;
2127 pCtx->rax = pHostState->uRax;
2128 pCtx->dr[7] &= ~(X86_DR7_ENABLED_MASK | X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
2129 pCtx->dr[7] |= X86_DR7_RA1_MASK;
2130 Assert(pCtx->ss.Attr.n.u2Dpl == 0);
2131
2132 /** @todo if RIP is not canonical or outside the CS segment limit, we need to
2133 * raise \#GP(0) in the guest. */
2134
2135 /** @todo check the loaded host-state for consistency. Figure out what
2136 * exactly this involves? */
2137}
2138
2139
2140/**
2141 * Saves the host-state to the host-state save area as part of a VMRUN.
2142 *
2143 * @param pCtx The guest-CPU context.
2144 * @param cbInstr The length of the VMRUN instruction in bytes.
2145 */
2146VMM_INT_DECL(void) CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr)
2147{
2148 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2149 pHostState->es = pCtx->es;
2150 pHostState->cs = pCtx->cs;
2151 pHostState->ss = pCtx->ss;
2152 pHostState->ds = pCtx->ds;
2153 pHostState->gdtr = pCtx->gdtr;
2154 pHostState->idtr = pCtx->idtr;
2155 pHostState->uEferMsr = pCtx->msrEFER;
2156 pHostState->uCr0 = pCtx->cr0;
2157 pHostState->uCr3 = pCtx->cr3;
2158 pHostState->uCr4 = pCtx->cr4;
2159 pHostState->rflags.u = pCtx->rflags.u;
2160 pHostState->uRip = pCtx->rip + cbInstr;
2161 pHostState->uRsp = pCtx->rsp;
2162 pHostState->uRax = pCtx->rax;
2163}
2164
2165
2166/**
2167 * Applies the TSC offset of a nested-guest if any and returns the TSC value for the
2168 * nested-guest.
2169 *
2170 * @returns The TSC offset after applying any nested-guest TSC offset.
2171 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2172 * @param uTscValue The guest TSC.
2173 *
2174 * @sa CPUMRemoveNestedGuestTscOffset.
2175 */
2176VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue)
2177{
2178 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2179 if (CPUMIsGuestInVmxNonRootMode(pCtx))
2180 {
2181 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
2182 return uTscValue + pCtx->hwvirt.vmx.Vmcs.u64TscOffset.u;
2183 return uTscValue;
2184 }
2185
2186 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
2187 {
2188 uint64_t offTsc;
2189 if (!HMGetGuestSvmTscOffset(pVCpu, &offTsc))
2190 offTsc = pCtx->hwvirt.svm.Vmcb.ctrl.u64TSCOffset;
2191 return uTscValue + offTsc;
2192 }
2193 return uTscValue;
2194}
2195
2196
2197/**
2198 * Removes the TSC offset of a nested-guest if any and returns the TSC value for the
2199 * guest.
2200 *
2201 * @returns The TSC offset after removing any nested-guest TSC offset.
2202 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2203 * @param uTscValue The nested-guest TSC.
2204 *
2205 * @sa CPUMApplyNestedGuestTscOffset.
2206 */
2207VMM_INT_DECL(uint64_t) CPUMRemoveNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue)
2208{
2209 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2210 if (CPUMIsGuestInVmxNonRootMode(pCtx))
2211 {
2212 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
2213 return uTscValue - pCtx->hwvirt.vmx.Vmcs.u64TscOffset.u;
2214 return uTscValue;
2215 }
2216
2217 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
2218 {
2219 uint64_t offTsc;
2220 if (!HMGetGuestSvmTscOffset(pVCpu, &offTsc))
2221 offTsc = pCtx->hwvirt.svm.Vmcb.ctrl.u64TSCOffset;
2222 return uTscValue - offTsc;
2223 }
2224 return uTscValue;
2225}
2226
2227
2228/**
2229 * Used to dynamically imports state residing in NEM or HM.
2230 *
2231 * This is a worker for the CPUM_IMPORT_EXTRN_RET() macro and various IEM ones.
2232 *
2233 * @returns VBox status code.
2234 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2235 * @param fExtrnImport The fields to import.
2236 * @thread EMT(pVCpu)
2237 */
2238VMM_INT_DECL(int) CPUMImportGuestStateOnDemand(PVMCPUCC pVCpu, uint64_t fExtrnImport)
2239{
2240 VMCPU_ASSERT_EMT(pVCpu);
2241 if (pVCpu->cpum.s.Guest.fExtrn & fExtrnImport)
2242 {
2243 switch (pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_KEEPER_MASK)
2244 {
2245 case CPUMCTX_EXTRN_KEEPER_NEM:
2246 {
2247 int rc = NEMImportStateOnDemand(pVCpu, fExtrnImport);
2248 Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
2249 return rc;
2250 }
2251
2252 case CPUMCTX_EXTRN_KEEPER_HM:
2253 {
2254#ifdef IN_RING0
2255 int rc = HMR0ImportStateOnDemand(pVCpu, fExtrnImport);
2256 Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
2257 return rc;
2258#else
2259 AssertLogRelMsgFailed(("TODO Fetch HM state: %#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport));
2260 return VINF_SUCCESS;
2261#endif
2262 }
2263 default:
2264 AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2);
2265 }
2266 }
2267 return VINF_SUCCESS;
2268}
2269
2270
2271/**
2272 * Gets valid CR4 bits for the guest.
2273 *
2274 * @returns Valid CR4 bits.
2275 * @param pVM The cross context VM structure.
2276 */
2277VMM_INT_DECL(uint64_t) CPUMGetGuestCR4ValidMask(PVM pVM)
2278{
2279 PCCPUMFEATURES pGuestFeatures = &pVM->cpum.s.GuestFeatures;
2280 uint64_t fMask = X86_CR4_VME | X86_CR4_PVI
2281 | X86_CR4_TSD | X86_CR4_DE
2282 | X86_CR4_MCE | X86_CR4_PCE;
2283 if (pGuestFeatures->fPae)
2284 fMask |= X86_CR4_PAE;
2285 if (pGuestFeatures->fPge)
2286 fMask |= X86_CR4_PGE;
2287 if (pGuestFeatures->fPse)
2288 fMask |= X86_CR4_PSE;
2289 if (pGuestFeatures->fFxSaveRstor)
2290 fMask |= X86_CR4_OSFXSR;
2291 if (pGuestFeatures->fVmx)
2292 fMask |= X86_CR4_VMXE;
2293 if (pGuestFeatures->fXSaveRstor)
2294 fMask |= X86_CR4_OSXSAVE;
2295 if (pGuestFeatures->fPcid)
2296 fMask |= X86_CR4_PCIDE;
2297 if (pGuestFeatures->fFsGsBase)
2298 fMask |= X86_CR4_FSGSBASE;
2299 if (pGuestFeatures->fSse)
2300 fMask |= X86_CR4_OSXMMEEXCPT;
2301 return fMask;
2302}
2303
2304
2305/**
2306 * Sets the PAE PDPEs for the guest.
2307 *
2308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2309 * @param paPaePdpes The PAE PDPEs to set.
2310 */
2311VMM_INT_DECL(void) CPUMSetGuestPaePdpes(PVMCPU pVCpu, PCX86PDPE paPaePdpes)
2312{
2313 Assert(paPaePdpes);
2314 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->cpum.s.Guest.aPaePdpes); i++)
2315 pVCpu->cpum.s.Guest.aPaePdpes[i].u = paPaePdpes[i].u;
2316 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR3;
2317}
2318
2319
2320/**
2321 * Gets the PAE PDPTEs for the guest.
2322 *
2323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2324 * @param paPaePdpes Where to store the PAE PDPEs.
2325 */
2326VMM_INT_DECL(void) CPUMGetGuestPaePdpes(PVMCPU pVCpu, PX86PDPE paPaePdpes)
2327{
2328 Assert(paPaePdpes);
2329 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
2330 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->cpum.s.Guest.aPaePdpes); i++)
2331 paPaePdpes[i].u = pVCpu->cpum.s.Guest.aPaePdpes[i].u;
2332}
2333
2334
2335/**
2336 * Starts a VMX-preemption timer to expire as specified by the nested hypervisor.
2337 *
2338 * @returns VBox status code.
2339 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2340 * @param uTimer The VMCS preemption timer value.
2341 * @param cShift The VMX-preemption timer shift (usually based on guest
2342 * VMX MSR rate).
2343 * @param pu64EntryTick Where to store the current tick when the timer is
2344 * programmed.
2345 * @thread EMT(pVCpu)
2346 */
2347VMM_INT_DECL(int) CPUMStartGuestVmxPremptTimer(PVMCPUCC pVCpu, uint32_t uTimer, uint8_t cShift, uint64_t *pu64EntryTick)
2348{
2349 Assert(uTimer);
2350 Assert(cShift <= 31);
2351 Assert(pu64EntryTick);
2352 VMCPU_ASSERT_EMT(pVCpu);
2353 uint64_t const cTicksToNext = uTimer << cShift;
2354 return TMTimerSetRelative(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.s.hNestedVmxPreemptTimer, cTicksToNext, pu64EntryTick);
2355}
2356
2357
2358/**
2359 * Stops the VMX-preemption timer from firing.
2360 *
2361 * @returns VBox status code.
2362 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2363 * @thread EMT.
2364 *
2365 * @remarks This can be called during VM reset, so we cannot assume it will be on
2366 * the EMT corresponding to @c pVCpu.
2367 */
2368VMM_INT_DECL(int) CPUMStopGuestVmxPremptTimer(PVMCPUCC pVCpu)
2369{
2370 /*
2371 * CPUM gets initialized before TM, so we defer creation of timers till CPUMR3InitCompleted().
2372 * However, we still get called during CPUMR3Init() and hence we need to check if we have
2373 * a valid timer object before trying to stop it.
2374 */
2375 int rc;
2376 TMTIMERHANDLE hTimer = pVCpu->cpum.s.hNestedVmxPreemptTimer;
2377 if (hTimer != NIL_TMTIMERHANDLE)
2378 {
2379 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2380 rc = TMTimerLock(pVM, hTimer, VERR_IGNORED);
2381 if (rc == VINF_SUCCESS)
2382 {
2383 if (TMTimerIsActive(pVM, hTimer))
2384 TMTimerStop(pVM, hTimer);
2385 TMTimerUnlock(pVM, hTimer);
2386 }
2387 }
2388 else
2389 rc = VERR_NOT_FOUND;
2390 return rc;
2391}
2392
2393
2394/**
2395 * Gets the read and write permission bits for an MSR in an MSR bitmap.
2396 *
2397 * @returns VMXMSRPM_XXX - the MSR permission.
2398 * @param pvMsrBitmap Pointer to the MSR bitmap.
2399 * @param idMsr The MSR to get permissions for.
2400 *
2401 * @sa hmR0VmxSetMsrPermission.
2402 */
2403VMM_INT_DECL(uint32_t) CPUMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr)
2404{
2405 AssertPtrReturn(pvMsrBitmap, VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR);
2406
2407 uint8_t const * const pbMsrBitmap = (uint8_t const * const)pvMsrBitmap;
2408
2409 /*
2410 * MSR Layout:
2411 * Byte index MSR range Interpreted as
2412 * 0x000 - 0x3ff 0x00000000 - 0x00001fff Low MSR read bits.
2413 * 0x400 - 0x7ff 0xc0000000 - 0xc0001fff High MSR read bits.
2414 * 0x800 - 0xbff 0x00000000 - 0x00001fff Low MSR write bits.
2415 * 0xc00 - 0xfff 0xc0000000 - 0xc0001fff High MSR write bits.
2416 *
2417 * A bit corresponding to an MSR within the above range causes a VM-exit
2418 * if the bit is 1 on executions of RDMSR/WRMSR. If an MSR falls out of
2419 * the MSR range, it always cause a VM-exit.
2420 *
2421 * See Intel spec. 24.6.9 "MSR-Bitmap Address".
2422 */
2423 uint32_t const offBitmapRead = 0;
2424 uint32_t const offBitmapWrite = 0x800;
2425 uint32_t offMsr;
2426 uint32_t iBit;
2427 if (idMsr <= UINT32_C(0x00001fff))
2428 {
2429 offMsr = 0;
2430 iBit = idMsr;
2431 }
2432 else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
2433 {
2434 offMsr = 0x400;
2435 iBit = idMsr - UINT32_C(0xc0000000);
2436 }
2437 else
2438 {
2439 LogFunc(("Warning! Out of range MSR %#RX32\n", idMsr));
2440 return VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR;
2441 }
2442
2443 /*
2444 * Get the MSR read permissions.
2445 */
2446 uint32_t fRet;
2447 uint32_t const offMsrRead = offBitmapRead + offMsr;
2448 Assert(offMsrRead + (iBit >> 3) < offBitmapWrite);
2449 if (ASMBitTest(pbMsrBitmap, (offMsrRead << 3) + iBit))
2450 fRet = VMXMSRPM_EXIT_RD;
2451 else
2452 fRet = VMXMSRPM_ALLOW_RD;
2453
2454 /*
2455 * Get the MSR write permissions.
2456 */
2457 uint32_t const offMsrWrite = offBitmapWrite + offMsr;
2458 Assert(offMsrWrite + (iBit >> 3) < X86_PAGE_4K_SIZE);
2459 if (ASMBitTest(pbMsrBitmap, (offMsrWrite << 3) + iBit))
2460 fRet |= VMXMSRPM_EXIT_WR;
2461 else
2462 fRet |= VMXMSRPM_ALLOW_WR;
2463
2464 Assert(VMXMSRPM_IS_FLAG_VALID(fRet));
2465 return fRet;
2466}
2467
2468
2469/**
2470 * Checks the permission bits for the specified I/O port from the given I/O bitmap
2471 * to see if causes a VM-exit.
2472 *
2473 * @returns @c true if the I/O port access must cause a VM-exit, @c false otherwise.
2474 * @param pbIoBitmap Pointer to I/O bitmap.
2475 * @param uPort The I/O port being accessed.
2476 * @param cbAccess e size of the I/O access in bytes (1, 2 or 4 bytes).
2477 */
2478static bool cpumGetVmxIoBitmapPermission(uint8_t const *pbIoBitmap, uint16_t uPort, uint8_t cbAccess)
2479{
2480 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
2481
2482 /*
2483 * If the I/O port access wraps around the 16-bit port I/O space, we must cause a
2484 * VM-exit.
2485 *
2486 * Reading 1, 2, 4 bytes at ports 0xffff, 0xfffe and 0xfffc are valid and do not
2487 * constitute a wrap around. However, reading 2 bytes at port 0xffff or 4 bytes
2488 * from port 0xffff/0xfffe/0xfffd constitute a wrap around. In other words, any
2489 * access to -both- ports 0xffff and port 0 is a wrap around.
2490 *
2491 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2492 */
2493 uint32_t const uPortLast = uPort + cbAccess;
2494 if (uPortLast > 0x10000)
2495 return true;
2496
2497 /*
2498 * If any bit corresponding to the I/O access is set, we must cause a VM-exit.
2499 */
2500 uint16_t const offPerm = uPort >> 3; /* Byte offset of the port. */
2501 uint16_t const idxPermBit = uPort - (offPerm << 3); /* Bit offset within byte. */
2502 Assert(idxPermBit < 8);
2503 static const uint8_t s_afMask[] = { 0x0, 0x1, 0x3, 0x7, 0xf }; /* Bit-mask for all access sizes. */
2504 uint16_t const fMask = s_afMask[cbAccess] << idxPermBit; /* Bit-mask of the access. */
2505
2506 /* Fetch 8 or 16-bits depending on whether the access spans 8-bit boundary. */
2507 RTUINT16U uPerm;
2508 uPerm.s.Lo = pbIoBitmap[offPerm];
2509 if (idxPermBit + cbAccess > 8)
2510 uPerm.s.Hi = pbIoBitmap[offPerm + 1];
2511 else
2512 uPerm.s.Hi = 0;
2513
2514 /* If any bit for the access is 1, we must cause a VM-exit. */
2515 if (uPerm.u & fMask)
2516 return true;
2517
2518 return false;
2519}
2520
2521
2522/**
2523 * Returns whether the given VMCS field is valid and supported for the guest.
2524 *
2525 * @param pVM The cross context VM structure.
2526 * @param u64VmcsField The VMCS field.
2527 *
2528 * @remarks This takes into account the CPU features exposed to the guest.
2529 */
2530VMM_INT_DECL(bool) CPUMIsGuestVmxVmcsFieldValid(PVMCC pVM, uint64_t u64VmcsField)
2531{
2532 uint32_t const uFieldEncHi = RT_HI_U32(u64VmcsField);
2533 uint32_t const uFieldEncLo = RT_LO_U32(u64VmcsField);
2534 if (!uFieldEncHi)
2535 { /* likely */ }
2536 else
2537 return false;
2538
2539 PCCPUMFEATURES pFeat = &pVM->cpum.s.GuestFeatures;
2540 switch (uFieldEncLo)
2541 {
2542 /*
2543 * 16-bit fields.
2544 */
2545 /* Control fields. */
2546 case VMX_VMCS16_VPID: return pFeat->fVmxVpid;
2547 case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR: return pFeat->fVmxPostedInt;
2548 case VMX_VMCS16_EPTP_INDEX: return pFeat->fVmxEptXcptVe;
2549
2550 /* Guest-state fields. */
2551 case VMX_VMCS16_GUEST_ES_SEL:
2552 case VMX_VMCS16_GUEST_CS_SEL:
2553 case VMX_VMCS16_GUEST_SS_SEL:
2554 case VMX_VMCS16_GUEST_DS_SEL:
2555 case VMX_VMCS16_GUEST_FS_SEL:
2556 case VMX_VMCS16_GUEST_GS_SEL:
2557 case VMX_VMCS16_GUEST_LDTR_SEL:
2558 case VMX_VMCS16_GUEST_TR_SEL: return true;
2559 case VMX_VMCS16_GUEST_INTR_STATUS: return pFeat->fVmxVirtIntDelivery;
2560 case VMX_VMCS16_GUEST_PML_INDEX: return pFeat->fVmxPml;
2561
2562 /* Host-state fields. */
2563 case VMX_VMCS16_HOST_ES_SEL:
2564 case VMX_VMCS16_HOST_CS_SEL:
2565 case VMX_VMCS16_HOST_SS_SEL:
2566 case VMX_VMCS16_HOST_DS_SEL:
2567 case VMX_VMCS16_HOST_FS_SEL:
2568 case VMX_VMCS16_HOST_GS_SEL:
2569 case VMX_VMCS16_HOST_TR_SEL: return true;
2570
2571 /*
2572 * 64-bit fields.
2573 */
2574 /* Control fields. */
2575 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
2576 case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
2577 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
2578 case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH: return pFeat->fVmxUseIoBitmaps;
2579 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
2580 case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH: return pFeat->fVmxUseMsrBitmaps;
2581 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
2582 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
2583 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
2584 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
2585 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
2586 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
2587 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
2588 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH: return true;
2589 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
2590 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH: return pFeat->fVmxPml;
2591 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
2592 case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH: return true;
2593 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
2594 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
2595 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
2596 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH: return pFeat->fVmxVirtApicAccess;
2597 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
2598 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
2599 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
2600 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH: return pFeat->fVmxVmFunc;
2601 case VMX_VMCS64_CTRL_EPTP_FULL:
2602 case VMX_VMCS64_CTRL_EPTP_HIGH: return pFeat->fVmxEpt;
2603 case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
2604 case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
2605 case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
2606 case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
2607 case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
2608 case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
2609 case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
2610 case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH: return pFeat->fVmxVirtIntDelivery;
2611 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
2612 case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
2613 {
2614 PCVMCPU pVCpu = pVM->CTX_SUFF(apCpus)[0];
2615 uint64_t const uVmFuncMsr = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64VmFunc;
2616 return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
2617 }
2618 case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
2619 case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
2620 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
2621 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH: return pFeat->fVmxVmcsShadowing;
2622 case VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL:
2623 case VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
2624 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
2625 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
2626 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
2627 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH: return pFeat->fVmxUseTscScaling;
2628 case VMX_VMCS64_CTRL_PROC_EXEC3_FULL:
2629 case VMX_VMCS64_CTRL_PROC_EXEC3_HIGH: return pFeat->fVmxTertiaryExecCtls;
2630
2631 /* Read-only data fields. */
2632 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
2633 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH: return pFeat->fVmxEpt;
2634
2635 /* Guest-state fields. */
2636 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
2637 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
2638 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
2639 case VMX_VMCS64_GUEST_DEBUGCTL_HIGH: return true;
2640 case VMX_VMCS64_GUEST_PAT_FULL:
2641 case VMX_VMCS64_GUEST_PAT_HIGH: return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
2642 case VMX_VMCS64_GUEST_EFER_FULL:
2643 case VMX_VMCS64_GUEST_EFER_HIGH: return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
2644 case VMX_VMCS64_GUEST_PDPTE0_FULL:
2645 case VMX_VMCS64_GUEST_PDPTE0_HIGH:
2646 case VMX_VMCS64_GUEST_PDPTE1_FULL:
2647 case VMX_VMCS64_GUEST_PDPTE1_HIGH:
2648 case VMX_VMCS64_GUEST_PDPTE2_FULL:
2649 case VMX_VMCS64_GUEST_PDPTE2_HIGH:
2650 case VMX_VMCS64_GUEST_PDPTE3_FULL:
2651 case VMX_VMCS64_GUEST_PDPTE3_HIGH: return pFeat->fVmxEpt;
2652
2653 /* Host-state fields. */
2654 case VMX_VMCS64_HOST_PAT_FULL:
2655 case VMX_VMCS64_HOST_PAT_HIGH: return pFeat->fVmxExitLoadPatMsr;
2656 case VMX_VMCS64_HOST_EFER_FULL:
2657 case VMX_VMCS64_HOST_EFER_HIGH: return pFeat->fVmxExitLoadEferMsr;
2658
2659 /*
2660 * 32-bit fields.
2661 */
2662 /* Control fields. */
2663 case VMX_VMCS32_CTRL_PIN_EXEC:
2664 case VMX_VMCS32_CTRL_PROC_EXEC:
2665 case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
2666 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
2667 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
2668 case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
2669 case VMX_VMCS32_CTRL_EXIT:
2670 case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
2671 case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
2672 case VMX_VMCS32_CTRL_ENTRY:
2673 case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
2674 case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
2675 case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
2676 case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH: return true;
2677 case VMX_VMCS32_CTRL_TPR_THRESHOLD: return pFeat->fVmxUseTprShadow;
2678 case VMX_VMCS32_CTRL_PROC_EXEC2: return pFeat->fVmxSecondaryExecCtls;
2679 case VMX_VMCS32_CTRL_PLE_GAP:
2680 case VMX_VMCS32_CTRL_PLE_WINDOW: return pFeat->fVmxPauseLoopExit;
2681
2682 /* Read-only data fields. */
2683 case VMX_VMCS32_RO_VM_INSTR_ERROR:
2684 case VMX_VMCS32_RO_EXIT_REASON:
2685 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
2686 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
2687 case VMX_VMCS32_RO_IDT_VECTORING_INFO:
2688 case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
2689 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
2690 case VMX_VMCS32_RO_EXIT_INSTR_INFO: return true;
2691
2692 /* Guest-state fields. */
2693 case VMX_VMCS32_GUEST_ES_LIMIT:
2694 case VMX_VMCS32_GUEST_CS_LIMIT:
2695 case VMX_VMCS32_GUEST_SS_LIMIT:
2696 case VMX_VMCS32_GUEST_DS_LIMIT:
2697 case VMX_VMCS32_GUEST_FS_LIMIT:
2698 case VMX_VMCS32_GUEST_GS_LIMIT:
2699 case VMX_VMCS32_GUEST_LDTR_LIMIT:
2700 case VMX_VMCS32_GUEST_TR_LIMIT:
2701 case VMX_VMCS32_GUEST_GDTR_LIMIT:
2702 case VMX_VMCS32_GUEST_IDTR_LIMIT:
2703 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
2704 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
2705 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
2706 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
2707 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
2708 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
2709 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
2710 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
2711 case VMX_VMCS32_GUEST_INT_STATE:
2712 case VMX_VMCS32_GUEST_ACTIVITY_STATE:
2713 case VMX_VMCS32_GUEST_SMBASE:
2714 case VMX_VMCS32_GUEST_SYSENTER_CS: return true;
2715 case VMX_VMCS32_PREEMPT_TIMER_VALUE: return pFeat->fVmxPreemptTimer;
2716
2717 /* Host-state fields. */
2718 case VMX_VMCS32_HOST_SYSENTER_CS: return true;
2719
2720 /*
2721 * Natural-width fields.
2722 */
2723 /* Control fields. */
2724 case VMX_VMCS_CTRL_CR0_MASK:
2725 case VMX_VMCS_CTRL_CR4_MASK:
2726 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
2727 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
2728 case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
2729 case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
2730 case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
2731 case VMX_VMCS_CTRL_CR3_TARGET_VAL3: return true;
2732
2733 /* Read-only data fields. */
2734 case VMX_VMCS_RO_EXIT_QUALIFICATION:
2735 case VMX_VMCS_RO_IO_RCX:
2736 case VMX_VMCS_RO_IO_RSI:
2737 case VMX_VMCS_RO_IO_RDI:
2738 case VMX_VMCS_RO_IO_RIP:
2739 case VMX_VMCS_RO_GUEST_LINEAR_ADDR: return true;
2740
2741 /* Guest-state fields. */
2742 case VMX_VMCS_GUEST_CR0:
2743 case VMX_VMCS_GUEST_CR3:
2744 case VMX_VMCS_GUEST_CR4:
2745 case VMX_VMCS_GUEST_ES_BASE:
2746 case VMX_VMCS_GUEST_CS_BASE:
2747 case VMX_VMCS_GUEST_SS_BASE:
2748 case VMX_VMCS_GUEST_DS_BASE:
2749 case VMX_VMCS_GUEST_FS_BASE:
2750 case VMX_VMCS_GUEST_GS_BASE:
2751 case VMX_VMCS_GUEST_LDTR_BASE:
2752 case VMX_VMCS_GUEST_TR_BASE:
2753 case VMX_VMCS_GUEST_GDTR_BASE:
2754 case VMX_VMCS_GUEST_IDTR_BASE:
2755 case VMX_VMCS_GUEST_DR7:
2756 case VMX_VMCS_GUEST_RSP:
2757 case VMX_VMCS_GUEST_RIP:
2758 case VMX_VMCS_GUEST_RFLAGS:
2759 case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
2760 case VMX_VMCS_GUEST_SYSENTER_ESP:
2761 case VMX_VMCS_GUEST_SYSENTER_EIP: return true;
2762
2763 /* Host-state fields. */
2764 case VMX_VMCS_HOST_CR0:
2765 case VMX_VMCS_HOST_CR3:
2766 case VMX_VMCS_HOST_CR4:
2767 case VMX_VMCS_HOST_FS_BASE:
2768 case VMX_VMCS_HOST_GS_BASE:
2769 case VMX_VMCS_HOST_TR_BASE:
2770 case VMX_VMCS_HOST_GDTR_BASE:
2771 case VMX_VMCS_HOST_IDTR_BASE:
2772 case VMX_VMCS_HOST_SYSENTER_ESP:
2773 case VMX_VMCS_HOST_SYSENTER_EIP:
2774 case VMX_VMCS_HOST_RSP:
2775 case VMX_VMCS_HOST_RIP: return true;
2776 }
2777
2778 return false;
2779}
2780
2781
2782/**
2783 * Checks whether the given I/O access should cause a nested-guest VM-exit.
2784 *
2785 * @returns @c true if it causes a VM-exit, @c false otherwise.
2786 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2787 * @param u16Port The I/O port being accessed.
2788 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
2789 */
2790VMM_INT_DECL(bool) CPUMIsGuestVmxIoInterceptSet(PCVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess)
2791{
2792 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2793 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_UNCOND_IO_EXIT))
2794 return true;
2795
2796 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_USE_IO_BITMAPS))
2797 return cpumGetVmxIoBitmapPermission(pCtx->hwvirt.vmx.abIoBitmap, u16Port, cbAccess);
2798
2799 return false;
2800}
2801
2802
2803/**
2804 * Checks whether the Mov-to-CR3 instruction causes a nested-guest VM-exit.
2805 *
2806 * @returns @c true if it causes a VM-exit, @c false otherwise.
2807 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2808 * @param uNewCr3 The CR3 value being written.
2809 */
2810VMM_INT_DECL(bool) CPUMIsGuestVmxMovToCr3InterceptSet(PVMCPU pVCpu, uint64_t uNewCr3)
2811{
2812 /*
2813 * If the CR3-load exiting control is set and the new CR3 value does not
2814 * match any of the CR3-target values in the VMCS, we must cause a VM-exit.
2815 *
2816 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2817 */
2818 PCCPUMCTX const pCtx = &pVCpu->cpum.s.Guest;
2819 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_CR3_LOAD_EXIT))
2820 {
2821 uint32_t const uCr3TargetCount = pCtx->hwvirt.vmx.Vmcs.u32Cr3TargetCount;
2822 Assert(uCr3TargetCount <= VMX_V_CR3_TARGET_COUNT);
2823
2824 /* If the CR3-target count is 0, cause a VM-exit. */
2825 if (uCr3TargetCount == 0)
2826 return true;
2827
2828 /* If the CR3 being written doesn't match any of the target values, cause a VM-exit. */
2829 AssertCompile(VMX_V_CR3_TARGET_COUNT == 4);
2830 if ( uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target0.u
2831 && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target1.u
2832 && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target2.u
2833 && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target3.u)
2834 return true;
2835 }
2836 return false;
2837}
2838
2839
2840/**
2841 * Checks whether a VMREAD or VMWRITE instruction for the given VMCS field causes a
2842 * VM-exit or not.
2843 *
2844 * @returns @c true if the VMREAD/VMWRITE is intercepted, @c false otherwise.
2845 * @param pVCpu The cross context virtual CPU structure.
2846 * @param uExitReason The VM-exit reason (VMX_EXIT_VMREAD or
2847 * VMX_EXIT_VMREAD).
2848 * @param u64VmcsField The VMCS field.
2849 */
2850VMM_INT_DECL(bool) CPUMIsGuestVmxVmreadVmwriteInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint64_t u64VmcsField)
2851{
2852 Assert(CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest));
2853 Assert( uExitReason == VMX_EXIT_VMREAD
2854 || uExitReason == VMX_EXIT_VMWRITE);
2855
2856 /*
2857 * Without VMCS shadowing, all VMREAD and VMWRITE instructions are intercepted.
2858 */
2859 if (!CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.s.Guest, VMX_PROC_CTLS2_VMCS_SHADOWING))
2860 return true;
2861
2862 /*
2863 * If any reserved bit in the 64-bit VMCS field encoding is set, the VMREAD/VMWRITE
2864 * is intercepted. This excludes any reserved bits in the valid parts of the field
2865 * encoding (i.e. bit 12).
2866 */
2867 if (u64VmcsField & VMX_VMCSFIELD_RSVD_MASK)
2868 return true;
2869
2870 /*
2871 * Finally, consult the VMREAD/VMWRITE bitmap whether to intercept the instruction or not.
2872 */
2873 uint32_t const u32VmcsField = RT_LO_U32(u64VmcsField);
2874 uint8_t const * const pbBitmap = uExitReason == VMX_EXIT_VMREAD
2875 ? &pVCpu->cpum.s.Guest.hwvirt.vmx.abVmreadBitmap[0]
2876 : &pVCpu->cpum.s.Guest.hwvirt.vmx.abVmwriteBitmap[0];
2877 Assert(pbBitmap);
2878 Assert(u32VmcsField >> 3 < VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
2879 return ASMBitTest(pbBitmap, (u32VmcsField << 3) + (u32VmcsField & 7));
2880}
2881
2882
2883
2884/**
2885 * Determines whether the given I/O access should cause a nested-guest \#VMEXIT.
2886 *
2887 * @param pvIoBitmap Pointer to the nested-guest IO bitmap.
2888 * @param u16Port The IO port being accessed.
2889 * @param enmIoType The type of IO access.
2890 * @param cbReg The IO operand size in bytes.
2891 * @param cAddrSizeBits The address size bits (for 16, 32 or 64).
2892 * @param iEffSeg The effective segment number.
2893 * @param fRep Whether this is a repeating IO instruction (REP prefix).
2894 * @param fStrIo Whether this is a string IO instruction.
2895 * @param pIoExitInfo Pointer to the SVMIOIOEXITINFO struct to be filled.
2896 * Optional, can be NULL.
2897 */
2898VMM_INT_DECL(bool) CPUMIsSvmIoInterceptSet(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
2899 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
2900 PSVMIOIOEXITINFO pIoExitInfo)
2901{
2902 Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
2903 Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);
2904
2905 /*
2906 * The IOPM layout:
2907 * Each bit represents one 8-bit port. That makes a total of 0..65535 bits or
2908 * two 4K pages.
2909 *
2910 * For IO instructions that access more than a single byte, the permission bits
2911 * for all bytes are checked; if any bit is set to 1, the IO access is intercepted.
2912 *
2913 * Since it's possible to do a 32-bit IO access at port 65534 (accessing 4 bytes),
2914 * we need 3 extra bits beyond the second 4K page.
2915 */
2916 static const uint16_t s_auSizeMasks[] = { 0, 1, 3, 0, 0xf, 0, 0, 0 };
2917
2918 uint16_t const offIopm = u16Port >> 3;
2919 uint16_t const fSizeMask = s_auSizeMasks[(cAddrSizeBits >> SVM_IOIO_OP_SIZE_SHIFT) & 7];
2920 uint8_t const cShift = u16Port - (offIopm << 3);
2921 uint16_t const fIopmMask = (1 << cShift) | (fSizeMask << cShift);
2922
2923 uint8_t const *pbIopm = (uint8_t *)pvIoBitmap;
2924 Assert(pbIopm);
2925 pbIopm += offIopm;
2926 uint16_t const u16Iopm = *(uint16_t *)pbIopm;
2927 if (u16Iopm & fIopmMask)
2928 {
2929 if (pIoExitInfo)
2930 {
2931 static const uint32_t s_auIoOpSize[] =
2932 { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };
2933
2934 static const uint32_t s_auIoAddrSize[] =
2935 { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };
2936
2937 pIoExitInfo->u = s_auIoOpSize[cbReg & 7];
2938 pIoExitInfo->u |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
2939 pIoExitInfo->n.u1Str = fStrIo;
2940 pIoExitInfo->n.u1Rep = fRep;
2941 pIoExitInfo->n.u3Seg = iEffSeg & 7;
2942 pIoExitInfo->n.u1Type = enmIoType;
2943 pIoExitInfo->n.u16Port = u16Port;
2944 }
2945 return true;
2946 }
2947
2948 /** @todo remove later (for debugging as VirtualBox always traps all IO
2949 * intercepts). */
2950 AssertMsgFailed(("CPUMSvmIsIOInterceptActive: We expect an IO intercept here!\n"));
2951 return false;
2952}
2953
2954
2955/**
2956 * Gets the MSR permission bitmap byte and bit offset for the specified MSR.
2957 *
2958 * @returns VBox status code.
2959 * @param idMsr The MSR being requested.
2960 * @param pbOffMsrpm Where to store the byte offset in the MSR permission
2961 * bitmap for @a idMsr.
2962 * @param puMsrpmBit Where to store the bit offset starting at the byte
2963 * returned in @a pbOffMsrpm.
2964 */
2965VMM_INT_DECL(int) CPUMGetSvmMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit)
2966{
2967 Assert(pbOffMsrpm);
2968 Assert(puMsrpmBit);
2969
2970 /*
2971 * MSRPM Layout:
2972 * Byte offset MSR range
2973 * 0x000 - 0x7ff 0x00000000 - 0x00001fff
2974 * 0x800 - 0xfff 0xc0000000 - 0xc0001fff
2975 * 0x1000 - 0x17ff 0xc0010000 - 0xc0011fff
2976 * 0x1800 - 0x1fff Reserved
2977 *
2978 * Each MSR is represented by 2 permission bits (read and write).
2979 */
2980 if (idMsr <= 0x00001fff)
2981 {
2982 /* Pentium-compatible MSRs. */
2983 uint32_t const bitoffMsr = idMsr << 1;
2984 *pbOffMsrpm = bitoffMsr >> 3;
2985 *puMsrpmBit = bitoffMsr & 7;
2986 return VINF_SUCCESS;
2987 }
2988
2989 if ( idMsr >= 0xc0000000
2990 && idMsr <= 0xc0001fff)
2991 {
2992 /* AMD Sixth Generation x86 Processor MSRs. */
2993 uint32_t const bitoffMsr = (idMsr - 0xc0000000) << 1;
2994 *pbOffMsrpm = 0x800 + (bitoffMsr >> 3);
2995 *puMsrpmBit = bitoffMsr & 7;
2996 return VINF_SUCCESS;
2997 }
2998
2999 if ( idMsr >= 0xc0010000
3000 && idMsr <= 0xc0011fff)
3001 {
3002 /* AMD Seventh and Eighth Generation Processor MSRs. */
3003 uint32_t const bitoffMsr = (idMsr - 0xc0010000) << 1;
3004 *pbOffMsrpm = 0x1000 + (bitoffMsr >> 3);
3005 *puMsrpmBit = bitoffMsr & 7;
3006 return VINF_SUCCESS;
3007 }
3008
3009 *pbOffMsrpm = 0;
3010 *puMsrpmBit = 0;
3011 return VERR_OUT_OF_RANGE;
3012}
3013
3014
3015/**
3016 * Checks whether the guest is in VMX non-root mode and using EPT paging.
3017 *
3018 * @returns @c true if in VMX non-root operation with EPT, @c false otherwise.
3019 * @param pVCpu The cross context virtual CPU structure.
3020 */
3021VMM_INT_DECL(bool) CPUMIsGuestVmxEptPagingEnabled(PCVMCPUCC pVCpu)
3022{
3023 return CPUMIsGuestVmxEptPagingEnabledEx(&pVCpu->cpum.s.Guest);
3024}
3025
3026
3027/**
3028 * Checks whether the guest is in VMX non-root mode and using EPT paging and the
3029 * nested-guest is in PAE mode.
3030 *
3031 * @returns @c true if in VMX non-root operation with EPT, @c false otherwise.
3032 * @param pVCpu The cross context virtual CPU structure.
3033 */
3034VMM_INT_DECL(bool) CPUMIsGuestVmxEptPaePagingEnabled(PCVMCPUCC pVCpu)
3035{
3036 return CPUMIsGuestVmxEptPagingEnabledEx(&pVCpu->cpum.s.Guest)
3037 && CPUMIsGuestInPAEModeEx(&pVCpu->cpum.s.Guest);
3038}
3039
3040
3041/**
3042 * Returns the guest-physical address of the APIC-access page when executing a
3043 * nested-guest.
3044 *
3045 * @returns The APIC-access page guest-physical address.
3046 * @param pVCpu The cross context virtual CPU structure.
3047 */
3048VMM_INT_DECL(uint64_t) CPUMGetGuestVmxApicAccessPageAddr(PCVMCPUCC pVCpu)
3049{
3050 return CPUMGetGuestVmxApicAccessPageAddrEx(&pVCpu->cpum.s.Guest);
3051}
3052
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette