/* $Id: CPUMAllRegs.cpp 96407 2022-08-22 17:43:14Z vboxsync $ */
/** @file
 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
 */

/*
 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_CPUM
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/hm.h>
#include "CPUMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/err.h>
#include <VBox/dis.h>
#include <VBox/log.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/tm.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
# include <iprt/asm-amd64-x86.h>
#endif
#ifdef IN_RING3
# include <iprt/thread.h>
#endif

/** Disable stack frame pointer generation here. */
#if defined(_MSC_VER) && !defined(DEBUG) && defined(RT_ARCH_X86)
# pragma optimize("y", off)
#endif

AssertCompile2MemberOffsets(VM, cpum.s.GuestFeatures, cpum.ro.GuestFeatures);


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/**
 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
 *
 * @returns Pointer to the Virtual CPU.
 * @param   a_pGuestCtx     Pointer to the guest context.
 */
#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)

/**
 * Lazily loads the hidden parts of a selector register when using raw-mode.
 */
#define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg))

/** @def CPUM_INT_ASSERT_NOT_EXTRN
 * Macro for asserting that @a a_fNotExtrn are present.
 *
 * @param   a_pVCpu         The cross context virtual CPU structure of the calling EMT.
 * @param   a_fNotExtrn     Mask of CPUMCTX_EXTRN_XXX bits to check.
 */
#define CPUM_INT_ASSERT_NOT_EXTRN(a_pVCpu, a_fNotExtrn) \
    AssertMsg(!((a_pVCpu)->cpum.s.Guest.fExtrn & (a_fNotExtrn)), \
              ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.s.Guest.fExtrn, (a_fNotExtrn)))

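/*
 * Illustrative example: CPUM_INT_ASSERT_NOT_EXTRN is the guard the getters
 * below use before touching guest state that may still be marked external
 * (i.e. not yet imported from the hardware/NEM execution context).  A typical
 * use, mirroring CPUMGetGuestCR0 further down in this file:
 *
 *     CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
 *     uint64_t const uGuestCr0 = pVCpu->cpum.s.Guest.cr0;   // safe to read now
 *
 * (uGuestCr0 is a made-up name for the example.)
 */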
VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
{
    pVCpu->cpum.s.Hyper.cr3 = cr3;
}

VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.cr3;
}


/** @def MAYBE_LOAD_DRx
 * Macro for updating DRx values in raw-mode and ring-0 contexts.
 */
#ifdef IN_RING0
# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { a_fnLoad(a_uValue); } while (0)
#else
# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
#endif

[18927]113VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
[1]114{
[18927]115 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
[47660]116 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
[1]117}
118
[12657]119
[18927]120VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
[1]121{
[18927]122 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
[47660]123 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
[1]124}
125
[12657]126
[18927]127VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
[1]128{
[18927]129 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
[47660]130 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
[1]131}
132
[12657]133
[18927]134VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
[1]135{
[18927]136 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
[47660]137 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
[1]138}
139
[12657]140
[18927]141VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
[1]142{
[18927]143 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
[1]144}
145
[12657]146
[18927]147VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
[1]148{
[18927]149 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
[1]150}
151
152
[18927]153VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
[1]154{
[18927]155 return pVCpu->cpum.s.Hyper.dr[0];
[1]156}
157
[12657]158
[18927]159VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
[1]160{
[18927]161 return pVCpu->cpum.s.Hyper.dr[1];
[1]162}
163
[12657]164
[18927]165VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
[1]166{
[18927]167 return pVCpu->cpum.s.Hyper.dr[2];
[1]168}
169
[12657]170
[18927]171VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
[1]172{
[18927]173 return pVCpu->cpum.s.Hyper.dr[3];
[1]174}
175
[12657]176
[18927]177VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
[1]178{
[18927]179 return pVCpu->cpum.s.Hyper.dr[6];
[1]180}
181
[12657]182
[18927]183VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
[1]184{
[18927]185 return pVCpu->cpum.s.Hyper.dr[7];
[1]186}
187
188
189/**
190 * Gets the pointer to the internal CPUMCTXCORE structure.
191 * This is only for reading in order to save a few calls.
192 *
[58123]193 * @param pVCpu The cross context virtual CPU structure.
[1]194 */
[18927]195VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
[1]196{
[13975]197 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
198}
[1]199
[13975]200
[1]201/**
[42034]202 * Queries the pointer to the internal CPUMCTX structure.
[1]203 *
[13532]204 * @returns The CPUMCTX pointer.
[58123]205 * @param pVCpu The cross context virtual CPU structure.
[1]206 */
[18927]207VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
[1]208{
[13960]209 return &pVCpu->cpum.s.Guest;
210}
211
[72358]212
213/**
214 * Queries the pointer to the internal CPUMCTXMSRS structure.
215 *
216 * This is for NEM only.
217 *
218 * @returns The CPUMCTX pointer.
219 * @param pVCpu The cross context virtual CPU structure.
220 */
221VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu)
222{
223 return &pVCpu->cpum.s.GuestMsrs;
224}
225
226
[36762]227VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
[1]228{
[36762]229 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
230 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
[72484]231 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_GDTR;
[18927]232 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
[42452]233 return VINF_SUCCESS; /* formality, consider it void. */
[1]234}
235
[72358]236
[36762]237VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
[1]238{
[36762]239 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
240 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
[72484]241 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_IDTR;
[18927]242 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
[42452]243 return VINF_SUCCESS; /* formality, consider it void. */
[1]244}
245
[72358]246
[18927]247VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
[1]248{
[41906]249 pVCpu->cpum.s.Guest.tr.Sel = tr;
[18927]250 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
[42452]251 return VINF_SUCCESS; /* formality, consider it void. */
[1]252}
253
[72358]254
[18927]255VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
[1]256{
[42407]257 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
258 /* The caller will set more hidden bits if it has them. */
259 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
260 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
[18927]261 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
[42452]262 return VINF_SUCCESS; /* formality, consider it void. */
[1]263}
264
265
[5389]266/**
[5695]267 * Set the guest CR0.
268 *
269 * When called in GC, the hyper CR0 may be updated if that is
270 * required. The caller only has to take special action if AM,
271 * WP, PG or PE changes.
272 *
[5389]273 * @returns VINF_SUCCESS (consider it void).
[58123]274 * @param pVCpu The cross context virtual CPU structure.
[5389]275 * @param cr0 The new CR0 value.
276 */
[80253]277VMMDECL(int) CPUMSetGuestCR0(PVMCPUCC pVCpu, uint64_t cr0)
[1]278{
[5389]279 /*
[5695]280 * Check for changes causing TLB flushes (for REM).
281 * The caller is responsible for calling PGM when appropriate.
[5389]282 */
[31079]283 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
[18927]284 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
285 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
286 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
[5389]287
[45798]288 /*
289 * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack)
290 */
291 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
292 PGMCr0WpEnabled(pVCpu);
293
[60664]294 /* The ET flag is settable on a 386 and hardwired on 486+. */
295 if ( !(cr0 & X86_CR0_ET)
296 && pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386)
297 cr0 |= X86_CR0_ET;
298
299 pVCpu->cpum.s.Guest.cr0 = cr0;
[72676]300 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR0;
[1]301 return VINF_SUCCESS;
302}
303
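/*
 * Illustrative sketch of a caller honouring the contract described above:
 * CPUMSetGuestCR0 only records the change, so the caller reacts to AM/WP/PG/PE
 * changes itself, typically by telling PGM.  Hypothetical caller code:
 *
 *     uint64_t const uOldCr0 = CPUMGetGuestCR0(pVCpu);
 *     CPUMSetGuestCR0(pVCpu, uNewCr0);
 *     if ((uOldCr0 ^ uNewCr0) & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
 *     {
 *         // let PGM re-evaluate the guest paging mode here
 *     }
 */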
[12657]304
[18927]305VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
[1]306{
[18927]307 pVCpu->cpum.s.Guest.cr2 = cr2;
[72676]308 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR2;
[1]309 return VINF_SUCCESS;
310}
311
[12657]312
[18927]313VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
[1]314{
[18927]315 pVCpu->cpum.s.Guest.cr3 = cr3;
316 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
[72676]317 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR3;
[1]318 return VINF_SUCCESS;
319}
320
[12657]321
[18927]322VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
[1]323{
[61776]324 /* Note! We don't bother with OSXSAVE and legacy CPUID patches. */
[55229]325
326 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
327 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
[18927]328 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
[55229]329
[18927]330 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
331 pVCpu->cpum.s.Guest.cr4 = cr4;
[72676]332 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR4;
[1]333 return VINF_SUCCESS;
334}
335
[12657]336
[18927]337VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
[1]338{
[18927]339 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
[72676]340 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
[1]341 return VINF_SUCCESS;
342}
343
[12657]344
[18927]345VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
[1]346{
[18927]347 pVCpu->cpum.s.Guest.eip = eip;
[1]348 return VINF_SUCCESS;
349}
350
[12657]351
[18927]352VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
[1]353{
[18927]354 pVCpu->cpum.s.Guest.eax = eax;
[1]355 return VINF_SUCCESS;
356}
357
[12657]358
[18927]359VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
[1]360{
[18927]361 pVCpu->cpum.s.Guest.ebx = ebx;
[1]362 return VINF_SUCCESS;
363}
364
[12657]365
[18927]366VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
[1]367{
[18927]368 pVCpu->cpum.s.Guest.ecx = ecx;
[1]369 return VINF_SUCCESS;
370}
371
[12657]372
[18927]373VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
[1]374{
[18927]375 pVCpu->cpum.s.Guest.edx = edx;
[1]376 return VINF_SUCCESS;
377}
378
[12657]379
[18927]380VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
[1]381{
[18927]382 pVCpu->cpum.s.Guest.esp = esp;
[1]383 return VINF_SUCCESS;
384}
385
[12657]386
[18927]387VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
[1]388{
[18927]389 pVCpu->cpum.s.Guest.ebp = ebp;
[1]390 return VINF_SUCCESS;
391}
392
[12657]393
[18927]394VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
[1]395{
[18927]396 pVCpu->cpum.s.Guest.esi = esi;
[1]397 return VINF_SUCCESS;
398}
399
[12657]400
[18927]401VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
[1]402{
[18927]403 pVCpu->cpum.s.Guest.edi = edi;
[1]404 return VINF_SUCCESS;
405}
406
[12657]407
[18927]408VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
[1]409{
[41906]410 pVCpu->cpum.s.Guest.ss.Sel = ss;
[1]411 return VINF_SUCCESS;
412}
413
[12657]414
[18927]415VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
[1]416{
[41906]417 pVCpu->cpum.s.Guest.cs.Sel = cs;
[1]418 return VINF_SUCCESS;
419}
420
[12657]421
[18927]422VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
[1]423{
[41906]424 pVCpu->cpum.s.Guest.ds.Sel = ds;
[1]425 return VINF_SUCCESS;
426}
427
[12657]428
[18927]429VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
[1]430{
[41906]431 pVCpu->cpum.s.Guest.es.Sel = es;
[1]432 return VINF_SUCCESS;
433}
434
[12657]435
[18927]436VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
[1]437{
[41906]438 pVCpu->cpum.s.Guest.fs.Sel = fs;
[1]439 return VINF_SUCCESS;
440}
441
[12657]442
[18927]443VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
[1]444{
[41906]445 pVCpu->cpum.s.Guest.gs.Sel = gs;
[1]446 return VINF_SUCCESS;
447}
448
[12657]449
[18927]450VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
[7730]451{
[18927]452 pVCpu->cpum.s.Guest.msrEFER = val;
[72676]453 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_EFER;
[7730]454}
[1]455
[12657]456
[79164]457VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PCVMCPU pVCpu, uint16_t *pcbLimit)
[1]458{
[72676]459 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_IDTR);
[1]460 if (pcbLimit)
[18927]461 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
462 return pVCpu->cpum.s.Guest.idtr.pIdt;
[1]463}
464
[12657]465
[79164]466VMMDECL(RTSEL) CPUMGetGuestTR(PCVMCPU pVCpu, PCPUMSELREGHID pHidden)
[1]467{
[72676]468 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_TR);
[17035]469 if (pHidden)
[41906]470 *pHidden = pVCpu->cpum.s.Guest.tr;
471 return pVCpu->cpum.s.Guest.tr.Sel;
[1]472}
473
[12657]474
[79164]475VMMDECL(RTSEL) CPUMGetGuestCS(PCVMCPU pVCpu)
[1]476{
[72676]477 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CS);
[41906]478 return pVCpu->cpum.s.Guest.cs.Sel;
[1]479}
480
[12657]481
[79164]482VMMDECL(RTSEL) CPUMGetGuestDS(PCVMCPU pVCpu)
[1]483{
[72676]484 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DS);
[41906]485 return pVCpu->cpum.s.Guest.ds.Sel;
[1]486}
487
[12657]488
[79164]489VMMDECL(RTSEL) CPUMGetGuestES(PCVMCPU pVCpu)
[1]490{
[72676]491 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_ES);
[41906]492 return pVCpu->cpum.s.Guest.es.Sel;
[1]493}
494
[12657]495
[79164]496VMMDECL(RTSEL) CPUMGetGuestFS(PCVMCPU pVCpu)
[1]497{
[72676]498 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_FS);
[41906]499 return pVCpu->cpum.s.Guest.fs.Sel;
[1]500}
501
[12657]502
[79164]503VMMDECL(RTSEL) CPUMGetGuestGS(PCVMCPU pVCpu)
[1]504{
[72676]505 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_GS);
[41906]506 return pVCpu->cpum.s.Guest.gs.Sel;
[1]507}
508
[12657]509
[79164]510VMMDECL(RTSEL) CPUMGetGuestSS(PCVMCPU pVCpu)
[1]511{
[72676]512 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SS);
[41906]513 return pVCpu->cpum.s.Guest.ss.Sel;
[1]514}
515
[12657]516
[64720]517VMMDECL(uint64_t) CPUMGetGuestFlatPC(PVMCPU pVCpu)
518{
[72676]519 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
[64720]520 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
521 if ( !CPUMIsGuestInLongMode(pVCpu)
[72676]522 || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
[64720]523 return pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.cs.u64Base;
524 return pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.cs.u64Base;
525}
526
527
528VMMDECL(uint64_t) CPUMGetGuestFlatSP(PVMCPU pVCpu)
529{
[72676]530 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
[64720]531 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
532 if ( !CPUMIsGuestInLongMode(pVCpu)
[72676]533 || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
[64720]534 return pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.ss.u64Base;
535 return pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.ss.u64Base;
536}
537
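/*
 * Illustrative note on CPUMGetGuestFlatPC above: outside 64-bit code the flat
 * address is the 32-bit offset plus the hidden segment base, e.g. with
 * CS.u64Base = 0x10000 and EIP = 0x200 the returned flat PC is 0x10200.  In
 * 64-bit code the full RIP is used and the CS base is normally zero.
 */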
538
[79164]539VMMDECL(RTSEL) CPUMGetGuestLDTR(PCVMCPU pVCpu)
[1]540{
[72676]541 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_LDTR);
[41906]542 return pVCpu->cpum.s.Guest.ldtr.Sel;
[1]543}
544
[12657]545
[79164]546VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PCVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
[42427]547{
[72676]548 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_LDTR);
[42427]549 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
550 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
551 return pVCpu->cpum.s.Guest.ldtr.Sel;
552}
553
554
[79164]555VMMDECL(uint64_t) CPUMGetGuestCR0(PCVMCPU pVCpu)
[1]556{
[72676]557 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
[18927]558 return pVCpu->cpum.s.Guest.cr0;
[1]559}
560
[12657]561
[79164]562VMMDECL(uint64_t) CPUMGetGuestCR2(PCVMCPU pVCpu)
[1]563{
[72676]564 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR2);
[18927]565 return pVCpu->cpum.s.Guest.cr2;
[1]566}
567
[12657]568
[79164]569VMMDECL(uint64_t) CPUMGetGuestCR3(PCVMCPU pVCpu)
[1]570{
[72676]571 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
[18927]572 return pVCpu->cpum.s.Guest.cr3;
[1]573}
574
[12657]575
[79164]576VMMDECL(uint64_t) CPUMGetGuestCR4(PCVMCPU pVCpu)
[1]577{
[72676]578 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
[18927]579 return pVCpu->cpum.s.Guest.cr4;
[1]580}
581
[12657]582
[80253]583VMMDECL(uint64_t) CPUMGetGuestCR8(PCVMCPUCC pVCpu)
[31489]584{
585 uint64_t u64;
[41728]586 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
[31489]587 if (RT_FAILURE(rc))
588 u64 = 0;
589 return u64;
590}
591
592
[79164]593VMMDECL(void) CPUMGetGuestGDTR(PCVMCPU pVCpu, PVBOXGDTR pGDTR)
[1]594{
[72676]595 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_GDTR);
[18927]596 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
[1]597}
598
[12657]599
[79164]600VMMDECL(uint32_t) CPUMGetGuestEIP(PCVMCPU pVCpu)
[1]601{
[72676]602 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
[18927]603 return pVCpu->cpum.s.Guest.eip;
[1]604}
605
[12657]606
[79164]607VMMDECL(uint64_t) CPUMGetGuestRIP(PCVMCPU pVCpu)
[9841]608{
[72676]609 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
[18927]610 return pVCpu->cpum.s.Guest.rip;
[9841]611}
612
[12657]613
[79164]614VMMDECL(uint32_t) CPUMGetGuestEAX(PCVMCPU pVCpu)
[1]615{
[72676]616 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RAX);
[18927]617 return pVCpu->cpum.s.Guest.eax;
[1]618}
619
[12657]620
[79164]621VMMDECL(uint32_t) CPUMGetGuestEBX(PCVMCPU pVCpu)
[1]622{
[72676]623 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RBX);
[18927]624 return pVCpu->cpum.s.Guest.ebx;
[1]625}
626
[12657]627
[79164]628VMMDECL(uint32_t) CPUMGetGuestECX(PCVMCPU pVCpu)
[1]629{
[72676]630 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RCX);
[18927]631 return pVCpu->cpum.s.Guest.ecx;
[1]632}
633
[12657]634
[79164]635VMMDECL(uint32_t) CPUMGetGuestEDX(PCVMCPU pVCpu)
[1]636{
[72676]637 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RDX);
[18927]638 return pVCpu->cpum.s.Guest.edx;
[1]639}
640
[12657]641
[79164]642VMMDECL(uint32_t) CPUMGetGuestESI(PCVMCPU pVCpu)
[1]643{
[72676]644 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSI);
[18927]645 return pVCpu->cpum.s.Guest.esi;
[1]646}
647
[12657]648
[79164]649VMMDECL(uint32_t) CPUMGetGuestEDI(PCVMCPU pVCpu)
[1]650{
[72676]651 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RDI);
[18927]652 return pVCpu->cpum.s.Guest.edi;
[1]653}
654
[12657]655
[79164]656VMMDECL(uint32_t) CPUMGetGuestESP(PCVMCPU pVCpu)
[1]657{
[72676]658 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSP);
[18927]659 return pVCpu->cpum.s.Guest.esp;
[1]660}
661
[12657]662
[79164]663VMMDECL(uint32_t) CPUMGetGuestEBP(PCVMCPU pVCpu)
[1]664{
[72676]665 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RBP);
[18927]666 return pVCpu->cpum.s.Guest.ebp;
[1]667}
668
[12657]669
[79164]670VMMDECL(uint32_t) CPUMGetGuestEFlags(PCVMCPU pVCpu)
[1]671{
[72676]672 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RFLAGS);
[18927]673 return pVCpu->cpum.s.Guest.eflags.u32;
[1]674}
675
[12657]676
VMMDECL(int) CPUMGetGuestCRx(PCVMCPUCC pVCpu, unsigned iReg, uint64_t *pValue)
{
    switch (iReg)
    {
        case DISCREG_CR0:
            CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
            *pValue = pVCpu->cpum.s.Guest.cr0;
            break;

        case DISCREG_CR2:
            CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR2);
            *pValue = pVCpu->cpum.s.Guest.cr2;
            break;

        case DISCREG_CR3:
            CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
            *pValue = pVCpu->cpum.s.Guest.cr3;
            break;

        case DISCREG_CR4:
            CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
            *pValue = pVCpu->cpum.s.Guest.cr4;
            break;

        case DISCREG_CR8:
        {
            CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
            uint8_t u8Tpr;
            int rc = APICGetTpr(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
            if (RT_FAILURE(rc))
            {
                AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
                *pValue = 0;
                return rc;
            }
            *pValue = u8Tpr >> 4; /* CR8 holds the task priority from TPR bits 7-4; TPR bits 3-0 are dropped. */
            break;
        }

        default:
            return VERR_INVALID_PARAMETER;
    }
    return VINF_SUCCESS;
}
721
[12657]722
[79164]723VMMDECL(uint64_t) CPUMGetGuestDR0(PCVMCPU pVCpu)
[1]724{
[72676]725 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
[18927]726 return pVCpu->cpum.s.Guest.dr[0];
[1]727}
728
[12657]729
[79164]730VMMDECL(uint64_t) CPUMGetGuestDR1(PCVMCPU pVCpu)
[1]731{
[72676]732 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
[18927]733 return pVCpu->cpum.s.Guest.dr[1];
[1]734}
735
[12657]736
[79164]737VMMDECL(uint64_t) CPUMGetGuestDR2(PCVMCPU pVCpu)
[1]738{
[72676]739 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
[18927]740 return pVCpu->cpum.s.Guest.dr[2];
[1]741}
742
[12657]743
[79164]744VMMDECL(uint64_t) CPUMGetGuestDR3(PCVMCPU pVCpu)
[1]745{
[72676]746 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
[18927]747 return pVCpu->cpum.s.Guest.dr[3];
[1]748}
749
[12657]750
[79164]751VMMDECL(uint64_t) CPUMGetGuestDR6(PCVMCPU pVCpu)
[1]752{
[72676]753 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR6);
[18927]754 return pVCpu->cpum.s.Guest.dr[6];
[1]755}
756
[12657]757
[79164]758VMMDECL(uint64_t) CPUMGetGuestDR7(PCVMCPU pVCpu)
[1]759{
[72676]760 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR7);
[18927]761 return pVCpu->cpum.s.Guest.dr[7];
[1]762}
763
[12657]764
[79164]765VMMDECL(int) CPUMGetGuestDRx(PCVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
[1]766{
[72676]767 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR_MASK);
[41728]768 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
[12657]769 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
770 if (iReg == 4 || iReg == 5)
771 iReg += 2;
[18927]772 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
[1]773 return VINF_SUCCESS;
774}
775
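/*
 * Illustrative note: because of the DR4/DR6 and DR5/DR7 aliasing handled above,
 * CPUMGetGuestDRx(pVCpu, 5, &uValue) stores the same value that
 * CPUMGetGuestDR7(pVCpu) returns.  (uValue is a made-up variable name.)
 */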
[12657]776
[79164]777VMMDECL(uint64_t) CPUMGetGuestEFER(PCVMCPU pVCpu)
[7730]778{
[72676]779 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
[18927]780 return pVCpu->cpum.s.Guest.msrEFER;
[7730]781}
782
[12657]783
[1]784/**
[54737]785 * Looks up a CPUID leaf in the CPUID leaf array, no subleaf.
786 *
787 * @returns Pointer to the leaf if found, NULL if not.
788 *
[58122]789 * @param pVM The cross context VM structure.
[54737]790 * @param uLeaf The leaf to get.
791 */
792PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf)
793{
[91266]794 unsigned iEnd = RT_MIN(pVM->cpum.s.GuestInfo.cCpuIdLeaves, RT_ELEMENTS(pVM->cpum.s.GuestInfo.aCpuIdLeaves));
[54737]795 if (iEnd)
796 {
797 unsigned iStart = 0;
[91266]798 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.aCpuIdLeaves;
[54737]799 for (;;)
800 {
801 unsigned i = iStart + (iEnd - iStart) / 2U;
802 if (uLeaf < paLeaves[i].uLeaf)
803 {
804 if (i <= iStart)
805 return NULL;
806 iEnd = i;
807 }
808 else if (uLeaf > paLeaves[i].uLeaf)
809 {
810 i += 1;
811 if (i >= iEnd)
812 return NULL;
813 iStart = i;
814 }
815 else
816 {
817 if (RT_LIKELY(paLeaves[i].fSubLeafMask == 0 && paLeaves[i].uSubLeaf == 0))
818 return &paLeaves[i];
819
                /* This shouldn't normally happen. But in case it does due
                   to user configuration overrides or something, just return
                   the first sub-leaf. */
                AssertMsgFailed(("uLeaf=%#x fSubLeafMask=%#x uSubLeaf=%#x\n",
                                 uLeaf, paLeaves[i].fSubLeafMask, paLeaves[i].uSubLeaf));
825 while ( paLeaves[i].uSubLeaf != 0
826 && i > 0
827 && uLeaf == paLeaves[i - 1].uLeaf)
828 i--;
829 return &paLeaves[i];
830 }
831 }
832 }
833
834 return NULL;
835}
836
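/*
 * Illustrative note (an inference from the binary search above): the
 * GuestInfo.aCpuIdLeaves array is expected to be sorted by uLeaf and, within a
 * leaf, by uSubLeaf.  Leaves with fSubLeafMask == 0 are plain single leaves;
 * anything with sub-leaves should be looked up via cpumCpuIdGetLeafEx below.
 */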
837
838/**
[49893]839 * Looks up a CPUID leaf in the CPUID leaf array.
840 *
841 * @returns Pointer to the leaf if found, NULL if not.
842 *
[58122]843 * @param pVM The cross context VM structure.
[49893]844 * @param uLeaf The leaf to get.
845 * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it
846 * isn't.
[54737]847 * @param pfExactSubLeafHit Whether we've got an exact subleaf hit or not.
[49893]848 */
[54737]849PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit)
[49893]850{
[91266]851 unsigned iEnd = RT_MIN(pVM->cpum.s.GuestInfo.cCpuIdLeaves, RT_ELEMENTS(pVM->cpum.s.GuestInfo.aCpuIdLeaves));
[49893]852 if (iEnd)
853 {
854 unsigned iStart = 0;
[91266]855 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.aCpuIdLeaves;
[49893]856 for (;;)
857 {
858 unsigned i = iStart + (iEnd - iStart) / 2U;
859 if (uLeaf < paLeaves[i].uLeaf)
860 {
861 if (i <= iStart)
862 return NULL;
863 iEnd = i;
864 }
865 else if (uLeaf > paLeaves[i].uLeaf)
866 {
867 i += 1;
868 if (i >= iEnd)
869 return NULL;
870 iStart = i;
871 }
872 else
873 {
874 uSubLeaf &= paLeaves[i].fSubLeafMask;
[54737]875 if (uSubLeaf == paLeaves[i].uSubLeaf)
876 *pfExactSubLeafHit = true;
877 else
[49893]878 {
879 /* Find the right subleaf. We return the last one before
880 uSubLeaf if we don't find an exact match. */
881 if (uSubLeaf < paLeaves[i].uSubLeaf)
882 while ( i > 0
[54714]883 && uLeaf == paLeaves[i - 1].uLeaf
884 && uSubLeaf <= paLeaves[i - 1].uSubLeaf)
[49893]885 i--;
886 else
887 while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
888 && uLeaf == paLeaves[i + 1].uLeaf
889 && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
890 i++;
[54737]891 *pfExactSubLeafHit = uSubLeaf == paLeaves[i].uSubLeaf;
[49893]892 }
893 return &paLeaves[i];
894 }
895 }
896 }
897
[54737]898 *pfExactSubLeafHit = false;
[49893]899 return NULL;
900}
901
902
/**
 * Gets a CPUID leaf.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uLeaf       The CPUID leaf to get.
 * @param   uSubLeaf    The CPUID sub-leaf to get, if applicable.
 * @param   f64BitMode  A tristate indicating whether the caller is in 64-bit
 *                      mode or not: 1=true, 0=false, -1=whatever. This affects
 *                      how the X86_CPUID_EXT_FEATURE_EDX_SYSCALL flag is
 *                      returned on Intel CPUs, where it's only returned in
 *                      64-bit mode.
 * @param   pEax        Where to store the EAX value.
 * @param   pEbx        Where to store the EBX value.
 * @param   pEcx        Where to store the ECX value.
 * @param   pEdx        Where to store the EDX value.
 */
[95248]918VMMDECL(void) CPUMGetGuestCpuId(PVMCPUCC pVCpu, uint32_t uLeaf, uint32_t uSubLeaf, int f64BitMode,
[54737]919 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
[1]920{
[54760]921 bool fExactSubLeafHit;
922 PVM pVM = pVCpu->CTX_SUFF(pVM);
923 PCCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafEx(pVM, uLeaf, uSubLeaf, &fExactSubLeafHit);
[54737]924 if (pLeaf)
[51301]925 {
[56985]926 AssertMsg(pLeaf->uLeaf == uLeaf, ("%#x %#x\n", pLeaf->uLeaf, uLeaf));
[54737]927 if (fExactSubLeafHit)
[51301]928 {
[54737]929 *pEax = pLeaf->uEax;
930 *pEbx = pLeaf->uEbx;
931 *pEcx = pLeaf->uEcx;
932 *pEdx = pLeaf->uEdx;
933
934 /*
[61776]935 * Deal with CPU specific information.
[54737]936 */
[61776]937 if (pLeaf->fFlags & ( CPUMCPUIDLEAF_F_CONTAINS_APIC_ID
938 | CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE
939 | CPUMCPUIDLEAF_F_CONTAINS_APIC ))
[54737]940 {
941 if (uLeaf == 1)
942 {
[55229]943 /* EBX: Bits 31-24: Initial APIC ID. */
[54737]944 Assert(pVCpu->idCpu <= 255);
[54760]945 AssertMsg((pLeaf->uEbx >> 24) == 0, ("%#x\n", pLeaf->uEbx)); /* raw-mode assumption */
946 *pEbx = (pLeaf->uEbx & UINT32_C(0x00ffffff)) | (pVCpu->idCpu << 24);
[55229]947
[61776]948 /* EDX: Bit 9: AND with APICBASE.EN. */
949 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
950 *pEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
951
[55229]952 /* ECX: Bit 27: CR4.OSXSAVE mirror. */
953 *pEcx = (pLeaf->uEcx & ~X86_CPUID_FEATURE_ECX_OSXSAVE)
954 | (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE ? X86_CPUID_FEATURE_ECX_OSXSAVE : 0);
[54737]955 }
956 else if (uLeaf == 0xb)
957 {
958 /* EDX: Initial extended APIC ID. */
[54760]959 AssertMsg(pLeaf->uEdx == 0, ("%#x\n", pLeaf->uEdx)); /* raw-mode assumption */
[54737]960 *pEdx = pVCpu->idCpu;
[61776]961 Assert(!(pLeaf->fFlags & ~(CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)));
[54737]962 }
963 else if (uLeaf == UINT32_C(0x8000001e))
964 {
965 /* EAX: Initial extended APIC ID. */
[54760]966 AssertMsg(pLeaf->uEax == 0, ("%#x\n", pLeaf->uEax)); /* raw-mode assumption */
[54737]967 *pEax = pVCpu->idCpu;
[61776]968 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC_ID));
[54737]969 }
[61776]970 else if (uLeaf == UINT32_C(0x80000001))
971 {
972 /* EDX: Bit 9: AND with APICBASE.EN. */
973 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible)
974 *pEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
975 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC));
976 }
[54737]977 else
978 AssertMsgFailed(("uLeaf=%#x\n", uLeaf));
979 }
[95248]980
            /* Intel CPUs suppress the SYSCALL bit when not executing in 64-bit mode: */
            if (   uLeaf == UINT32_C(0x80000001)
                && f64BitMode == false
                && (*pEdx & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
                && (   pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL
                    || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_VIA /*?*/
                    || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_SHANGHAI /*?*/ ) )
                *pEdx &= ~X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
989
[51301]990 }
[54737]991 /*
992 * Out of range sub-leaves aren't quite as easy and pretty as we emulate
993 * them here, but we do the best we can here...
994 */
[51301]995 else
996 {
997 *pEax = *pEbx = *pEcx = *pEdx = 0;
[54737]998 if (pLeaf->fFlags & CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)
999 {
1000 *pEcx = uSubLeaf & 0xff;
1001 *pEdx = pVCpu->idCpu;
1002 }
[51301]1003 }
1004 }
[1]1005 else
[19076]1006 {
[54737]1007 /*
1008 * Different CPUs have different ways of dealing with unknown CPUID leaves.
1009 */
1010 switch (pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod)
[25803]1011 {
[54737]1012 default:
1013 AssertFailed();
[69046]1014 RT_FALL_THRU();
[54737]1015 case CPUMUNKNOWNCPUID_DEFAULTS:
1016 case CPUMUNKNOWNCPUID_LAST_STD_LEAF: /* ASSUME this is executed */
1017 case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: /** @todo Implement CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX */
1018 *pEax = pVM->cpum.s.GuestInfo.DefCpuId.uEax;
1019 *pEbx = pVM->cpum.s.GuestInfo.DefCpuId.uEbx;
1020 *pEcx = pVM->cpum.s.GuestInfo.DefCpuId.uEcx;
1021 *pEdx = pVM->cpum.s.GuestInfo.DefCpuId.uEdx;
[25803]1022 break;
[54737]1023 case CPUMUNKNOWNCPUID_PASSTHRU:
1024 *pEax = uLeaf;
1025 *pEbx = 0;
1026 *pEcx = uSubLeaf;
1027 *pEdx = 0;
[25803]1028 break;
1029 }
[19076]1030 }
[54737]1031 Log2(("CPUMGetGuestCpuId: uLeaf=%#010x/%#010x %RX32 %RX32 %RX32 %RX32\n", uLeaf, uSubLeaf, *pEax, *pEbx, *pEcx, *pEdx));
[1]1032}
1033
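/*
 * Illustrative sketch of querying the standard feature leaf (leaf 1, sub-leaf 0)
 * through the function above, passing -1 for the 64-bit tristate since it only
 * matters for leaf 0x80000001.  Variable names are made up:
 *
 *     uint32_t uEax, uEbx, uEcx, uEdx;
 *     CPUMGetGuestCpuId(pVCpu, 1, 0, -1, &uEax, &uEbx, &uEcx, &uEdx);
 *     bool const fGstSse2 = RT_BOOL(uEdx & X86_CPUID_FEATURE_EDX_SSE2);
 */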
1034
1035/**
[61776]1036 * Sets the visibility of the X86_CPUID_FEATURE_EDX_APIC and
1037 * X86_CPUID_AMD_FEATURE_EDX_APIC CPUID bits.
[1]1038 *
[61776]1039 * @returns Previous value.
1040 * @param pVCpu The cross context virtual CPU structure to make the
1041 * change on. Usually the calling EMT.
1042 * @param fVisible Whether to make it visible (true) or hide it (false).
[62277]1043 *
1044 * @remarks This is "VMMDECL" so that it still links with
1045 * the old APIC code which is in VBoxDD2 and not in
1046 * the VMM module.
[1]1047 */
[62277]1048VMMDECL(bool) CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible)
[1]1049{
[61776]1050 bool fOld = pVCpu->cpum.s.fCpuIdApicFeatureVisible;
1051 pVCpu->cpum.s.fCpuIdApicFeatureVisible = fVisible;
1052 return fOld;
[8111]1053}
1054
[12657]1055
[8111]1056/**
[42034]1057 * Gets the host CPU vendor.
[23794]1058 *
[42034]1059 * @returns CPU vendor.
[58122]1060 * @param pVM The cross context VM structure.
[23794]1061 */
1062VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1063{
[49893]1064 return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
[23794]1065}
1066
[42034]1067
[23794]1068/**
[80293]1069 * Gets the host CPU microarchitecture.
[9354]1070 *
[80293]1071 * @returns CPU microarchitecture.
1072 * @param pVM The cross context VM structure.
1073 */
1074VMMDECL(CPUMMICROARCH) CPUMGetHostMicroarch(PCVM pVM)
1075{
1076 return pVM->cpum.s.HostFeatures.enmMicroarch;
1077}
1078
1079
1080/**
1081 * Gets the guest CPU vendor.
1082 *
[42034]1083 * @returns CPU vendor.
[58122]1084 * @param pVM The cross context VM structure.
[9354]1085 */
[23794]1086VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
[9354]1087{
[49893]1088 return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
[9354]1089}
[1]1090
1091
[80293]1092/**
1093 * Gets the guest CPU microarchitecture.
1094 *
1095 * @returns CPU microarchitecture.
1096 * @param pVM The cross context VM structure.
1097 */
1098VMMDECL(CPUMMICROARCH) CPUMGetGuestMicroarch(PCVM pVM)
1099{
1100 return pVM->cpum.s.GuestFeatures.enmMicroarch;
1101}
1102
1103
[88290]1104/**
1105 * Gets the maximum number of physical and linear address bits supported by the
1106 * guest.
1107 *
1108 * @param pVM The cross context VM structure.
1109 * @param pcPhysAddrWidth Where to store the physical address width.
1110 * @param pcLinearAddrWidth Where to store the linear address width.
1111 */
1112VMMDECL(void) CPUMGetGuestAddrWidths(PCVM pVM, uint8_t *pcPhysAddrWidth, uint8_t *pcLinearAddrWidth)
1113{
1114 AssertPtr(pVM);
1115 AssertReturnVoid(pcPhysAddrWidth);
1116 AssertReturnVoid(pcLinearAddrWidth);
1117 *pcPhysAddrWidth = pVM->cpum.s.GuestFeatures.cMaxPhysAddrWidth;
1118 *pcLinearAddrWidth = pVM->cpum.s.GuestFeatures.cMaxLinearAddrWidth;
1119}
1120
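/*
 * Illustrative usage sketch for the query above (made-up variable names):
 *
 *     uint8_t cGstPhysAddrBits, cGstLinearAddrBits;
 *     CPUMGetGuestAddrWidths(pVM, &cGstPhysAddrBits, &cGstLinearAddrBits);
 *     // e.g. derive the guest physical address mask from cGstPhysAddrBits
 */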
1121
[80253]1122VMMDECL(int) CPUMSetGuestDR0(PVMCPUCC pVCpu, uint64_t uDr0)
[1]1123{
[18927]1124 pVCpu->cpum.s.Guest.dr[0] = uDr0;
[87346]1125 return CPUMRecalcHyperDRx(pVCpu, 0);
[1]1126}
1127
[12657]1128
[80253]1129VMMDECL(int) CPUMSetGuestDR1(PVMCPUCC pVCpu, uint64_t uDr1)
[1]1130{
[18927]1131 pVCpu->cpum.s.Guest.dr[1] = uDr1;
[87346]1132 return CPUMRecalcHyperDRx(pVCpu, 1);
[1]1133}
1134
[12657]1135
[80253]1136VMMDECL(int) CPUMSetGuestDR2(PVMCPUCC pVCpu, uint64_t uDr2)
[1]1137{
[18927]1138 pVCpu->cpum.s.Guest.dr[2] = uDr2;
[87346]1139 return CPUMRecalcHyperDRx(pVCpu, 2);
[1]1140}
1141
[12657]1142
[80253]1143VMMDECL(int) CPUMSetGuestDR3(PVMCPUCC pVCpu, uint64_t uDr3)
[1]1144{
[18927]1145 pVCpu->cpum.s.Guest.dr[3] = uDr3;
[87346]1146 return CPUMRecalcHyperDRx(pVCpu, 3);
[1]1147}
1148
[12657]1149
[18927]1150VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
[1]1151{
[18927]1152 pVCpu->cpum.s.Guest.dr[6] = uDr6;
[72689]1153 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_DR6;
[47660]1154 return VINF_SUCCESS; /* No need to recalc. */
[1]1155}
1156
[12657]1157
[80253]1158VMMDECL(int) CPUMSetGuestDR7(PVMCPUCC pVCpu, uint64_t uDr7)
[1]1159{
[18927]1160 pVCpu->cpum.s.Guest.dr[7] = uDr7;
[72689]1161 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_DR7;
[87346]1162 return CPUMRecalcHyperDRx(pVCpu, 7);
[1]1163}
1164
[12657]1165
[80253]1166VMMDECL(int) CPUMSetGuestDRx(PVMCPUCC pVCpu, uint32_t iReg, uint64_t Value)
[1]1167{
[41728]1168 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
[12657]1169 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1170 if (iReg == 4 || iReg == 5)
1171 iReg += 2;
[18927]1172 pVCpu->cpum.s.Guest.dr[iReg] = Value;
[87346]1173 return CPUMRecalcHyperDRx(pVCpu, iReg);
[1]1174}
1175
1176
/**
 * Recalculates the hypervisor DRx register values based on the current guest
 * registers and DBGF breakpoints, updating changed registers depending on the
 * context.
 *
 * This is called whenever a guest DRx register is modified (any context) and
 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
 *
 * In raw-mode context this function will reload any (hyper) DRx registers that
 * come out with a different value.  It may also have to save the host debug
 * registers if that hasn't been done already.  In this context though, we'll
 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
 * are only important when breakpoints are actually enabled.
 *
 * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
 * reloaded by the HM code if it changes.  Furthermore, we will only use the
 * combined register set when the VBox debugger is actually using hardware BPs;
 * when it isn't we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
 * concern us here).
 *
 * In ring-3 we won't be loading anything, so we'll calculate hypervisor values
 * all the time.
 *
 * @returns VINF_SUCCESS.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   iGstReg     The guest debug register number that was modified.
 *                      UINT8_MAX if not a guest register.
 */
[87346]1205VMMDECL(int) CPUMRecalcHyperDRx(PVMCPUCC pVCpu, uint8_t iGstReg)
[1]1206{
[18927]1207 PVM pVM = pVCpu->CTX_SUFF(pVM);
[62601]1208#ifndef IN_RING0
1209 RT_NOREF_PV(iGstReg);
1210#endif
[18927]1211
[1]1212 /*
1213 * Compare the DR7s first.
1214 *
     * We only care about the enabled flags.  GD is virtualized when we
     * dispatch the #DB; we never enable it.  The DBGF DR7 value will
     * always have the LE and GE bits set, so no need to check and disable
     * stuff if they're cleared like we have to for the guest DR7.
[1]1219 */
[47660]1220 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
[72129]1221 /** @todo This isn't correct. BPs work without setting LE and GE under AMD-V. They are also documented as unsupported by P6+. */
[47660]1222 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
1223 uGstDr7 = 0;
1224 else if (!(uGstDr7 & X86_DR7_LE))
1225 uGstDr7 &= ~X86_DR7_LE_ALL;
1226 else if (!(uGstDr7 & X86_DR7_GE))
1227 uGstDr7 &= ~X86_DR7_GE_ALL;
1228
[1]1229 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
[87346]1230 if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
[1]1231 {
[47660]1232 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1233
        /*
         * Ok, something is enabled.  Recalc each of the breakpoints, taking
         * the VM debugger ones over the guest ones.  In raw-mode context we will
         * not allow breakpoints with values inside the hypervisor area.
         */
[47328]1239 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
[1]1240
1241 /* bp 0 */
1242 RTGCUINTREG uNewDr0;
1243 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1244 {
1245 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1246 uNewDr0 = DBGFBpGetDR0(pVM);
1247 }
1248 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1249 {
[18927]1250 uNewDr0 = CPUMGetGuestDR0(pVCpu);
[80050]1251 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
[1]1252 }
1253 else
[47660]1254 uNewDr0 = 0;
[1]1255
1256 /* bp 1 */
1257 RTGCUINTREG uNewDr1;
1258 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1259 {
1260 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1261 uNewDr1 = DBGFBpGetDR1(pVM);
1262 }
1263 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1264 {
[18927]1265 uNewDr1 = CPUMGetGuestDR1(pVCpu);
[80050]1266 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
[1]1267 }
1268 else
[47660]1269 uNewDr1 = 0;
[1]1270
1271 /* bp 2 */
1272 RTGCUINTREG uNewDr2;
1273 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1274 {
1275 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1276 uNewDr2 = DBGFBpGetDR2(pVM);
1277 }
1278 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1279 {
[18927]1280 uNewDr2 = CPUMGetGuestDR2(pVCpu);
[80050]1281 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
[1]1282 }
1283 else
[47660]1284 uNewDr2 = 0;
[1]1285
1286 /* bp 3 */
1287 RTGCUINTREG uNewDr3;
1288 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1289 {
1290 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1291 uNewDr3 = DBGFBpGetDR3(pVM);
1292 }
1293 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1294 {
[18927]1295 uNewDr3 = CPUMGetGuestDR3(pVCpu);
[80050]1296 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
[1]1297 }
1298 else
[47660]1299 uNewDr3 = 0;
[1]1300
1301 /*
1302 * Apply the updates.
1303 */
[80050]1304 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
1305 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
1306 CPUMSetHyperDR3(pVCpu, uNewDr3);
1307 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
1308 CPUMSetHyperDR2(pVCpu, uNewDr2);
1309 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
1310 CPUMSetHyperDR1(pVCpu, uNewDr1);
1311 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
1312 CPUMSetHyperDR0(pVCpu, uNewDr0);
1313 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
1314 CPUMSetHyperDR7(pVCpu, uNewDr7);
[1]1315 }
[47660]1316#ifdef IN_RING0
1317 else if (CPUMIsGuestDebugStateActive(pVCpu))
1318 {
1319 /*
1320 * Reload the register that was modified. Normally this won't happen
1321 * as we won't intercept DRx writes when not having the hyper debug
1322 * state loaded, but in case we do for some reason we'll simply deal
1323 * with it.
1324 */
1325 switch (iGstReg)
1326 {
1327 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
1328 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
1329 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
1330 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
1331 default:
1332 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
1333 }
1334 }
1335#endif
[1]1336 else
1337 {
[47660]1338 /*
1339 * No active debug state any more. In raw-mode this means we have to
1340 * make sure DR7 has everything disabled now, if we armed it already.
[47714]1341 * In ring-0 we might end up here when just single stepping.
[47660]1342 */
[80064]1343#ifdef IN_RING0
[47660]1344 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
[1]1345 {
[47660]1346 if (pVCpu->cpum.s.Hyper.dr[0])
1347 ASMSetDR0(0);
1348 if (pVCpu->cpum.s.Hyper.dr[1])
1349 ASMSetDR1(0);
1350 if (pVCpu->cpum.s.Hyper.dr[2])
1351 ASMSetDR2(0);
1352 if (pVCpu->cpum.s.Hyper.dr[3])
1353 ASMSetDR3(0);
1354 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
[1]1355 }
1356#endif
[47660]1357 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
1358
1359 /* Clear all the registers. */
1360 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
1361 pVCpu->cpum.s.Hyper.dr[3] = 0;
1362 pVCpu->cpum.s.Hyper.dr[2] = 0;
1363 pVCpu->cpum.s.Hyper.dr[1] = 0;
1364 pVCpu->cpum.s.Hyper.dr[0] = 0;
1365
[1]1366 }
1367 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
[18927]1368 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
[47660]1369 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
1370 pVCpu->cpum.s.Hyper.dr[7]));
[1]1371
1372 return VINF_SUCCESS;
1373}
1374
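/*
 * Illustrative note on the recalculation above: DBGF (VM debugger) breakpoints
 * take precedence per breakpoint slot.  For example, if DBGF has armed
 * breakpoint 0, the combined "hyper" DR0/DR7 carry DBGFBpGetDR0(pVM) and the
 * DBGF L0/G0/RW0/LEN0 bits, while slots only the guest enabled keep the guest
 * values.
 */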
[25835]1375
1376/**
[55289]1377 * Set the guest XCR0 register.
1378 *
[55312]1379 * Will load additional state if the FPU state is already loaded (in ring-0 &
1380 * raw-mode context).
1381 *
[55289]1382 * @returns VINF_SUCCESS on success, VERR_CPUM_RAISE_GP_0 on invalid input
1383 * value.
[58123]1384 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
[55289]1385 * @param uNewValue The new value.
1386 * @thread EMT(pVCpu)
1387 */
[80253]1388VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPUCC pVCpu, uint64_t uNewValue)
[55289]1389{
[72676]1390 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_XCRx);
[55289]1391 if ( (uNewValue & ~pVCpu->CTX_SUFF(pVM)->cpum.s.fXStateGuestMask) == 0
1392 /* The X87 bit cannot be cleared. */
1393 && (uNewValue & XSAVE_C_X87)
1394 /* AVX requires SSE. */
1395 && (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM)) != XSAVE_C_YMM
1396 /* AVX-512 requires YMM, SSE and all of its three components to be enabled. */
1397 && ( (uNewValue & (XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI)) == 0
1398 || (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI))
1399 == (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI) )
1400 )
1401 {
1402 pVCpu->cpum.s.Guest.aXcr[0] = uNewValue;
[55312]1403
1404 /* If more state components are enabled, we need to take care to load
1405 them if the FPU/SSE state is already loaded. May otherwise leak
1406 host state to the guest. */
1407 uint64_t fNewComponents = ~pVCpu->cpum.s.Guest.fXStateMask & uNewValue;
1408 if (fNewComponents)
1409 {
[80064]1410#ifdef IN_RING0
[61058]1411 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
[55312]1412 {
1413 if (pVCpu->cpum.s.Guest.fXStateMask != 0)
1414 /* Adding more components. */
[91281]1415 ASMXRstor(&pVCpu->cpum.s.Guest.XState, fNewComponents);
[55312]1416 else
1417 {
1418 /* We're switching from FXSAVE/FXRSTOR to XSAVE/XRSTOR. */
1419 pVCpu->cpum.s.Guest.fXStateMask |= XSAVE_C_X87 | XSAVE_C_SSE;
1420 if (uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE))
[91281]1421 ASMXRstor(&pVCpu->cpum.s.Guest.XState, uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE));
[55312]1422 }
1423 }
1424#endif
1425 pVCpu->cpum.s.Guest.fXStateMask |= uNewValue;
1426 }
[55289]1427 return VINF_SUCCESS;
1428 }
1429 return VERR_CPUM_RAISE_GP_0;
1430}
1431
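/*
 * Illustrative examples of how the XCR0 validity rules above play out
 * (assuming the components are permitted by fXStateGuestMask):
 *
 *     XSAVE_C_X87                              -> accepted (x87 alone is fine)
 *     XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM  -> accepted (AVX with SSE)
 *     XSAVE_C_X87 | XSAVE_C_YMM                -> VERR_CPUM_RAISE_GP_0 (AVX without SSE)
 *     XSAVE_C_SSE                              -> VERR_CPUM_RAISE_GP_0 (the x87 bit cannot be cleared)
 */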
1432
/**
 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
 *
 * @returns true if NXE is enabled, otherwise false.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
[79146]1439VMMDECL(bool) CPUMIsGuestNXEnabled(PCVMCPU pVCpu)
[25835]1440{
[72676]1441 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
[25835]1442 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
1443}
1444
1445
/**
 * Tests if the guest has the Page Size Extension enabled (PSE).
 *
 * @returns true if large pages are enabled (PSE or PAE), otherwise false.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
[79146]1452VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PCVMCPU pVCpu)
[25866]1453{
[72676]1454 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
[26635]1455 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
[26673]1456 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
[25866]1457}
1458
1459
/**
 * Tests if the guest has paging enabled (PG).
 *
 * @returns true if paging is enabled, otherwise false.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
[79146]1466VMMDECL(bool) CPUMIsGuestPagingEnabled(PCVMCPU pVCpu)
[25866]1467{
[72676]1468 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
[25866]1469 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
1470}
1471
1472
/**
 * Tests if the guest has ring-0 write protection enabled (CR0.WP).
 *
 * @returns true if CR0.WP is set, otherwise false.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
[79146]1479VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PCVMCPU pVCpu)
[25866]1480{
[72676]1481 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
[25866]1482 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
1483}
1484
1485
1486/**
[25835]1487 * Tests if the guest is running in real mode or not.
1488 *
1489 * @returns true if in real mode, otherwise false.
[58123]1490 * @param pVCpu The cross context virtual CPU structure.
[25835]1491 */
[79146]1492VMMDECL(bool) CPUMIsGuestInRealMode(PCVMCPU pVCpu)
[25835]1493{
[72676]1494 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
[25835]1495 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1496}
1497
1498
1499/**
[36639]1500 * Tests if the guest is running in real or virtual 8086 mode.
1501 *
1502 * @returns @c true if it is, @c false if not.
[58123]1503 * @param pVCpu The cross context virtual CPU structure.
[36639]1504 */
[79146]1505VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PCVMCPU pVCpu)
[36639]1506{
[72676]1507 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS);
[36639]1508 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
1509 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
1510}
1511
1512
/**
 * Tests if the guest is running in protected mode or not.
 *
 * @returns true if in protected mode, otherwise false.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
[79146]1519VMMDECL(bool) CPUMIsGuestInProtectedMode(PCVMCPU pVCpu)
[25835]1520{
[72676]1521 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
[25835]1522 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1523}
1524
1525
/**
 * Tests if the guest is running in paged protected mode or not.
 *
 * @returns true if in paged protected mode, otherwise false.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
[79146]1532VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PCVMCPU pVCpu)
[25835]1533{
[72676]1534 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
[25835]1535 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1536}
1537
1538
1539/**
1540 * Tests if the guest is running in long mode or not.
1541 *
1542 * @returns true if in long mode, otherwise false.
[58123]1543 * @param pVCpu The cross context virtual CPU structure.
[25835]1544 */
[79146]1545VMMDECL(bool) CPUMIsGuestInLongMode(PCVMCPU pVCpu)
[25835]1546{
[72676]1547 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
[25835]1548 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1549}
1550
1551
1552/**
1553 * Tests if the guest is running in PAE mode or not.
1554 *
1555 * @returns true if in PAE mode, otherwise false.
[58123]1556 * @param pVCpu The cross context virtual CPU structure.
[25835]1557 */
[79146]1558VMMDECL(bool) CPUMIsGuestInPAEMode(PCVMCPU pVCpu)
[25835]1559{
[72676]1560 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
[49849]1561 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
1562 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
[25835]1563 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
[45291]1564 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
[49849]1565 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
[25835]1566}
1567
1568
[42165]1569/**
1570 * Tests if the guest is running in 64 bits mode or not.
1571 *
1572 * @returns true if in 64 bits protected mode, otherwise false.
[58123]1573 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
[42165]1574 */
1575VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
1576{
[72676]1577 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
[42165]1578 if (!CPUMIsGuestInLongMode(pVCpu))
1579 return false;
[42407]1580 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
[42165]1581 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
1582}
1583
1584
1585/**
1586 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
1587 * registers.
1588 *
1589 * @returns true if in 64 bits protected mode, otherwise false.
1590 * @param pCtx Pointer to the current guest CPU context.
1591 */
1592VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
1593{
1594 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
1595}
1596
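/*
 * Illustrative note: the "slow" helper above recovers the VMCPU from a guest
 * context pointer via CPUM_GUEST_CTX_TO_VMCPU (RT_FROM_MEMBER), so it only
 * works on a CPUMCTX that really is the cpum.s.Guest member of a VMCPU, not on
 * a stack copy of the context.
 */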
[46165]1597
[42407]1598/**
[1]1599 * Sets the specified changed flags (CPUM_CHANGED_*).
1600 *
[58123]1601 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
[58126]1602 * @param fChangedAdd The changed flags to add.
[1]1603 */
[58126]1604VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd)
[1]1605{
[58126]1606 pVCpu->cpum.s.fChanged |= fChangedAdd;
[1]1607}
1608
[12657]1609
[1]1610/**
[55062]1611 * Checks if the CPU supports the XSAVE and XRSTOR instruction.
1612 *
[1]1613 * @returns true if supported.
1614 * @returns false if not supported.
[58122]1615 * @param pVM The cross context VM structure.
[1]1616 */
[55062]1617VMMDECL(bool) CPUMSupportsXSave(PVM pVM)
[1]1618{
[55062]1619 return pVM->cpum.s.HostFeatures.fXSaveRstor != 0;
[1]1620}
1621
1622
1623/**
1624 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
1625 * @returns true if used.
1626 * @returns false if not used.
[58122]1627 * @param pVM The cross context VM structure.
[1]1628 */
[12989]1629VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
[1]1630{
[47660]1631 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
[1]1632}
1633
1634
1635/**
1636 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
1637 * @returns true if used.
1638 * @returns false if not used.
[58122]1639 * @param pVM The cross context VM structure.
[1]1640 */
[12989]1641VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
[1]1642{
[47660]1643 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
[1]1644}
1645
[5695]1646
[1]1647/**
[42034]1648 * Checks if we activated the FPU/XMM state of the guest OS.
[61058]1649 *
[87345]1650 * Obsolete: This differs from CPUMIsGuestFPUStateLoaded() in that it refers to
1651 * the next time we'll be executing guest code, so it may return true for
1652 * 64-on-32 when we still haven't actually loaded the FPU status, just scheduled
1653 * it to be loaded the next time we go thru the world switcher
1654 * (CPUM_SYNC_FPU_STATE).
[61066]1655 *
1656 * @returns true / false.
[58123]1657 * @param pVCpu The cross context virtual CPU structure.
[1]1658 */
[13960]1659VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
[1]1660{
[87361]1661 bool fRet = RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
1662 AssertMsg(fRet == pVCpu->cpum.s.Guest.fUsedFpuGuest, ("fRet=%d\n", fRet));
1663 return fRet;
[61066]1664}
1665
1666
1667/**
1668 * Checks if we've really loaded the FPU/XMM state of the guest OS.
1669 *
1670 * @returns true / false.
1671 * @param pVCpu The cross context virtual CPU structure.
1672 */
1673VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu)
1674{
[87361]1675 bool fRet = RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
1676 AssertMsg(fRet == pVCpu->cpum.s.Guest.fUsedFpuGuest, ("fRet=%d\n", fRet));
1677 return fRet;
[1]1678}
1679
[5695]1680
[1]1681/**
[61058]1682 * Checks if we saved the FPU/XMM state of the host OS.
1683 *
1684 * @returns true / false.
1685 * @param pVCpu The cross context virtual CPU structure.
1686 */
1687VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu)
1688{
1689 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_HOST);
1690}
1691
1692
1693/**
[42034]1694 * Checks if the guest debug state is active.
[12578]1695 *
1696 * @returns boolean
[58126]1697 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
[12578]1698 */
[18927]1699VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
[12578]1700{
[47660]1701 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
[12578]1702}
1703
[48544]1704
[21252]1705/**
[42034]1706 * Checks if the hyper debug state is active.
[21252]1707 *
1708 * @returns boolean
[58126]1709 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
[21252]1710 */
1711VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
1712{
[47660]1713 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
[21252]1714}
[12657]1715
[21252]1716
[12578]1717/**
[30263]1718 * Marks the guest's debug state as inactive.
[12578]1719 *
[58126]1721 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
[47660]1722 * @todo This API doesn't make sense any more.
[12578]1723 */
[18927]1724VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
[12578]1725{
[47660]1726 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
[57856]1727 NOREF(pVCpu);
[12578]1728}
1729
1730
1731/**
[1828]1732 * Get the current privilege level of the guest.
1733 *
[41939]1734 * @returns CPL
[58123]1735 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
[1828]1736 */
[41939]1737VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
[1828]1738{
[42166]1739 /*
1740 * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
1741 *
1742 * Note! We used to check CS.DPL here, assuming it was always equal to
[66227]1743 * CPL even if a conforming segment was loaded. But this turned out to
[42166]1744 * only apply to older AMD-V. With VT-x we had an ACP2 regression
1745 * during install after a far call to ring 2 with VT-x. Then on newer
1746 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
1747 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
1748 *
1749 * So, forget CS.DPL, always use SS.DPL.
1750 *
1751 * Note! The SS RPL is always equal to the CPL, while the CS RPL
1752 * isn't necessarily equal if the segment is conforming.
1753 * See section 4.11.1 in the AMD manual.
[47225]1754 *
1755 * Update: Where the heck does it say CS.RPL can differ from CPL other than
1756 * right after a real->prot mode switch and when in V8086 mode? That
1757 * section says the RPL specified in a direct transfer (call, jmp,
1758 * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
1759 * it would be impossible for an exception handler or the iret
1760 * instruction to figure out whether SS:ESP are part of the frame
1761 * or not. A VBox or qemu bug must've led to this misconception.
[47242]1762 *
1763 * Update2: On an AMD bulldozer system here, I've no trouble loading a null
1764 * selector into SS with an RPL other than the CPL when CPL != 3 and
1765 * we're in 64-bit mode. The Intel dev box doesn't allow this and insists
1766 * on RPL = CPL. Weird.
[42166]1767 */
[72676]1768 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
[41939]1769 uint32_t uCpl;
[42166]1770 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
[10523]1771 {
[42166]1772 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
[31512]1773 {
[42407]1774 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
[41939]1775 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
[31512]1776 else
[42166]1777 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
[1969]1778 }
1779 else
[42166]1780 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
[1828]1781 }
1782 else
[42166]1783 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
[41939]1784 return uCpl;
[1828]1785}
[4207]1786
1787
1788/**
1789 * Gets the current guest CPU mode.
1790 *
1791 * If paging mode is what you need, check out PGMGetGuestMode().
1792 *
1793 * @returns The CPU mode.
[58123]1794 * @param pVCpu The cross context virtual CPU structure.
[4207]1795 */
[18927]1796VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
[4207]1797{
[72676]1798 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
[4207]1799 CPUMMODE enmMode;
[18927]1800 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
[4207]1801 enmMode = CPUMMODE_REAL;
[18927]1802 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
[4207]1803 enmMode = CPUMMODE_PROTECTED;
[9543]1804 else
1805 enmMode = CPUMMODE_LONG;
[4207]1806
1807 return enmMode;
1808}
[42186]1809
1810
1811/**
1812 * Figure whether the CPU is currently executing 16, 32 or 64 bit code.
1813 *
1814 * @returns 16, 32 or 64.
[58123]1815 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
[42186]1816 */
1817VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
1818{
[72676]1819 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS);
1820
[42186]1821 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
1822 return 16;
1823
1824 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
1825 {
1826 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
1827 return 16;
1828 }
1829
[42407]1830 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
[42186]1831 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
1832 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
1833 return 64;
1834
1835 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
1836 return 32;
1837
1838 return 16;
1839}
1840
1841
1842VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
1843{
[72676]1844 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS);
1845
[42186]1846 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
1847 return DISCPUMODE_16BIT;
1848
1849 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
1850 {
1851 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
1852 return DISCPUMODE_16BIT;
1853 }
1854
[42407]1855 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
[42186]1856 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
1857 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
1858 return DISCPUMODE_64BIT;
1859
1860 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
1861 return DISCPUMODE_32BIT;
1862
1863 return DISCPUMODE_16BIT;
1864}
1865
[66403]1866
1867/**
1868 * Gets the guest MXCSR_MASK value.
1869 *
1870 * This does not access the x87 state, but the value we determined at VM
1871 * initialization.
1872 *
1873 * @returns MXCSR mask.
1874 * @param pVM The cross context VM structure.
1875 */
1876VMMDECL(uint32_t) CPUMGetGuestMxCsrMask(PVM pVM)
1877{
1878 return pVM->cpum.s.GuestInfo.fMxCsrMask;
1879}
1880
[67258]1881
1882/**
[75830]1883 * Returns whether the guest has physical interrupts enabled.
[75729]1884 *
[75830]1885 * @returns @c true if interrupts are enabled, @c false otherwise.
1886 * @param pVCpu The cross context virtual CPU structure.
[75729]1887 *
[75830]1888 * @remarks Warning! This function does -not- take into account the global-interrupt
1889 * flag (GIF).
1890 */
1891VMM_INT_DECL(bool) CPUMIsGuestPhysIntrEnabled(PVMCPU pVCpu)
1892{
1893 if (!CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest))
1894 {
1895 uint32_t const fEFlags = pVCpu->cpum.s.Guest.eflags.u;
1896 return RT_BOOL(fEFlags & X86_EFL_IF);
1897 }
1898
1899 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest))
[81665]1900 return CPUMIsGuestVmxPhysIntrEnabled(&pVCpu->cpum.s.Guest);
[75830]1901
1902 Assert(CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.s.Guest));
1903 return CPUMIsGuestSvmPhysIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);
1904}
1905
1906
1907/**
1908 * Returns whether the nested-guest has virtual interrupts enabled.
1909 *
1910 * @returns @c true if interrupts are enabled, @c false otherwise.
[75729]1911 * @param pVCpu The cross context virtual CPU structure.
[75830]1912 *
1913 * @remarks Warning! This function does -not- take into account the global-interrupt
1914 * flag (GIF).
[75729]1915 */
[75830]1916VMM_INT_DECL(bool) CPUMIsGuestVirtIntrEnabled(PVMCPU pVCpu)
[75729]1917{
[81665]1918 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
1919 Assert(CPUMIsGuestInNestedHwvirtMode(pCtx));
[75729]1920
[81665]1921 if (CPUMIsGuestInVmxNonRootMode(pCtx))
1922 return CPUMIsGuestVmxVirtIntrEnabled(pCtx);
[75729]1923
[81665]1924 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
1925 return CPUMIsGuestSvmVirtIntrEnabled(pVCpu, pCtx);
[75729]1926}
1927
1928
1929/**
[75646]1930 * Calculates the interruptibility of the guest.
1931 *
1932 * @returns Interruptibility level.
1933 * @param pVCpu The cross context virtual CPU structure.
1934 */
1935VMM_INT_DECL(CPUMINTERRUPTIBILITY) CPUMGetGuestInterruptibility(PVMCPU pVCpu)
1936{
[75729]1937#if 1
[75830]1938 /* A cleared global-interrupt flag blocks pretty much everything we care about here. */
1939 if (CPUMGetGuestGif(&pVCpu->cpum.s.Guest))
[75729]1940 {
[75830]1941 /*
1942 * Physical interrupts are primarily blocked using EFLAGS. However, we cannot access
1943 * it directly here. If and how EFLAGS are used depends on the context (nested-guest
1944 * or raw-mode). Hence we use the function below which handles the details.
1945 */
1946 if ( CPUMIsGuestPhysIntrEnabled(pVCpu)
1947 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS | VMCPU_FF_INHIBIT_INTERRUPTS))
[75729]1948 {
[75830]1949 if ( !CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest)
1950 || CPUMIsGuestVirtIntrEnabled(pVCpu))
[75729]1951 return CPUMINTERRUPTIBILITY_UNRESTRAINED;
1952
[75830]1953 /* Physical interrupts are enabled, but nested-guest virtual interrupts are disabled. */
1954 return CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED;
1955 }
1956
1957 /*
1958 * Blocking the delivery of NMIs during an interrupt shadow is CPU implementation
1959 * specific. Therefore, in practice, we can't deliver an NMI in an interrupt shadow.
1960 * However, there is some uncertainty regarding the converse, i.e. whether
1961 * NMI-blocking until IRET blocks delivery of physical interrupts.
1962 *
1963 * See Intel spec. 25.4.1 "Event Blocking".
1964 */
1965 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
[75729]1966 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
[75830]1967
1968 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1969 return CPUMINTERRUPTIBILITY_INT_INHIBITED;
1970
1971 return CPUMINTERRUPTIBILITY_INT_DISABLED;
[75729]1972 }
1973 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
1974#else
[75646]1975 if (pVCpu->cpum.s.Guest.rflags.Bits.u1IF)
1976 {
1977 if (pVCpu->cpum.s.Guest.hwvirt.fGif)
1978 {
1979 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS | VMCPU_FF_INHIBIT_INTERRUPTS))
1980 return CPUMINTERRUPTIBILITY_UNRESTRAINED;
1981
1982 /** @todo does blocking NMIs mean interrupts are also inhibited? */
1983 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1984 {
1985 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1986 return CPUMINTERRUPTIBILITY_INT_INHIBITED;
1987 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
1988 }
1989 AssertFailed();
1990 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
1991 }
1992 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
1993 }
1994 else
1995 {
1996 if (pVCpu->cpum.s.Guest.hwvirt.fGif)
1997 {
1998 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1999 return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
2000 return CPUMINTERRUPTIBILITY_INT_DISABLED;
2001 }
2002 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
2003 }
[75729]2004#endif
[75646]2005}
2006
2007
2008/**
[77712]2009 * Gets whether the guest (or nested-guest) is currently blocking delivery of NMIs.
2010 *
2011 * @returns @c true if NMIs are blocked, @c false otherwise.
2012 * @param pVCpu The cross context virtual CPU structure.
2013 */
[79164]2014VMM_INT_DECL(bool) CPUMIsGuestNmiBlocking(PCVMCPU pVCpu)
[77712]2015{
2016 /*
2017 * Return the state of guest-NMI blocking in any of the following cases:
2018 * - We're not executing a nested-guest.
2019 * - We're executing an SVM nested-guest[1].
2020 * - We're executing a VMX nested-guest without virtual-NMIs enabled.
2021 *
2022 * [1] -- SVM does not support virtual-NMIs or virtual-NMI blocking.
2023 * SVM hypervisors must track NMI blocking themselves by intercepting
2024 * the IRET instruction after injection of an NMI.
2025 */
2026 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2027 if ( !CPUMIsGuestInNestedHwvirtMode(pCtx)
2028 || CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
[81665]2029 || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI))
[77712]2030 return VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2031
2032 /*
2033 * Return the state of virtual-NMI blocking, if we are executing a
2034 * VMX nested-guest with virtual-NMIs enabled.
2035 */
[81665]2036 return CPUMIsGuestVmxVirtNmiBlocking(pCtx);
[77712]2037}
2038
2039
2040/**
[79096]2041 * Sets whether delivery of NMIs to the guest is blocked.
2042 *
2043 * @param pVCpu The cross context virtual CPU structure.
2044 * @param fBlock Whether NMIs are blocked or not.
2045 */
2046VMM_INT_DECL(void) CPUMSetGuestNmiBlocking(PVMCPU pVCpu, bool fBlock)
2047{
2048 /*
2049 * Set the state of guest-NMI blocking in any of the following cases:
2050 * - We're not executing a nested-guest.
2051 * - We're executing an SVM nested-guest[1].
2052 * - We're executing a VMX nested-guest without virtual-NMIs enabled.
2053 *
2054 * [1] -- SVM does not support virtual-NMIs or virtual-NMI blocking.
2055 * SVM hypervisors must track NMI blocking themselves by intercepting
2056 * the IRET instruction after injection of an NMI.
2057 */
2058 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2059 if ( !CPUMIsGuestInNestedHwvirtMode(pCtx)
2060 || CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
[81665]2061 || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI))
[79096]2062 {
2063 if (fBlock)
2064 {
2065 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2066 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2067 }
2068 else
2069 {
2070 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2071 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
2072 }
2073 return;
2074 }
2075
2076 /*
2077 * Set the state of virtual-NMI blocking, if we are executing a
2078 * VMX nested-guest with virtual-NMIs enabled.
2079 */
[81665]2080 return CPUMSetGuestVmxVirtNmiBlocking(pCtx, fBlock);
[79096]2081}
2082
2083
2084/**
[75830]2085 * Checks whether the SVM nested-guest has physical interrupts enabled.
[75413]2086 *
[75830]2087 * @returns true if interrupts are enabled, false otherwise.
[75413]2088 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2089 * @param pCtx The guest-CPU context.
[75830]2090 *
2091 * @remarks This does -not- take into account the global-interrupt flag.
[75413]2092 */
[78866]2093VMM_INT_DECL(bool) CPUMIsGuestSvmPhysIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
[67258]2094{
[70234]2095 /** @todo Optimization: Avoid this function call and use a pointer to the
2096 * relevant eflags instead (setup during VMRUN instruction emulation). */
[67258]2097 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2098
[72065]2099 X86EFLAGS fEFlags;
2100 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, pCtx))
2101 fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
2102 else
2103 fEFlags.u = pCtx->eflags.u;
[67258]2104
[72065]2105 return fEFlags.Bits.u1IF;
[67258]2106}
2107
2108
2109/**
[70781]2110 * Checks whether the SVM nested-guest is in a state to receive virtual
2111 * interrupts (i.e. those set up for injection by the VMRUN instruction).
[67258]2112 *
2113 * @returns true if it's ready to receive virtual interrupts, false otherwise.
2115 *
[70781]2116 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2117 * @param pCtx The guest-CPU context.
[67258]2118 */
[78866]2119VMM_INT_DECL(bool) CPUMIsGuestSvmVirtIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
[67258]2120{
[77149]2121 RT_NOREF(pVCpu);
[67258]2122 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2123
[91287]2124 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.Vmcb.ctrl;
[72065]2125 PCSVMINTCTRL pVmcbIntCtrl = &pVmcbCtrl->IntCtrl;
[75759]2126 Assert(!pVmcbIntCtrl->n.u1VGifEnable); /* We don't support passing virtual-GIF feature to the guest yet. */
[72065]2127 if ( !pVmcbIntCtrl->n.u1IgnoreTPR
2128 && pVmcbIntCtrl->n.u4VIntrPrio <= pVmcbIntCtrl->n.u8VTPR)
2129 return false;
[67258]2130
[77148]2131 return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
[67258]2132}
2133
2134
2135/**
[75830]2136 * Gets the pending SVM nested-guest interrupt vector.
[67258]2137 *
2138 * @returns The nested-guest interrupt to inject.
2139 * @param pCtx The guest-CPU context.
2140 */
[75830]2141VMM_INT_DECL(uint8_t) CPUMGetGuestSvmVirtIntrVector(PCCPUMCTX pCtx)
[67258]2142{
[91287]2143 return pCtx->hwvirt.svm.Vmcb.ctrl.IntCtrl.n.u8VIntrVector;
[67258]2144}
2145
[68226]2146
2147/**
2148 * Restores the host-state from the host-state save area as part of a \#VMEXIT.
2149 *
[69408]2150 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
[68226]2151 * @param pCtx The guest-CPU context.
2152 */
[80253]2153VMM_INT_DECL(void) CPUMSvmVmExitRestoreHostState(PVMCPUCC pVCpu, PCPUMCTX pCtx)
[68226]2154{
[68438]2155 /*
[68226]2156 * Reload the guest's "host state".
2157 */
2158 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2159 pCtx->es = pHostState->es;
2160 pCtx->cs = pHostState->cs;
2161 pCtx->ss = pHostState->ss;
2162 pCtx->ds = pHostState->ds;
2163 pCtx->gdtr = pHostState->gdtr;
2164 pCtx->idtr = pHostState->idtr;
[74101]2165 CPUMSetGuestEferMsrNoChecks(pVCpu, pCtx->msrEFER, pHostState->uEferMsr);
[69408]2166 CPUMSetGuestCR0(pVCpu, pHostState->uCr0 | X86_CR0_PE);
[68226]2167 pCtx->cr3 = pHostState->uCr3;
[69408]2168 CPUMSetGuestCR4(pVCpu, pHostState->uCr4);
[68226]2169 pCtx->rflags = pHostState->rflags;
2170 pCtx->rflags.Bits.u1VM = 0;
2171 pCtx->rip = pHostState->uRip;
2172 pCtx->rsp = pHostState->uRsp;
2173 pCtx->rax = pHostState->uRax;
2174 pCtx->dr[7] &= ~(X86_DR7_ENABLED_MASK | X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
2175 pCtx->dr[7] |= X86_DR7_RA1_MASK;
[69032]2176 Assert(pCtx->ss.Attr.n.u2Dpl == 0);
[68226]2177
2178 /** @todo if RIP is not canonical or outside the CS segment limit, we need to
2179 * raise \#GP(0) in the guest. */
2180
2181 /** @todo check the loaded host-state for consistency. Figure out what
2182 * exactly this involves? */
2183}
2184
2185
2186/**
2187 * Saves the host-state to the host-state save area as part of a VMRUN.
2188 *
2189 * @param pCtx The guest-CPU context.
2190 * @param cbInstr The length of the VMRUN instruction in bytes.
2191 */
2192VMM_INT_DECL(void) CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr)
2193{
2194 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2195 pHostState->es = pCtx->es;
2196 pHostState->cs = pCtx->cs;
2197 pHostState->ss = pCtx->ss;
2198 pHostState->ds = pCtx->ds;
2199 pHostState->gdtr = pCtx->gdtr;
2200 pHostState->idtr = pCtx->idtr;
2201 pHostState->uEferMsr = pCtx->msrEFER;
2202 pHostState->uCr0 = pCtx->cr0;
2203 pHostState->uCr3 = pCtx->cr3;
2204 pHostState->uCr4 = pCtx->cr4;
2205 pHostState->rflags = pCtx->rflags;
2206 pHostState->uRip = pCtx->rip + cbInstr;
2207 pHostState->uRsp = pCtx->rsp;
2208 pHostState->uRax = pCtx->rax;
2209}
2210
[70056]2211
2212/**
[78220]2213 * Applies the TSC offset of a nested-guest if any and returns the TSC value for the
2214 * nested-guest.
[70056]2215 *
2216 * @returns The TSC value after applying any nested-guest TSC offset.
2217 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
[80064]2218 * @param uTscValue The guest TSC.
[70782]2219 *
[78220]2220 * @sa CPUMRemoveNestedGuestTscOffset.
[70056]2221 */
[80064]2222VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue)
[70056]2223{
[70782]2224 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
[75109]2225 if (CPUMIsGuestInVmxNonRootMode(pCtx))
2226 {
[81665]2227 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
[91297]2228 return uTscValue + pCtx->hwvirt.vmx.Vmcs.u64TscOffset.u;
[80064]2229 return uTscValue;
[75109]2230 }
2231
[70056]2232 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
2233 {
[80064]2234 uint64_t offTsc;
2235 if (!HMGetGuestSvmTscOffset(pVCpu, &offTsc))
[91287]2236 offTsc = pCtx->hwvirt.svm.Vmcb.ctrl.u64TSCOffset;
[80064]2237 return uTscValue + offTsc;
[70056]2238 }
[80064]2239 return uTscValue;
[70056]2240}
2241
[72488]2242
2243/**
[78220]2244 * Removes the TSC offset of a nested-guest if any and returns the TSC value for the
2245 * guest.
2246 *
2247 * @returns The TSC value after removing any nested-guest TSC offset.
2248 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
[80064]2249 * @param uTscValue The nested-guest TSC.
[78220]2250 *
2251 * @sa CPUMApplyNestedGuestTscOffset.
2252 */
[80064]2253VMM_INT_DECL(uint64_t) CPUMRemoveNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue)
[78220]2254{
2255 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2256 if (CPUMIsGuestInVmxNonRootMode(pCtx))
2257 {
[81665]2258 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
[91297]2259 return uTscValue - pCtx->hwvirt.vmx.Vmcs.u64TscOffset.u;
[80064]2260 return uTscValue;
[78220]2261 }
2262
2263 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
2264 {
[80064]2265 uint64_t offTsc;
2266 if (!HMGetGuestSvmTscOffset(pVCpu, &offTsc))
[91287]2267 offTsc = pCtx->hwvirt.svm.Vmcb.ctrl.u64TSCOffset;
[80064]2268 return uTscValue - offTsc;
[78220]2269 }
[80064]2270 return uTscValue;
[78220]2271}
2272
2273
2274/**
[72488]2275 * Used to dynamically import state residing in NEM or HM.
2276 *
2277 * This is a worker for the CPUM_IMPORT_EXTRN_RET() macro and various IEM ones.
2278 *
2279 * @returns VBox status code.
2280 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2281 * @param fExtrnImport The fields to import.
2282 * @thread EMT(pVCpu)
2283 */
[80253]2284VMM_INT_DECL(int) CPUMImportGuestStateOnDemand(PVMCPUCC pVCpu, uint64_t fExtrnImport)
[72488]2285{
2286 VMCPU_ASSERT_EMT(pVCpu);
2287 if (pVCpu->cpum.s.Guest.fExtrn & fExtrnImport)
2288 {
2289 switch (pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_KEEPER_MASK)
2290 {
2291 case CPUMCTX_EXTRN_KEEPER_NEM:
2292 {
[72917]2293 int rc = NEMImportStateOnDemand(pVCpu, fExtrnImport);
[72488]2294 Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
2295 return rc;
2296 }
2297
[72643]2298 case CPUMCTX_EXTRN_KEEPER_HM:
2299 {
2300#ifdef IN_RING0
[72744]2301 int rc = HMR0ImportStateOnDemand(pVCpu, fExtrnImport);
[72643]2302 Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
2303 return rc;
2304#else
[72879]2305 AssertLogRelMsgFailed(("TODO Fetch HM state: %#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport));
[72643]2306 return VINF_SUCCESS;
2307#endif
2308 }
[72488]2309 default:
2310 AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2);
2311 }
2312 }
2313 return VINF_SUCCESS;
2314}
2315
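/*
 * Illustrative usage sketch (hypothetical, compiled out): this is roughly what
 * callers such as the CPUM_IMPORT_EXTRN_RET() macro mentioned above do before
 * touching lazily imported registers.  The fNeeded selection is an assumed
 * example only.
 */
#if 0
    uint64_t const fNeeded = CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3;
    if (pVCpu->cpum.s.Guest.fExtrn & fNeeded)
    {
        int rc = CPUMImportGuestStateOnDemand(pVCpu, fNeeded);
        AssertRCReturn(rc, rc);
    }
    /* pVCpu->cpum.s.Guest.cr0 and cr3 are now safe to read. */
#endif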
[76548]2316
2317/**
2318 * Gets valid CR4 bits for the guest.
2319 *
2320 * @returns Valid CR4 bits.
2321 * @param pVM The cross context VM structure.
2322 */
2323VMM_INT_DECL(uint64_t) CPUMGetGuestCR4ValidMask(PVM pVM)
2324{
2325 PCCPUMFEATURES pGuestFeatures = &pVM->cpum.s.GuestFeatures;
2326 uint64_t fMask = X86_CR4_VME | X86_CR4_PVI
2327 | X86_CR4_TSD | X86_CR4_DE
[91275]2328 | X86_CR4_MCE | X86_CR4_PCE;
2329 if (pGuestFeatures->fPae)
2330 fMask |= X86_CR4_PAE;
2331 if (pGuestFeatures->fPge)
2332 fMask |= X86_CR4_PGE;
2333 if (pGuestFeatures->fPse)
2334 fMask |= X86_CR4_PSE;
[76548]2335 if (pGuestFeatures->fFxSaveRstor)
2336 fMask |= X86_CR4_OSFXSR;
2337 if (pGuestFeatures->fVmx)
2338 fMask |= X86_CR4_VMXE;
2339 if (pGuestFeatures->fXSaveRstor)
2340 fMask |= X86_CR4_OSXSAVE;
2341 if (pGuestFeatures->fPcid)
2342 fMask |= X86_CR4_PCIDE;
2343 if (pGuestFeatures->fFsGsBase)
2344 fMask |= X86_CR4_FSGSBASE;
[91275]2345 if (pGuestFeatures->fSse)
2346 fMask |= X86_CR4_OSXMMEEXCPT;
[76548]2347 return fMask;
2348}
2349
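/*
 * Illustrative usage sketch (hypothetical, compiled out): a typical consumer
 * of CPUMGetGuestCR4ValidMask() rejects guest writes that set bits outside the
 * mask.  The uNewCr4 variable and the exact status code used here are
 * assumptions for the example only.
 */
#if 0
    uint64_t const fValidMask = CPUMGetGuestCR4ValidMask(pVM);
    if (uNewCr4 & ~fValidMask)
        return VERR_CPUM_RAISE_GP_0; /* Setting a reserved CR4 bit raises #GP(0). */
#endif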
[78371]2350
2351/**
[91580]2352 * Sets the PAE PDPEs for the guest.
[91271]2353 *
2354 * @param pVCpu The cross context virtual CPU structure of the calling thread.
[91580]2355 * @param paPaePdpes The PAE PDPEs to set.
[91271]2356 */
2357VMM_INT_DECL(void) CPUMSetGuestPaePdpes(PVMCPU pVCpu, PCX86PDPE paPaePdpes)
2358{
2359 Assert(paPaePdpes);
2360 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->cpum.s.Guest.aPaePdpes); i++)
2361 pVCpu->cpum.s.Guest.aPaePdpes[i].u = paPaePdpes[i].u;
2362 pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR3;
2363}
2364
2365
2366/**
[91580]2367 * Gets the PAE PDPEs for the guest.
2368 *
2369 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2370 * @param paPaePdpes Where to store the PAE PDPEs.
2371 */
2372VMM_INT_DECL(void) CPUMGetGuestPaePdpes(PVMCPU pVCpu, PX86PDPE paPaePdpes)
2373{
2374 Assert(paPaePdpes);
2375 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
2376 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->cpum.s.Guest.aPaePdpes); i++)
2377 paPaePdpes[i].u = pVCpu->cpum.s.Guest.aPaePdpes[i].u;
2378}
2379
2380
2381/**
[81786]2382 * Starts a VMX-preemption timer to expire as specified by the nested hypervisor.
2383 *
2384 * @returns VBox status code.
2385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2386 * @param uTimer The VMCS preemption timer value.
2387 * @param cShift The VMX-preemption timer shift (usually based on guest
2388 * VMX MSR rate).
2389 * @param pu64EntryTick Where to store the current tick when the timer is
2390 * programmed.
2391 * @thread EMT(pVCpu)
2392 */
2393VMM_INT_DECL(int) CPUMStartGuestVmxPremptTimer(PVMCPUCC pVCpu, uint32_t uTimer, uint8_t cShift, uint64_t *pu64EntryTick)
2394{
2395 Assert(uTimer);
2396 Assert(cShift <= 31);
2397 Assert(pu64EntryTick);
2398 VMCPU_ASSERT_EMT(pVCpu);
2399 uint64_t const cTicksToNext = uTimer << cShift;
[87766]2400 return TMTimerSetRelative(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.s.hNestedVmxPreemptTimer, cTicksToNext, pu64EntryTick);
[81786]2401}
2402
2403
2404/**
2405 * Stops the VMX-preemption timer from firing.
2406 *
2407 * @returns VBox status code.
2408 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2409 * @thread EMT.
2410 *
2411 * @remarks This can be called during VM reset, so we cannot assume it will be on
2412 * the EMT corresponding to @c pVCpu.
2413 */
2414VMM_INT_DECL(int) CPUMStopGuestVmxPremptTimer(PVMCPUCC pVCpu)
2415{
2416 /*
2417 * CPUM gets initialized before TM, so we defer creation of timers till CPUMR3InitCompleted().
[87766]2418 * However, we still get called during CPUMR3Init() and hence we need to check if we have
[81786]2419 * a valid timer object before trying to stop it.
2420 */
[87766]2421 int rc;
2422 TMTIMERHANDLE hTimer = pVCpu->cpum.s.hNestedVmxPreemptTimer;
2423 if (hTimer != NIL_TMTIMERHANDLE)
[81786]2424 {
[87766]2425 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2426 rc = TMTimerLock(pVM, hTimer, VERR_IGNORED);
2427 if (rc == VINF_SUCCESS)
2428 {
2429 if (TMTimerIsActive(pVM, hTimer))
2430 TMTimerStop(pVM, hTimer);
2431 TMTimerUnlock(pVM, hTimer);
2432 }
[81786]2433 }
[87766]2434 else
2435 rc = VERR_NOT_FOUND;
[81786]2436 return rc;
2437}
2438
2439
2440/**
[78371]2441 * Gets the read and write permission bits for an MSR in an MSR bitmap.
2442 *
2443 * @returns VMXMSRPM_XXX - the MSR permission.
2444 * @param pvMsrBitmap Pointer to the MSR bitmap.
2445 * @param idMsr The MSR to get permissions for.
2446 *
2447 * @sa hmR0VmxSetMsrPermission.
2448 */
2449VMM_INT_DECL(uint32_t) CPUMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr)
2450{
2451 AssertPtrReturn(pvMsrBitmap, VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR);
2452
2453 uint8_t const * const pbMsrBitmap = (uint8_t const * const)pvMsrBitmap;
2454
2455 /*
2456 * MSR Layout:
2457 * Byte index MSR range Interpreted as
2458 * 0x000 - 0x3ff 0x00000000 - 0x00001fff Low MSR read bits.
2459 * 0x400 - 0x7ff 0xc0000000 - 0xc0001fff High MSR read bits.
2460 * 0x800 - 0xbff 0x00000000 - 0x00001fff Low MSR write bits.
2461 * 0xc00 - 0xfff 0xc0000000 - 0xc0001fff High MSR write bits.
2462 *
2463 * A bit corresponding to an MSR within the above range causes a VM-exit
2464 * if the bit is 1 on executions of RDMSR/WRMSR. If an MSR falls outside
2465 * the MSR range, it always causes a VM-exit.
2466 *
2467 * See Intel spec. 24.6.9 "MSR-Bitmap Address".
2468 */
2469 uint32_t const offBitmapRead = 0;
2470 uint32_t const offBitmapWrite = 0x800;
2471 uint32_t offMsr;
2472 uint32_t iBit;
2473 if (idMsr <= UINT32_C(0x00001fff))
2474 {
2475 offMsr = 0;
2476 iBit = idMsr;
2477 }
2478 else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
2479 {
2480 offMsr = 0x400;
2481 iBit = idMsr - UINT32_C(0xc0000000);
2482 }
2483 else
2484 {
2485 LogFunc(("Warning! Out of range MSR %#RX32\n", idMsr));
2486 return VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR;
2487 }
2488
2489 /*
2490 * Get the MSR read permissions.
2491 */
2492 uint32_t fRet;
2493 uint32_t const offMsrRead = offBitmapRead + offMsr;
2494 Assert(offMsrRead + (iBit >> 3) < offBitmapWrite);
2495 if (ASMBitTest(pbMsrBitmap + offMsrRead, iBit))
2496 fRet = VMXMSRPM_EXIT_RD;
2497 else
2498 fRet = VMXMSRPM_ALLOW_RD;
2499
2500 /*
2501 * Get the MSR write permissions.
2502 */
2503 uint32_t const offMsrWrite = offBitmapWrite + offMsr;
2504 Assert(offMsrWrite + (iBit >> 3) < X86_PAGE_4K_SIZE);
2505 if (ASMBitTest(pbMsrBitmap + offMsrWrite, iBit))
2506 fRet |= VMXMSRPM_EXIT_WR;
2507 else
2508 fRet |= VMXMSRPM_ALLOW_WR;
2509
2510 Assert(VMXMSRPM_IS_FLAG_VALID(fRet));
2511 return fRet;
2512}
2513
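/*
 * Illustrative worked example (hypothetical helper, compiled out): how the
 * bitmap layout documented in CPUMGetVmxMsrPermission() above maps a
 * high-range MSR, here MSR_K6_EFER (0xc0000080), to its read and write bits.
 */
#if 0
static uint32_t cpumExampleVmxMsrPermissionForEfer(uint8_t const *pbMsrBitmap)
{
    uint32_t const iBit     = UINT32_C(0xc0000080) - UINT32_C(0xc0000000); /* Bit 0x80 of the high range. */
    uint32_t const offRead  = 0x400 + (iBit >> 3);  /* High MSR read bits start at byte 0x400. */
    uint32_t const offWrite = 0xc00 + (iBit >> 3);  /* High MSR write bits start at byte 0x800 + 0x400. */
    uint32_t fPerm = pbMsrBitmap[offRead]  & RT_BIT_32(iBit & 7) ? VMXMSRPM_EXIT_RD : VMXMSRPM_ALLOW_RD;
    fPerm         |= pbMsrBitmap[offWrite] & RT_BIT_32(iBit & 7) ? VMXMSRPM_EXIT_WR : VMXMSRPM_ALLOW_WR;
    return fPerm; /* Same result as CPUMGetVmxMsrPermission(pbMsrBitmap, 0xc0000080). */
}
#endif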
2514
2515/**
[80806]2516 * Checks the permission bits for the specified I/O port from the given I/O bitmap
2517 * to see if it causes a VM-exit.
[78371]2518 *
2519 * @returns @c true if the I/O port access must cause a VM-exit, @c false otherwise.
[91305]2520 * @param pbIoBitmap Pointer to I/O bitmap.
[80806]2521 * @param uPort The I/O port being accessed.
2522 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
[78371]2523 */
[91305]2524static bool cpumGetVmxIoBitmapPermission(uint8_t const *pbIoBitmap, uint16_t uPort, uint8_t cbAccess)
[78371]2525{
2526 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
2527
2528 /*
[80803]2529 * If the I/O port access wraps around the 16-bit port I/O space, we must cause a
2530 * VM-exit.
[78371]2531 *
[80804]2532 * Reading 1, 2 or 4 bytes at ports 0xffff, 0xfffe and 0xfffc respectively is
2533 * valid and does not constitute a wrap around. However, reading 2 bytes at port
2534 * 0xffff, or 4 bytes from port 0xffff/0xfffe/0xfffd, constitutes a wrap around.
2535 * In other words, any access that touches both port 0xffff and port 0 is a wrap around.
2536 *
[78371]2537 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2538 */
2539 uint32_t const uPortLast = uPort + cbAccess;
2540 if (uPortLast > 0x10000)
2541 return true;
2542
[80803]2543 /*
2544 * If any bit corresponding to the I/O access is set, we must cause a VM-exit.
2545 */
[80814]2546 uint16_t const offPerm = uPort >> 3; /* Byte offset of the port. */
2547 uint16_t const idxPermBit = uPort - (offPerm << 3); /* Bit offset within byte. */
2548 Assert(idxPermBit < 8);
2549 static const uint8_t s_afMask[] = { 0x0, 0x1, 0x3, 0x7, 0xf }; /* Bit-mask for all access sizes. */
2550 uint16_t const fMask = s_afMask[cbAccess] << idxPermBit; /* Bit-mask of the access. */
[80803]2551
[80814]2552 /* Fetch 8 or 16-bits depending on whether the access spans 8-bit boundary. */
2553 RTUINT16U uPerm;
[91305]2554 uPerm.s.Lo = pbIoBitmap[offPerm];
[80814]2555 if (idxPermBit + cbAccess > 8)
[91305]2556 uPerm.s.Hi = pbIoBitmap[offPerm + 1];
[80814]2557 else
2558 uPerm.s.Hi = 0;
[80803]2559
[80814]2560 /* If any bit for the access is 1, we must cause a VM-exit. */
2561 if (uPerm.u & fMask)
2562 return true;
[80803]2563
2564 return false;
[78371]2565}
2566
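/*
 * Illustrative worked example (hypothetical, compiled out): the arithmetic of
 * cpumGetVmxIoBitmapPermission() above for an assumed 2-byte access to port
 * 0x3f8, plus the wrap-around rule for port 0xffff.  pbIoBitmap is assumed to
 * point at the I/O bitmap.
 */
#if 0
    uint16_t const offPerm    = 0x3f8 >> 3;              /* 0x7f - the byte covering ports 0x3f8..0x3ff. */
    uint16_t const idxPermBit = 0x3f8 - (offPerm << 3);  /* 0 - port 0x3f8 is bit 0 of that byte. */
    uint16_t const fMask      = 0x3 << idxPermBit;       /* Two ports -> two adjacent bits; no byte crossing. */
    bool const fExit = RT_BOOL(pbIoBitmap[offPerm] & fMask);
    /* A 2-byte access at port 0xffff, however, wraps (0xffff + 2 > 0x10000) and always causes a VM-exit. */
#endif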
2567
2568/**
[79194]2569 * Returns whether the given VMCS field is valid and supported for the guest.
2570 *
2571 * @param pVM The cross context VM structure.
2572 * @param u64VmcsField The VMCS field.
2573 *
2574 * @remarks This takes into account the CPU features exposed to the guest.
2575 */
[80253]2576VMM_INT_DECL(bool) CPUMIsGuestVmxVmcsFieldValid(PVMCC pVM, uint64_t u64VmcsField)
[79194]2577{
2578 uint32_t const uFieldEncHi = RT_HI_U32(u64VmcsField);
2579 uint32_t const uFieldEncLo = RT_LO_U32(u64VmcsField);
2580 if (!uFieldEncHi)
2581 { /* likely */ }
2582 else
2583 return false;
2584
2585 PCCPUMFEATURES pFeat = &pVM->cpum.s.GuestFeatures;
2586 switch (uFieldEncLo)
2587 {
2588 /*
2589 * 16-bit fields.
2590 */
2591 /* Control fields. */
2592 case VMX_VMCS16_VPID: return pFeat->fVmxVpid;
2593 case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR: return pFeat->fVmxPostedInt;
2594 case VMX_VMCS16_EPTP_INDEX: return pFeat->fVmxEptXcptVe;
2595
2596 /* Guest-state fields. */
2597 case VMX_VMCS16_GUEST_ES_SEL:
2598 case VMX_VMCS16_GUEST_CS_SEL:
2599 case VMX_VMCS16_GUEST_SS_SEL:
2600 case VMX_VMCS16_GUEST_DS_SEL:
2601 case VMX_VMCS16_GUEST_FS_SEL:
2602 case VMX_VMCS16_GUEST_GS_SEL:
2603 case VMX_VMCS16_GUEST_LDTR_SEL:
2604 case VMX_VMCS16_GUEST_TR_SEL: return true;
2605 case VMX_VMCS16_GUEST_INTR_STATUS: return pFeat->fVmxVirtIntDelivery;
2606 case VMX_VMCS16_GUEST_PML_INDEX: return pFeat->fVmxPml;
2607
2608 /* Host-state fields. */
2609 case VMX_VMCS16_HOST_ES_SEL:
2610 case VMX_VMCS16_HOST_CS_SEL:
2611 case VMX_VMCS16_HOST_SS_SEL:
2612 case VMX_VMCS16_HOST_DS_SEL:
2613 case VMX_VMCS16_HOST_FS_SEL:
2614 case VMX_VMCS16_HOST_GS_SEL:
2615 case VMX_VMCS16_HOST_TR_SEL: return true;
2616
2617 /*
2618 * 64-bit fields.
2619 */
2620 /* Control fields. */
2621 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
2622 case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
2623 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
2624 case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH: return pFeat->fVmxUseIoBitmaps;
2625 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
2626 case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH: return pFeat->fVmxUseMsrBitmaps;
2627 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
2628 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
2629 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
2630 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
2631 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
2632 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
2633 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
2634 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH: return true;
2635 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
2636 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH: return pFeat->fVmxPml;
2637 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
2638 case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH: return true;
2639 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
2640 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
2641 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
2642 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH: return pFeat->fVmxVirtApicAccess;
2643 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
2644 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
2645 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
2646 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH: return pFeat->fVmxVmFunc;
2647 case VMX_VMCS64_CTRL_EPTP_FULL:
2648 case VMX_VMCS64_CTRL_EPTP_HIGH: return pFeat->fVmxEpt;
2649 case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
2650 case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
2651 case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
2652 case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
2653 case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
2654 case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
2655 case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
2656 case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH: return pFeat->fVmxVirtIntDelivery;
2657 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
2658 case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
2659 {
[80253]2660 PCVMCPU pVCpu = pVM->CTX_SUFF(apCpus)[0];
[79194]2661 uint64_t const uVmFuncMsr = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64VmFunc;
2662 return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
2663 }
2664 case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
2665 case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
2666 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
2667 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH: return pFeat->fVmxVmcsShadowing;
[90932]2668 case VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL:
2669 case VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
[79194]2670 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
2671 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
2672 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
2673 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH: return pFeat->fVmxUseTscScaling;
[91037]2674 case VMX_VMCS64_CTRL_PROC_EXEC3_FULL:
2675 case VMX_VMCS64_CTRL_PROC_EXEC3_HIGH: return pFeat->fVmxTertiaryExecCtls;
[79194]2676
2677 /* Read-only data fields. */
2678 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
2679 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH: return pFeat->fVmxEpt;
2680
2681 /* Guest-state fields. */
2682 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
2683 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
2684 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
2685 case VMX_VMCS64_GUEST_DEBUGCTL_HIGH: return true;
2686 case VMX_VMCS64_GUEST_PAT_FULL:
2687 case VMX_VMCS64_GUEST_PAT_HIGH: return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
2688 case VMX_VMCS64_GUEST_EFER_FULL:
2689 case VMX_VMCS64_GUEST_EFER_HIGH: return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
2690 case VMX_VMCS64_GUEST_PDPTE0_FULL:
2691 case VMX_VMCS64_GUEST_PDPTE0_HIGH:
2692 case VMX_VMCS64_GUEST_PDPTE1_FULL:
2693 case VMX_VMCS64_GUEST_PDPTE1_HIGH:
2694 case VMX_VMCS64_GUEST_PDPTE2_FULL:
2695 case VMX_VMCS64_GUEST_PDPTE2_HIGH:
2696 case VMX_VMCS64_GUEST_PDPTE3_FULL:
2697 case VMX_VMCS64_GUEST_PDPTE3_HIGH: return pFeat->fVmxEpt;
2698
2699 /* Host-state fields. */
2700 case VMX_VMCS64_HOST_PAT_FULL:
2701 case VMX_VMCS64_HOST_PAT_HIGH: return pFeat->fVmxExitLoadPatMsr;
2702 case VMX_VMCS64_HOST_EFER_FULL:
2703 case VMX_VMCS64_HOST_EFER_HIGH: return pFeat->fVmxExitLoadEferMsr;
2704
2705 /*
2706 * 32-bit fields.
2707 */
2708 /* Control fields. */
2709 case VMX_VMCS32_CTRL_PIN_EXEC:
2710 case VMX_VMCS32_CTRL_PROC_EXEC:
2711 case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
2712 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
2713 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
2714 case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
2715 case VMX_VMCS32_CTRL_EXIT:
2716 case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
2717 case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
2718 case VMX_VMCS32_CTRL_ENTRY:
2719 case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
2720 case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
2721 case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
2722 case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH: return true;
2723 case VMX_VMCS32_CTRL_TPR_THRESHOLD: return pFeat->fVmxUseTprShadow;
2724 case VMX_VMCS32_CTRL_PROC_EXEC2: return pFeat->fVmxSecondaryExecCtls;
2725 case VMX_VMCS32_CTRL_PLE_GAP:
2726 case VMX_VMCS32_CTRL_PLE_WINDOW: return pFeat->fVmxPauseLoopExit;
2727
2728 /* Read-only data fields. */
2729 case VMX_VMCS32_RO_VM_INSTR_ERROR:
2730 case VMX_VMCS32_RO_EXIT_REASON:
2731 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
2732 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
2733 case VMX_VMCS32_RO_IDT_VECTORING_INFO:
2734 case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
2735 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
2736 case VMX_VMCS32_RO_EXIT_INSTR_INFO: return true;
2737
2738 /* Guest-state fields. */
2739 case VMX_VMCS32_GUEST_ES_LIMIT:
2740 case VMX_VMCS32_GUEST_CS_LIMIT:
2741 case VMX_VMCS32_GUEST_SS_LIMIT:
2742 case VMX_VMCS32_GUEST_DS_LIMIT:
2743 case VMX_VMCS32_GUEST_FS_LIMIT:
2744 case VMX_VMCS32_GUEST_GS_LIMIT:
2745 case VMX_VMCS32_GUEST_LDTR_LIMIT:
2746 case VMX_VMCS32_GUEST_TR_LIMIT:
2747 case VMX_VMCS32_GUEST_GDTR_LIMIT:
2748 case VMX_VMCS32_GUEST_IDTR_LIMIT:
2749 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
2750 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
2751 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
2752 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
2753 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
2754 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
2755 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
2756 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
2757 case VMX_VMCS32_GUEST_INT_STATE:
2758 case VMX_VMCS32_GUEST_ACTIVITY_STATE:
2759 case VMX_VMCS32_GUEST_SMBASE:
2760 case VMX_VMCS32_GUEST_SYSENTER_CS: return true;
2761 case VMX_VMCS32_PREEMPT_TIMER_VALUE: return pFeat->fVmxPreemptTimer;
2762
2763 /* Host-state fields. */
2764 case VMX_VMCS32_HOST_SYSENTER_CS: return true;
2765
2766 /*
2767 * Natural-width fields.
2768 */
2769 /* Control fields. */
2770 case VMX_VMCS_CTRL_CR0_MASK:
2771 case VMX_VMCS_CTRL_CR4_MASK:
2772 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
2773 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
2774 case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
2775 case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
2776 case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
2777 case VMX_VMCS_CTRL_CR3_TARGET_VAL3: return true;
2778
2779 /* Read-only data fields. */
2780 case VMX_VMCS_RO_EXIT_QUALIFICATION:
2781 case VMX_VMCS_RO_IO_RCX:
2782 case VMX_VMCS_RO_IO_RSI:
2783 case VMX_VMCS_RO_IO_RDI:
2784 case VMX_VMCS_RO_IO_RIP:
2785 case VMX_VMCS_RO_GUEST_LINEAR_ADDR: return true;
2786
2787 /* Guest-state fields. */
2788 case VMX_VMCS_GUEST_CR0:
2789 case VMX_VMCS_GUEST_CR3:
2790 case VMX_VMCS_GUEST_CR4:
2791 case VMX_VMCS_GUEST_ES_BASE:
2792 case VMX_VMCS_GUEST_CS_BASE:
2793 case VMX_VMCS_GUEST_SS_BASE:
2794 case VMX_VMCS_GUEST_DS_BASE:
2795 case VMX_VMCS_GUEST_FS_BASE:
2796 case VMX_VMCS_GUEST_GS_BASE:
2797 case VMX_VMCS_GUEST_LDTR_BASE:
2798 case VMX_VMCS_GUEST_TR_BASE:
2799 case VMX_VMCS_GUEST_GDTR_BASE:
2800 case VMX_VMCS_GUEST_IDTR_BASE:
2801 case VMX_VMCS_GUEST_DR7:
2802 case VMX_VMCS_GUEST_RSP:
2803 case VMX_VMCS_GUEST_RIP:
2804 case VMX_VMCS_GUEST_RFLAGS:
2805 case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
2806 case VMX_VMCS_GUEST_SYSENTER_ESP:
2807 case VMX_VMCS_GUEST_SYSENTER_EIP: return true;
2808
2809 /* Host-state fields. */
2810 case VMX_VMCS_HOST_CR0:
2811 case VMX_VMCS_HOST_CR3:
2812 case VMX_VMCS_HOST_CR4:
2813 case VMX_VMCS_HOST_FS_BASE:
2814 case VMX_VMCS_HOST_GS_BASE:
2815 case VMX_VMCS_HOST_TR_BASE:
2816 case VMX_VMCS_HOST_GDTR_BASE:
2817 case VMX_VMCS_HOST_IDTR_BASE:
2818 case VMX_VMCS_HOST_SYSENTER_ESP:
2819 case VMX_VMCS_HOST_SYSENTER_EIP:
2820 case VMX_VMCS_HOST_RSP:
2821 case VMX_VMCS_HOST_RIP: return true;
2822 }
2823
2824 return false;
2825}
2826
2827
2828/**
[78454]2829 * Checks whether the given I/O access should cause a nested-guest VM-exit.
2830 *
[79376]2831 * @returns @c true if it causes a VM-exit, @c false otherwise.
[78454]2832 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2833 * @param u16Port The I/O port being accessed.
2834 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
2835 */
[78863]2836VMM_INT_DECL(bool) CPUMIsGuestVmxIoInterceptSet(PCVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess)
[78454]2837{
2838 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
[81665]2839 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_UNCOND_IO_EXIT))
[78454]2840 return true;
2841
[81665]2842 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_USE_IO_BITMAPS))
[91305]2843 return cpumGetVmxIoBitmapPermission(pCtx->hwvirt.vmx.abIoBitmap, u16Port, cbAccess);
[78454]2844
2845 return false;
2846}
2847
2848
2849/**
[79376]2850 * Checks whether the Mov-to-CR3 instruction causes a nested-guest VM-exit.
2851 *
2852 * @returns @c true if it causes a VM-exit, @c false otherwise.
2853 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2854 * @param uNewCr3 The CR3 value being written.
2855 */
2856VMM_INT_DECL(bool) CPUMIsGuestVmxMovToCr3InterceptSet(PVMCPU pVCpu, uint64_t uNewCr3)
2857{
2858 /*
2859 * If the CR3-load exiting control is set and the new CR3 value does not
2860 * match any of the CR3-target values in the VMCS, we must cause a VM-exit.
2861 *
2862 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2863 */
[91297]2864 PCCPUMCTX const pCtx = &pVCpu->cpum.s.Guest;
[81665]2865 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_CR3_LOAD_EXIT))
[79376]2866 {
[91297]2867 uint32_t const uCr3TargetCount = pCtx->hwvirt.vmx.Vmcs.u32Cr3TargetCount;
[79376]2868 Assert(uCr3TargetCount <= VMX_V_CR3_TARGET_COUNT);
2869
[79379]2870 /* If the CR3-target count is 0, cause a VM-exit. */
[79376]2871 if (uCr3TargetCount == 0)
2872 return true;
2873
[79379]2874 /* If the CR3 being written doesn't match any of the target values, cause a VM-exit. */
[79376]2875 AssertCompile(VMX_V_CR3_TARGET_COUNT == 4);
[91297]2876 if ( uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target0.u
2877 && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target1.u
2878 && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target2.u
2879 && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target3.u)
[79376]2880 return true;
2881 }
2882 return false;
2883}
2884
2885
2886/**
[78861]2887 * Checks whether a VMREAD or VMWRITE instruction for the given VMCS field causes a
2888 * VM-exit or not.
2889 *
2890 * @returns @c true if the VMREAD/VMWRITE is intercepted, @c false otherwise.
2891 * @param pVCpu The cross context virtual CPU structure.
2892 * @param uExitReason The VM-exit reason (VMX_EXIT_VMREAD or
2893 * VMX_EXIT_VMREAD).
[79316]2894 * @param u64VmcsField The VMCS field.
[78861]2895 */
[79316]2896VMM_INT_DECL(bool) CPUMIsGuestVmxVmreadVmwriteInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint64_t u64VmcsField)
[78861]2897{
2898 Assert(CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest));
2899 Assert( uExitReason == VMX_EXIT_VMREAD
2900 || uExitReason == VMX_EXIT_VMWRITE);
2901
2902 /*
2903 * Without VMCS shadowing, all VMREAD and VMWRITE instructions are intercepted.
2904 */
[81665]2905 if (!CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.s.Guest, VMX_PROC_CTLS2_VMCS_SHADOWING))
[78861]2906 return true;
2907
2908 /*
2909 * If any reserved bit in the 64-bit VMCS field encoding is set, the VMREAD/VMWRITE
2910 * is intercepted. This excludes any reserved bits in the valid parts of the field
2911 * encoding (i.e. bit 12).
2912 */
[79316]2913 if (u64VmcsField & VMX_VMCSFIELD_RSVD_MASK)
[78861]2914 return true;
2915
2916 /*
2917 * Finally, consult the VMREAD/VMWRITE bitmap whether to intercept the instruction or not.
2918 */
[91301]2919 uint32_t const u32VmcsField = RT_LO_U32(u64VmcsField);
2920 uint8_t const * const pbBitmap = uExitReason == VMX_EXIT_VMREAD
2921 ? &pVCpu->cpum.s.Guest.hwvirt.vmx.abVmreadBitmap[0]
2922 : &pVCpu->cpum.s.Guest.hwvirt.vmx.abVmwriteBitmap[0];
[78861]2923 Assert(pbBitmap);
[79316]2924 Assert(u32VmcsField >> 3 < VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
[91301]2925 return ASMBitTest(&pbBitmap[u32VmcsField >> 3], u32VmcsField & 7);
[78861]2926}
2927
2928
2929
2930/**
[78483]2931 * Determines whether the given I/O access should cause a nested-guest \#VMEXIT.
[78371]2932 *
2933 * @param pvIoBitmap Pointer to the nested-guest IO bitmap.
2934 * @param u16Port The IO port being accessed.
2935 * @param enmIoType The type of IO access.
2936 * @param cbReg The IO operand size in bytes.
2937 * @param cAddrSizeBits The address size bits (for 16, 32 or 64).
2938 * @param iEffSeg The effective segment number.
2939 * @param fRep Whether this is a repeating IO instruction (REP prefix).
2940 * @param fStrIo Whether this is a string IO instruction.
2941 * @param pIoExitInfo Pointer to the SVMIOIOEXITINFO struct to be filled.
2942 * Optional, can be NULL.
2943 */
[78483]2944VMM_INT_DECL(bool) CPUMIsSvmIoInterceptSet(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
2945 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
2946 PSVMIOIOEXITINFO pIoExitInfo)
[78371]2947{
2948 Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
2949 Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);
2950
2951 /*
2952 * The IOPM layout:
2953 * Each bit represents one 8-bit I/O port. That makes a total of 65536 bits
2954 * (ports 0..65535), i.e. two 4K pages.
2955 *
2956 * For IO instructions that access more than a single byte, the permission bits
2957 * for all bytes are checked; if any bit is set to 1, the IO access is intercepted.
2958 *
2959 * Since it's possible to do a 32-bit IO access at port 65534 (accessing 4 bytes),
2960 * we need 3 extra bits beyond the second 4K page.
2961 */
2962 static const uint16_t s_auSizeMasks[] = { 0, 1, 3, 0, 0xf, 0, 0, 0 };
2963
2964 uint16_t const offIopm = u16Port >> 3;
2965 uint16_t const fSizeMask = s_auSizeMasks[(cAddrSizeBits >> SVM_IOIO_OP_SIZE_SHIFT) & 7];
2966 uint8_t const cShift = u16Port - (offIopm << 3);
2967 uint16_t const fIopmMask = (1 << cShift) | (fSizeMask << cShift);
2968
2969 uint8_t const *pbIopm = (uint8_t *)pvIoBitmap;
2970 Assert(pbIopm);
2971 pbIopm += offIopm;
2972 uint16_t const u16Iopm = *(uint16_t *)pbIopm;
2973 if (u16Iopm & fIopmMask)
2974 {
2975 if (pIoExitInfo)
2976 {
2977 static const uint32_t s_auIoOpSize[] =
2978 { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };
2979
2980 static const uint32_t s_auIoAddrSize[] =
2981 { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };
2982
2983 pIoExitInfo->u = s_auIoOpSize[cbReg & 7];
2984 pIoExitInfo->u |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
2985 pIoExitInfo->n.u1Str = fStrIo;
2986 pIoExitInfo->n.u1Rep = fRep;
2987 pIoExitInfo->n.u3Seg = iEffSeg & 7;
2988 pIoExitInfo->n.u1Type = enmIoType;
2989 pIoExitInfo->n.u16Port = u16Port;
2990 }
2991 return true;
2992 }
2993
2994 /** @todo remove later (for debugging as VirtualBox always traps all IO
2995 * intercepts). */
2996 AssertMsgFailed(("CPUMIsSvmIoInterceptSet: We expect an IO intercept here!\n"));
2997 return false;
2998}
2999
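/*
 * Illustrative worked example (hypothetical, compiled out): the IOPM
 * arithmetic of CPUMIsSvmIoInterceptSet() above for an assumed 4-byte access
 * at port 0xfffe, which is the case that needs the extra bits past the two 4K
 * pages.  pbIopm is assumed to point at the IOPM; 0xf is the 4-byte size mask.
 */
#if 0
    uint16_t const offIopm   = 0xfffe >> 3;              /* 0x1fff - the last byte of the second 4K page. */
    uint8_t  const cShift    = 0xfffe - (offIopm << 3);  /* 6 - port 0xfffe is bit 6 of that byte. */
    uint16_t const fIopmMask = 0xf << cShift;            /* Bits 6..9 - bits 8 and 9 live past the 8K mark, */
                                                         /* hence the 16-bit read below and the extra bits.  */
    bool const fIntercept = RT_BOOL(*(uint16_t const *)&pbIopm[offIopm] & fIopmMask);
#endif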
3000
3001/**
3002 * Gets the MSR permission bitmap byte and bit offset for the specified MSR.
3003 *
3004 * @returns VBox status code.
3005 * @param idMsr The MSR being requested.
3006 * @param pbOffMsrpm Where to store the byte offset in the MSR permission
3007 * bitmap for @a idMsr.
3008 * @param puMsrpmBit Where to store the bit offset starting at the byte
3009 * returned in @a pbOffMsrpm.
3010 */
3011VMM_INT_DECL(int) CPUMGetSvmMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit)
3012{
3013 Assert(pbOffMsrpm);
3014 Assert(puMsrpmBit);
3015
3016 /*
3017 * MSRPM Layout:
3018 * Byte offset MSR range
3019 * 0x000 - 0x7ff 0x00000000 - 0x00001fff
3020 * 0x800 - 0xfff 0xc0000000 - 0xc0001fff
3021 * 0x1000 - 0x17ff 0xc0010000 - 0xc0011fff
3022 * 0x1800 - 0x1fff Reserved
3023 *
3024 * Each MSR is represented by 2 permission bits (read and write).
3025 */
3026 if (idMsr <= 0x00001fff)
3027 {
3028 /* Pentium-compatible MSRs. */
3029 uint32_t const bitoffMsr = idMsr << 1;
3030 *pbOffMsrpm = bitoffMsr >> 3;
3031 *puMsrpmBit = bitoffMsr & 7;
3032 return VINF_SUCCESS;
3033 }
3034
3035 if ( idMsr >= 0xc0000000
3036 && idMsr <= 0xc0001fff)
3037 {
3038 /* AMD Sixth Generation x86 Processor MSRs. */
3039 uint32_t const bitoffMsr = (idMsr - 0xc0000000) << 1;
3040 *pbOffMsrpm = 0x800 + (bitoffMsr >> 3);
3041 *puMsrpmBit = bitoffMsr & 7;
3042 return VINF_SUCCESS;
3043 }
3044
3045 if ( idMsr >= 0xc0010000
3046 && idMsr <= 0xc0011fff)
3047 {
3048 /* AMD Seventh and Eighth Generation Processor MSRs. */
3049 uint32_t const bitoffMsr = (idMsr - 0xc0010000) << 1;
3050 *pbOffMsrpm = 0x1000 + (bitoffMsr >> 3);
3051 *puMsrpmBit = bitoffMsr & 7;
3052 return VINF_SUCCESS;
3053 }
3054
3055 *pbOffMsrpm = 0;
3056 *puMsrpmBit = 0;
3057 return VERR_OUT_OF_RANGE;
3058}
3059
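/*
 * Illustrative usage sketch (hypothetical, compiled out): looking up
 * MSR_K6_STAR (0xc0000081) with CPUMGetSvmMsrpmOffsetAndBit() above.  Each MSR
 * has two adjacent permission bits, the lower one controlling reads and the
 * next one controlling writes.
 */
#if 0
    uint16_t offMsrpm;
    uint8_t  uMsrpmBit;
    int rc = CPUMGetSvmMsrpmOffsetAndBit(UINT32_C(0xc0000081), &offMsrpm, &uMsrpmBit);
    AssertRC(rc);
    /* bitoffMsr = 0x81 << 1 = 0x102, so offMsrpm = 0x800 + (0x102 >> 3) = 0x820 and
       uMsrpmBit = 0x102 & 7 = 2 (read-intercept bit); the write-intercept bit is uMsrpmBit + 1. */
#endif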
[91710]3060
3061/**
3062 * Checks whether the guest is in VMX non-root mode and using EPT paging.
3063 *
3064 * @returns @c true if in VMX non-root operation with EPT, @c false otherwise.
3065 * @param pVCpu The cross context virtual CPU structure.
3066 */
3067VMM_INT_DECL(bool) CPUMIsGuestVmxEptPagingEnabled(PCVMCPUCC pVCpu)
3068{
[91951]3069 return CPUMIsGuestVmxEptPagingEnabledEx(&pVCpu->cpum.s.Guest);
[91710]3070}
3071
[92541]3072
3073/**
3074 * Checks whether the guest is in VMX non-root mode and using EPT paging and the
3075 * nested-guest is in PAE mode.
3076 *
3077 * @returns @c true if in VMX non-root operation with EPT, @c false otherwise.
3078 * @param pVCpu The cross context virtual CPU structure.
3079 */
3080VMM_INT_DECL(bool) CPUMIsGuestVmxEptPaePagingEnabled(PCVMCPUCC pVCpu)
3081{
3082 return CPUMIsGuestVmxEptPagingEnabledEx(&pVCpu->cpum.s.Guest)
3083 && CPUMIsGuestInPAEModeEx(&pVCpu->cpum.s.Guest);
3084}
3085
[93922]3086
3087/**
3088 * Returns the guest-physical address of the APIC-access page when executing a
3089 * nested-guest.
3090 *
3091 * @returns The APIC-access page guest-physical address.
3092 * @param pVCpu The cross context virtual CPU structure.
3093 */
3094VMM_INT_DECL(uint64_t) CPUMGetGuestVmxApicAccessPageAddr(PCVMCPUCC pVCpu)
3095{
3096 return CPUMGetGuestVmxApicAccessPageAddrEx(&pVCpu->cpum.s.Guest);
3097}
3098
[95315]3099
3100/**
3101 * Returns whether the given page is the active APIC-access page.
3102 *
3103 * @returns @c true if the page is the active APIC-access page, @c false otherwise.
3104 * @param pVCpu The cross context virtual CPU structure.
3105 * @param GCPhysPage The guest-physical address to check.
3106 *
[95316]3107 * @remarks This function does not assume the guest is executing in VMX non-root
3108 * mode or in VMX root-mode. However, it does check that the VMCS has
3109 * been initialized and that the virtual-APIC access VM-execution control
[95315]3110 * is enabled.
3111 * @note This is meant to be used by PGM while syncing the page-table entry for
3112 * the APIC-access page. All other queries for the APIC-access page address
3113 * should almost certainly use CPUMGetGuestVmxApicAccessPageAddr() instead!
3114 */
3115VMM_INT_DECL(bool) CPUMIsGuestVmxApicAccessPageAddr(PCVMCPUCC pVCpu, RTGCPHYS GCPhysPage)
3116{
[95349]3117 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
3118 PCVMXVVMCS pVmcs = &pCtx->hwvirt.vmx.Vmcs;
[95315]3119 if ( pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.fVmx /* VMX CPU feature is enabled for the guest. */
[95351]3120 && CPUMIsGuestVmxCurrentVmcsValid(pCtx) /* A VMCS is currently active. */
[95315]3121 && (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)) /* Virtual-APIC access VM-execution control is set. */
3122 {
3123 Assert(!(pVmcs->u64AddrApicAccess.u & X86_PAGE_4K_OFFSET_MASK)); /* Intel spec. mandates that this is 4K aligned. */
3124 Assert(!(GCPhysPage & GUEST_PAGE_OFFSET_MASK)); /* Caller must be passing us an aligned page. */
3125 return pVmcs->u64AddrApicAccess.u == GCPhysPage;
3126 }
3127 return false;
3128}
3129