| 1 | /* $Id: CPUMAllRegs.cpp 107650 2025-01-10 13:42:28Z vboxsync $ */
|
|---|
| 2 | /** @file
|
|---|
| 3 | * CPUM - CPU Monitor(/Manager) - Getters and Setters.
|
|---|
| 4 | */
|
|---|
| 5 |
|
|---|
| 6 | /*
|
|---|
| 7 | * Copyright (C) 2006-2024 Oracle and/or its affiliates.
|
|---|
| 8 | *
|
|---|
| 9 | * This file is part of VirtualBox base platform packages, as
|
|---|
| 10 | * available from https://www.virtualbox.org.
|
|---|
| 11 | *
|
|---|
| 12 | * This program is free software; you can redistribute it and/or
|
|---|
| 13 | * modify it under the terms of the GNU General Public License
|
|---|
| 14 | * as published by the Free Software Foundation, in version 3 of the
|
|---|
| 15 | * License.
|
|---|
| 16 | *
|
|---|
| 17 | * This program is distributed in the hope that it will be useful, but
|
|---|
| 18 | * WITHOUT ANY WARRANTY; without even the implied warranty of
|
|---|
| 19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|---|
| 20 | * General Public License for more details.
|
|---|
| 21 | *
|
|---|
| 22 | * You should have received a copy of the GNU General Public License
|
|---|
| 23 | * along with this program; if not, see <https://www.gnu.org/licenses>.
|
|---|
| 24 | *
|
|---|
| 25 | * SPDX-License-Identifier: GPL-3.0-only
|
|---|
| 26 | */
|
|---|
| 27 |
|
|---|
| 28 |
|
|---|
| 29 | /*********************************************************************************************************************************
|
|---|
| 30 | * Header Files *
|
|---|
| 31 | *********************************************************************************************************************************/
|
|---|
| 32 | #define LOG_GROUP LOG_GROUP_CPUM
|
|---|
| 33 | #include <VBox/vmm/cpum.h>
|
|---|
| 34 | #include <VBox/vmm/dbgf.h>
|
|---|
| 35 | #include <VBox/vmm/pdmapic.h>
|
|---|
| 36 | #include <VBox/vmm/pgm.h>
|
|---|
| 37 | #include <VBox/vmm/mm.h>
|
|---|
| 38 | #include <VBox/vmm/em.h>
|
|---|
| 39 | #include <VBox/vmm/nem.h>
|
|---|
| 40 | #include <VBox/vmm/hm.h>
|
|---|
| 41 | #include "CPUMInternal.h"
|
|---|
| 42 | #include <VBox/vmm/vmcc.h>
|
|---|
| 43 | #include <VBox/err.h>
|
|---|
| 44 | #include <VBox/dis.h>
|
|---|
| 45 | #include <VBox/log.h>
|
|---|
| 46 | #include <VBox/vmm/hm.h>
|
|---|
| 47 | #include <VBox/vmm/tm.h>
|
|---|
| 48 | #include <iprt/assert.h>
|
|---|
| 49 | #include <iprt/asm.h>
|
|---|
| 50 | #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
|
|---|
| 51 | # include <iprt/asm-amd64-x86.h>
|
|---|
| 52 | #endif
|
|---|
| 53 | #ifdef IN_RING3
|
|---|
| 54 | # include <iprt/thread.h>
|
|---|
| 55 | #endif
|
|---|
| 56 |
|
|---|
| 57 | /** Disable stack frame pointer generation here. */
|
|---|
| 58 | #if defined(_MSC_VER) && !defined(DEBUG) && defined(RT_ARCH_X86)
|
|---|
| 59 | # pragma optimize("y", off)
|
|---|
| 60 | #endif
|
|---|
| 61 |
|
|---|
| 62 | AssertCompile2MemberOffsets(VM, cpum.s.GuestFeatures, cpum.ro.GuestFeatures);
|
|---|
| 63 |
|
|---|
| 64 |
|
|---|
| 65 | /*********************************************************************************************************************************
|
|---|
| 66 | * Defined Constants And Macros *
|
|---|
| 67 | *********************************************************************************************************************************/
|
|---|
| 68 | /**
|
|---|
| 69 | * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
|
|---|
| 70 | *
|
|---|
| 71 | * @returns Pointer to the Virtual CPU.
|
|---|
| 72 | * @param a_pGuestCtx Pointer to the guest context.
|
|---|
| 73 | */
|
|---|
| 74 | #define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
|
|---|
| 75 |
|
|---|
| 76 | /**
|
|---|
| 77 | * Lazily loads the hidden parts of a selector register when using raw-mode.
|
|---|
| 78 | */
|
|---|
| 79 | #define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
|
|---|
| 80 | Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg))
|
|---|
| 81 |
|
|---|
| 82 | /** @def CPUM_INT_ASSERT_NOT_EXTRN
|
|---|
| 83 | * Macro for asserting that @a a_fNotExtrn are present.
|
|---|
| 84 | *
|
|---|
| 85 | * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
|
|---|
| 86 | * @param a_fNotExtrn Mask of CPUMCTX_EXTRN_XXX bits to check.
|
|---|
| 87 | */
|
|---|
| 88 | #define CPUM_INT_ASSERT_NOT_EXTRN(a_pVCpu, a_fNotExtrn) \
|
|---|
| 89 | AssertMsg(!((a_pVCpu)->cpum.s.Guest.fExtrn & (a_fNotExtrn)), \
|
|---|
| 90 | ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.s.Guest.fExtrn, (a_fNotExtrn)))
|
|---|
| 91 |
|
|---|
| 92 |
|
|---|
| 93 | VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
|
|---|
| 94 | {
|
|---|
| 95 | pVCpu->cpum.s.Hyper.cr3 = cr3;
|
|---|
| 96 | }
|
|---|
| 97 |
|
|---|
| 98 | VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
|
|---|
| 99 | {
|
|---|
| 100 | return pVCpu->cpum.s.Hyper.cr3;
|
|---|
| 101 | }
|
|---|
| 102 |
|
|---|
| 103 |
|
|---|
| 104 | /** @def MAYBE_LOAD_DRx
|
|---|
| 105 | * Macro for updating DRx values in raw-mode and ring-0 contexts.
|
|---|
| 106 | */
|
|---|
| 107 | #ifdef IN_RING0
|
|---|
| 108 | # define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { \
|
|---|
| 109 | if ((a_pVCpu)->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER)) \
|
|---|
| 110 | a_fnLoad(a_uValue); \
|
|---|
| 111 | } while (0)
|
|---|
| 112 | #else
|
|---|
| 113 | # define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
|
|---|
| 114 | #endif
|
|---|
| 115 |
|
|---|
| 116 | static void cpumSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
|
|---|
| 117 | {
|
|---|
| 118 | pVCpu->cpum.s.Hyper.dr[0] = uDr0;
|
|---|
| 119 | MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
|
|---|
| 120 | }
|
|---|
| 121 |
|
|---|
| 122 |
|
|---|
| 123 | static void cpumSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
|
|---|
| 124 | {
|
|---|
| 125 | pVCpu->cpum.s.Hyper.dr[1] = uDr1;
|
|---|
| 126 | MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
|
|---|
| 127 | }
|
|---|
| 128 |
|
|---|
| 129 |
|
|---|
| 130 | static void cpumSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
|
|---|
| 131 | {
|
|---|
| 132 | pVCpu->cpum.s.Hyper.dr[2] = uDr2;
|
|---|
| 133 | MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
|
|---|
| 134 | }
|
|---|
| 135 |
|
|---|
| 136 |
|
|---|
| 137 | static void cpumSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
|
|---|
| 138 | {
|
|---|
| 139 | pVCpu->cpum.s.Hyper.dr[3] = uDr3;
|
|---|
| 140 | MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
|
|---|
| 141 | }
|
|---|
| 142 |
|
|---|
| 143 |
|
|---|
| 144 | VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
|
|---|
| 145 | {
|
|---|
| 146 | pVCpu->cpum.s.Hyper.dr[6] = uDr6;
|
|---|
| 147 | }
|
|---|
| 148 |
|
|---|
| 149 |
|
|---|
| 150 | VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
|
|---|
| 151 | {
|
|---|
| 152 | pVCpu->cpum.s.Hyper.dr[7] = uDr7;
|
|---|
| 153 | }
|
|---|
| 154 |
|
|---|
| 155 |
|
|---|
| 156 | VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
|
|---|
| 157 | {
|
|---|
| 158 | return pVCpu->cpum.s.Hyper.dr[0];
|
|---|
| 159 | }
|
|---|
| 160 |
|
|---|
| 161 |
|
|---|
| 162 | VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
|
|---|
| 163 | {
|
|---|
| 164 | return pVCpu->cpum.s.Hyper.dr[1];
|
|---|
| 165 | }
|
|---|
| 166 |
|
|---|
| 167 |
|
|---|
| 168 | VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
|
|---|
| 169 | {
|
|---|
| 170 | return pVCpu->cpum.s.Hyper.dr[2];
|
|---|
| 171 | }
|
|---|
| 172 |
|
|---|
| 173 |
|
|---|
| 174 | VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
|
|---|
| 175 | {
|
|---|
| 176 | return pVCpu->cpum.s.Hyper.dr[3];
|
|---|
| 177 | }
|
|---|
| 178 |
|
|---|
| 179 |
|
|---|
| 180 | VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
|
|---|
| 181 | {
|
|---|
| 182 | return pVCpu->cpum.s.Hyper.dr[6];
|
|---|
| 183 | }
|
|---|
| 184 |
|
|---|
| 185 |
|
|---|
| 186 | VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
|
|---|
| 187 | {
|
|---|
| 188 | return pVCpu->cpum.s.Hyper.dr[7];
|
|---|
| 189 | }
|
|---|
| 190 |
|
|---|
| 191 |
|
|---|
| 192 | /**
|
|---|
| 193 | * Checks that the special cookie stored in unused reserved RFLAGS bits
|
|---|
| 194 | *
|
|---|
| 195 | * @retval true if cookie is ok.
|
|---|
| 196 | * @retval false if cookie is not ok.
|
|---|
| 197 | * @param pVM The cross context VM structure.
|
|---|
| 198 | * @param pVCpu The cross context virtual CPU structure.
|
|---|
| 199 | */
|
|---|
| 200 | VMM_INT_DECL(bool) CPUMAssertGuestRFlagsCookie(PVM pVM, PVMCPU pVCpu)
|
|---|
| 201 | {
|
|---|
| 202 | AssertLogRelMsgReturn( ( pVCpu->cpum.s.Guest.rflags.uBoth
|
|---|
| 203 | & ~(uint64_t)(CPUMX86EFLAGS_HW_MASK_64 | CPUMX86EFLAGS_INT_MASK_64))
|
|---|
| 204 | == pVM->cpum.s.fReservedRFlagsCookie
|
|---|
| 205 | && (pVCpu->cpum.s.Guest.rflags.uBoth & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK
|
|---|
| 206 | && (pVCpu->cpum.s.Guest.rflags.uBoth & X86_EFL_RAZ_MASK & CPUMX86EFLAGS_HW_MASK_64) == 0,
|
|---|
| 207 | ("rflags=%#RX64 vs fReservedRFlagsCookie=%#RX64\n",
|
|---|
| 208 | pVCpu->cpum.s.Guest.rflags.uBoth, pVM->cpum.s.fReservedRFlagsCookie),
|
|---|
| 209 | false);
|
|---|
| 210 | return true;
|
|---|
| 211 | }
|
|---|
| 212 |
|
|---|
| 213 |
|
|---|
| 214 | /**
|
|---|
| 215 | * Queries the pointer to the internal CPUMCTX structure.
|
|---|
| 216 | *
|
|---|
| 217 | * @returns The CPUMCTX pointer.
|
|---|
| 218 | * @param pVCpu The cross context virtual CPU structure.
|
|---|
| 219 | */
|
|---|
| 220 | VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
|
|---|
| 221 | {
|
|---|
| 222 | return &pVCpu->cpum.s.Guest;
|
|---|
| 223 | }
|
|---|
| 224 |
|
|---|
| 225 |
|
|---|
| 226 | /**
|
|---|
| 227 | * Queries the pointer to the internal CPUMCTXMSRS structure.
|
|---|
| 228 | *
|
|---|
| 229 | * This is for NEM only.
|
|---|
| 230 | *
|
|---|
| 231 | * @returns The CPUMCTX pointer.
|
|---|
| 232 | * @param pVCpu The cross context virtual CPU structure.
|
|---|
| 233 | */
|
|---|
| 234 | VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu)
|
|---|
| 235 | {
|
|---|
| 236 | return &pVCpu->cpum.s.GuestMsrs;
|
|---|
| 237 | }
|
|---|
| 238 |
|
|---|
| 239 |
|
|---|
| 240 | VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
|
|---|
| 241 | {
|
|---|
| 242 | pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
|
|---|
| 243 | pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
|
|---|
| 244 | pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_GDTR;
|
|---|
| 245 | pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
|
|---|
| 246 | return VINF_SUCCESS; /* formality, consider it void. */
|
|---|
| 247 | }
|
|---|
| 248 |
|
|---|
| 249 |
|
|---|
| 250 | VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
|
|---|
| 251 | {
|
|---|
| 252 | pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
|
|---|
| 253 | pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
|
|---|
| 254 | pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_IDTR;
|
|---|
| 255 | pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
|
|---|
| 256 | return VINF_SUCCESS; /* formality, consider it void. */
|
|---|
| 257 | }
|
|---|
| 258 |
|
|---|
| 259 |
|
|---|
| 260 | VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
|
|---|
| 261 | {
|
|---|
| 262 | pVCpu->cpum.s.Guest.tr.Sel = tr;
|
|---|
| 263 | pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
|
|---|
| 264 | return VINF_SUCCESS; /* formality, consider it void. */
|
|---|
| 265 | }
|
|---|
| 266 |
|
|---|
| 267 |
|
|---|
| 268 | VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
|
|---|
| 269 | {
|
|---|
| 270 | pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
|
|---|
| 271 | /* The caller will set more hidden bits if it has them. */
|
|---|
| 272 | pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
|
|---|
| 273 | pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
|
|---|
| 274 | pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
|
|---|
| 275 | return VINF_SUCCESS; /* formality, consider it void. */
|
|---|
| 276 | }
|
|---|
| 277 |
|
|---|
| 278 |
|
|---|
| 279 | /**
|
|---|
| 280 | * Set the guest CR0.
|
|---|
| 281 | *
|
|---|
| 282 | * When called in GC, the hyper CR0 may be updated if that is
|
|---|
| 283 | * required. The caller only has to take special action if AM,
|
|---|
| 284 | * WP, PG or PE changes.
|
|---|
| 285 | *
|
|---|
| 286 | * @returns VINF_SUCCESS (consider it void).
|
|---|
| 287 | * @param pVCpu The cross context virtual CPU structure.
|
|---|
| 288 | * @param cr0 The new CR0 value.
|
|---|
| 289 | */
|
|---|
| 290 | VMMDECL(int) CPUMSetGuestCR0(PVMCPUCC pVCpu, uint64_t cr0)
|
|---|
| 291 | {
|
|---|
| 292 | /*
|
|---|
| 293 | * Check for changes causing TLB flushes (for REM).
|
|---|
| 294 | * The caller is responsible for calling PGM when appropriate.
|
|---|
| 295 | */
|
|---|
| 296 | if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
|
|---|
| 297 | != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
|
|---|
| 298 | pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
|
|---|
| 299 | pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
|
|---|
| 300 |
|
|---|
| 301 | /*
|
|---|
| 302 | * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack)
|
|---|
| 303 | */
|
|---|
| 304 | if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
|
|---|
| 305 | PGMCr0WpEnabled(pVCpu);
|
|---|
| 306 |
|
|---|
| 307 | /* The ET flag is settable on a 386 and hardwired on 486+. */
|
|---|
| 308 | if ( !(cr0 & X86_CR0_ET)
|
|---|
| 309 | && pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386)
|
|---|
| 310 | cr0 |= X86_CR0_ET;
|
|---|
| 311 |
|
|---|
| 312 | pVCpu->cpum.s.Guest.cr0 = cr0;
|
|---|
| 313 | pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR0;
|
|---|
| 314 | return VINF_SUCCESS;
|
|---|
| 315 | }
|
|---|
| 316 |
|
|---|
| 317 |
|
|---|
| 318 | VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
|
|---|
| 319 | {
|
|---|
| 320 | pVCpu->cpum.s.Guest.cr2 = cr2;
|
|---|
| 321 | pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR2;
|
|---|
| 322 | return VINF_SUCCESS;
|
|---|
| 323 | }
|
|---|
| 324 |
|
|---|
| 325 |
|
|---|
| 326 | VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
|
|---|
| 327 | {
|
|---|
| 328 | pVCpu->cpum.s.Guest.cr3 = cr3;
|
|---|
| 329 | pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
|
|---|
| 330 | pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR3;
|
|---|
| 331 | return VINF_SUCCESS;
|
|---|
| 332 | }
|
|---|
| 333 |
|
|---|
| 334 |
|
|---|
| 335 | VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
|
|---|
| 336 | {
|
|---|
| 337 | /* Note! We don't bother with OSXSAVE and legacy CPUID patches. */
|
|---|
| 338 |
|
|---|
| 339 | if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
|
|---|
| 340 | != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
|
|---|
| 341 | pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
|
|---|
| 342 |
|
|---|
| 343 | pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
|
|---|
| 344 | pVCpu->cpum.s.Guest.cr4 = cr4;
|
|---|
| 345 | pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR4;
|
|---|
| 346 | return VINF_SUCCESS;
|
|---|
| 347 | }
|
|---|
| 348 |
|
|---|
| 349 |
|
|---|
| 350 | VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
|
|---|
| 351 | {
|
|---|
| 352 | pVCpu->cpum.s.Guest.eflags.u = eflags;
|
|---|
| 353 | pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
|
|---|
| 354 | return VINF_SUCCESS;
|
|---|
| 355 | }
|
|---|
| 356 |
|
|---|
| 357 |
|
|---|
| 358 | VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
|
|---|
| 359 | {
|
|---|
| 360 | pVCpu->cpum.s.Guest.eip = eip;
|
|---|
| 361 | return VINF_SUCCESS;
|
|---|
| 362 | }
|
|---|
| 363 |
|
|---|
| 364 |
|
|---|
| 365 | VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
|
|---|
| 366 | {
|
|---|
| 367 | pVCpu->cpum.s.Guest.eax = eax;
|
|---|
| 368 | return VINF_SUCCESS;
|
|---|
| 369 | }
|
|---|
| 370 |
|
|---|
| 371 |
|
|---|
| 372 | VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
|
|---|
| 373 | {
|
|---|
| 374 | pVCpu->cpum.s.Guest.ebx = ebx;
|
|---|
| 375 | return VINF_SUCCESS;
|
|---|
| 376 | }
|
|---|
| 377 |
|
|---|
| 378 |
|
|---|
| 379 | VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
|
|---|
| 380 | {
|
|---|
| 381 | pVCpu->cpum.s.Guest.ecx = ecx;
|
|---|
| 382 | return VINF_SUCCESS;
|
|---|
| 383 | }
|
|---|
| 384 |
|
|---|
| 385 |
|
|---|
| 386 | VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
|
|---|
| 387 | {
|
|---|
| 388 | pVCpu->cpum.s.Guest.edx = edx;
|
|---|
| 389 | return VINF_SUCCESS;
|
|---|
| 390 | }
|
|---|
| 391 |
|
|---|
| 392 |
|
|---|
| 393 | VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
|
|---|
| 394 | {
|
|---|
| 395 | pVCpu->cpum.s.Guest.esp = esp;
|
|---|
| 396 | return VINF_SUCCESS;
|
|---|
| 397 | }
|
|---|
| 398 |
|
|---|
| 399 |
|
|---|
| 400 | VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
|
|---|
| 401 | {
|
|---|
| 402 | pVCpu->cpum.s.Guest.ebp = ebp;
|
|---|
| 403 | return VINF_SUCCESS;
|
|---|
| 404 | }
|
|---|
| 405 |
|
|---|
| 406 |
|
|---|
| 407 | VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
|
|---|
| 408 | {
|
|---|
| 409 | pVCpu->cpum.s.Guest.esi = esi;
|
|---|
| 410 | return VINF_SUCCESS;
|
|---|
| 411 | }
|
|---|
| 412 |
|
|---|
| 413 |
|
|---|
| 414 | VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
|
|---|
| 415 | {
|
|---|
| 416 | pVCpu->cpum.s.Guest.edi = edi;
|
|---|
| 417 | return VINF_SUCCESS;
|
|---|
| 418 | }
|
|---|
| 419 |
|
|---|
| 420 |
|
|---|
| 421 | VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
|
|---|
| 422 | {
|
|---|
| 423 | pVCpu->cpum.s.Guest.ss.Sel = ss;
|
|---|
| 424 | return VINF_SUCCESS;
|
|---|
| 425 | }
|
|---|
| 426 |
|
|---|
| 427 |
|
|---|
| 428 | VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
|
|---|
| 429 | {
|
|---|
| 430 | pVCpu->cpum.s.Guest.cs.Sel = cs;
|
|---|
| 431 | return VINF_SUCCESS;
|
|---|
| 432 | }
|
|---|
| 433 |
|
|---|
| 434 |
|
|---|
| 435 | VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
|
|---|
| 436 | {
|
|---|
| 437 | pVCpu->cpum.s.Guest.ds.Sel = ds;
|
|---|
| 438 | return VINF_SUCCESS;
|
|---|
| 439 | }
|
|---|
| 440 |
|
|---|
| 441 |
|
|---|
| 442 | VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
|
|---|
| 443 | {
|
|---|
| 444 | pVCpu->cpum.s.Guest.es.Sel = es;
|
|---|
| 445 | return VINF_SUCCESS;
|
|---|
| 446 | }
|
|---|
| 447 |
|
|---|
| 448 |
|
|---|
| 449 | VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
|
|---|
| 450 | {
|
|---|
| 451 | pVCpu->cpum.s.Guest.fs.Sel = fs;
|
|---|
| 452 | return VINF_SUCCESS;
|
|---|
| 453 | }
|
|---|
| 454 |
|
|---|
| 455 |
|
|---|
| 456 | VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
|
|---|
| 457 | {
|
|---|
| 458 | pVCpu->cpum.s.Guest.gs.Sel = gs;
|
|---|
| 459 | return VINF_SUCCESS;
|
|---|
| 460 | }
|
|---|
| 461 |
|
|---|
| 462 |
|
|---|
| 463 | VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
|
|---|
| 464 | {
|
|---|
| 465 | pVCpu->cpum.s.Guest.msrEFER = val;
|
|---|
| 466 | pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_EFER;
|
|---|
| 467 | }
|
|---|
| 468 |
|
|---|
| 469 |
|
|---|
| 470 | VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PCVMCPU pVCpu, uint16_t *pcbLimit)
|
|---|
| 471 | {
|
|---|
| 472 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_IDTR);
|
|---|
| 473 | if (pcbLimit)
|
|---|
| 474 | *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
|
|---|
| 475 | return pVCpu->cpum.s.Guest.idtr.pIdt;
|
|---|
| 476 | }
|
|---|
| 477 |
|
|---|
| 478 |
|
|---|
| 479 | VMMDECL(RTSEL) CPUMGetGuestTR(PCVMCPU pVCpu, PCPUMSELREGHID pHidden)
|
|---|
| 480 | {
|
|---|
| 481 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_TR);
|
|---|
| 482 | if (pHidden)
|
|---|
| 483 | *pHidden = pVCpu->cpum.s.Guest.tr;
|
|---|
| 484 | return pVCpu->cpum.s.Guest.tr.Sel;
|
|---|
| 485 | }
|
|---|
| 486 |
|
|---|
| 487 |
|
|---|
| 488 | VMMDECL(RTSEL) CPUMGetGuestCS(PCVMCPU pVCpu)
|
|---|
| 489 | {
|
|---|
| 490 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CS);
|
|---|
| 491 | return pVCpu->cpum.s.Guest.cs.Sel;
|
|---|
| 492 | }
|
|---|
| 493 |
|
|---|
| 494 |
|
|---|
| 495 | VMMDECL(RTSEL) CPUMGetGuestDS(PCVMCPU pVCpu)
|
|---|
| 496 | {
|
|---|
| 497 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DS);
|
|---|
| 498 | return pVCpu->cpum.s.Guest.ds.Sel;
|
|---|
| 499 | }
|
|---|
| 500 |
|
|---|
| 501 |
|
|---|
| 502 | VMMDECL(RTSEL) CPUMGetGuestES(PCVMCPU pVCpu)
|
|---|
| 503 | {
|
|---|
| 504 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_ES);
|
|---|
| 505 | return pVCpu->cpum.s.Guest.es.Sel;
|
|---|
| 506 | }
|
|---|
| 507 |
|
|---|
| 508 |
|
|---|
| 509 | VMMDECL(RTSEL) CPUMGetGuestFS(PCVMCPU pVCpu)
|
|---|
| 510 | {
|
|---|
| 511 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_FS);
|
|---|
| 512 | return pVCpu->cpum.s.Guest.fs.Sel;
|
|---|
| 513 | }
|
|---|
| 514 |
|
|---|
| 515 |
|
|---|
| 516 | VMMDECL(RTSEL) CPUMGetGuestGS(PCVMCPU pVCpu)
|
|---|
| 517 | {
|
|---|
| 518 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_GS);
|
|---|
| 519 | return pVCpu->cpum.s.Guest.gs.Sel;
|
|---|
| 520 | }
|
|---|
| 521 |
|
|---|
| 522 |
|
|---|
| 523 | VMMDECL(RTSEL) CPUMGetGuestSS(PCVMCPU pVCpu)
|
|---|
| 524 | {
|
|---|
| 525 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SS);
|
|---|
| 526 | return pVCpu->cpum.s.Guest.ss.Sel;
|
|---|
| 527 | }
|
|---|
| 528 |
|
|---|
| 529 |
|
|---|
| 530 | VMMDECL(uint64_t) CPUMGetGuestFlatPC(PVMCPU pVCpu)
|
|---|
| 531 | {
|
|---|
| 532 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
|
|---|
| 533 | CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
|
|---|
| 534 | if ( !CPUMIsGuestInLongMode(pVCpu)
|
|---|
| 535 | || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
|
|---|
| 536 | return pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.cs.u64Base;
|
|---|
| 537 | return pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.cs.u64Base;
|
|---|
| 538 | }
|
|---|
| 539 |
|
|---|
| 540 |
|
|---|
| 541 | VMMDECL(uint64_t) CPUMGetGuestFlatSP(PVMCPU pVCpu)
|
|---|
| 542 | {
|
|---|
| 543 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
|
|---|
| 544 | CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
|
|---|
| 545 | if ( !CPUMIsGuestInLongMode(pVCpu)
|
|---|
| 546 | || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
|
|---|
| 547 | return pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.ss.u64Base;
|
|---|
| 548 | return pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.ss.u64Base;
|
|---|
| 549 | }
|
|---|
| 550 |
|
|---|
| 551 |
|
|---|
| 552 | VMMDECL(RTSEL) CPUMGetGuestLDTR(PCVMCPU pVCpu)
|
|---|
| 553 | {
|
|---|
| 554 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_LDTR);
|
|---|
| 555 | return pVCpu->cpum.s.Guest.ldtr.Sel;
|
|---|
| 556 | }
|
|---|
| 557 |
|
|---|
| 558 |
|
|---|
| 559 | VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PCVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
|
|---|
| 560 | {
|
|---|
| 561 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_LDTR);
|
|---|
| 562 | *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
|
|---|
| 563 | *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
|
|---|
| 564 | return pVCpu->cpum.s.Guest.ldtr.Sel;
|
|---|
| 565 | }
|
|---|
| 566 |
|
|---|
| 567 |
|
|---|
| 568 | VMMDECL(uint64_t) CPUMGetGuestCR0(PCVMCPU pVCpu)
|
|---|
| 569 | {
|
|---|
| 570 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
|
|---|
| 571 | return pVCpu->cpum.s.Guest.cr0;
|
|---|
| 572 | }
|
|---|
| 573 |
|
|---|
| 574 |
|
|---|
| 575 | VMMDECL(uint64_t) CPUMGetGuestCR2(PCVMCPU pVCpu)
|
|---|
| 576 | {
|
|---|
| 577 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR2);
|
|---|
| 578 | return pVCpu->cpum.s.Guest.cr2;
|
|---|
| 579 | }
|
|---|
| 580 |
|
|---|
| 581 |
|
|---|
| 582 | VMMDECL(uint64_t) CPUMGetGuestCR3(PCVMCPU pVCpu)
|
|---|
| 583 | {
|
|---|
| 584 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
|
|---|
| 585 | return pVCpu->cpum.s.Guest.cr3;
|
|---|
| 586 | }
|
|---|
| 587 |
|
|---|
| 588 |
|
|---|
| 589 | VMMDECL(uint64_t) CPUMGetGuestCR4(PCVMCPU pVCpu)
|
|---|
| 590 | {
|
|---|
| 591 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
|
|---|
| 592 | return pVCpu->cpum.s.Guest.cr4;
|
|---|
| 593 | }
|
|---|
| 594 |
|
|---|
| 595 |
|
|---|
| 596 | VMMDECL(uint64_t) CPUMGetGuestCR8(PCVMCPUCC pVCpu)
|
|---|
| 597 | {
|
|---|
| 598 | uint64_t u64;
|
|---|
| 599 | int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
|
|---|
| 600 | if (RT_FAILURE(rc))
|
|---|
| 601 | u64 = 0;
|
|---|
| 602 | return u64;
|
|---|
| 603 | }
|
|---|
| 604 |
|
|---|
| 605 |
|
|---|
| 606 | VMMDECL(void) CPUMGetGuestGDTR(PCVMCPU pVCpu, PVBOXGDTR pGDTR)
|
|---|
| 607 | {
|
|---|
| 608 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_GDTR);
|
|---|
| 609 | *pGDTR = pVCpu->cpum.s.Guest.gdtr;
|
|---|
| 610 | }
|
|---|
| 611 |
|
|---|
| 612 |
|
|---|
| 613 | VMMDECL(uint32_t) CPUMGetGuestEIP(PCVMCPU pVCpu)
|
|---|
| 614 | {
|
|---|
| 615 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
|
|---|
| 616 | return pVCpu->cpum.s.Guest.eip;
|
|---|
| 617 | }
|
|---|
| 618 |
|
|---|
| 619 |
|
|---|
| 620 | VMMDECL(uint64_t) CPUMGetGuestRIP(PCVMCPU pVCpu)
|
|---|
| 621 | {
|
|---|
| 622 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
|
|---|
| 623 | return pVCpu->cpum.s.Guest.rip;
|
|---|
| 624 | }
|
|---|
| 625 |
|
|---|
| 626 |
|
|---|
| 627 | VMMDECL(uint32_t) CPUMGetGuestEAX(PCVMCPU pVCpu)
|
|---|
| 628 | {
|
|---|
| 629 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RAX);
|
|---|
| 630 | return pVCpu->cpum.s.Guest.eax;
|
|---|
| 631 | }
|
|---|
| 632 |
|
|---|
| 633 |
|
|---|
| 634 | VMMDECL(uint32_t) CPUMGetGuestEBX(PCVMCPU pVCpu)
|
|---|
| 635 | {
|
|---|
| 636 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RBX);
|
|---|
| 637 | return pVCpu->cpum.s.Guest.ebx;
|
|---|
| 638 | }
|
|---|
| 639 |
|
|---|
| 640 |
|
|---|
| 641 | VMMDECL(uint32_t) CPUMGetGuestECX(PCVMCPU pVCpu)
|
|---|
| 642 | {
|
|---|
| 643 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RCX);
|
|---|
| 644 | return pVCpu->cpum.s.Guest.ecx;
|
|---|
| 645 | }
|
|---|
| 646 |
|
|---|
| 647 |
|
|---|
| 648 | VMMDECL(uint32_t) CPUMGetGuestEDX(PCVMCPU pVCpu)
|
|---|
| 649 | {
|
|---|
| 650 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RDX);
|
|---|
| 651 | return pVCpu->cpum.s.Guest.edx;
|
|---|
| 652 | }
|
|---|
| 653 |
|
|---|
| 654 |
|
|---|
| 655 | VMMDECL(uint32_t) CPUMGetGuestESI(PCVMCPU pVCpu)
|
|---|
| 656 | {
|
|---|
| 657 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSI);
|
|---|
| 658 | return pVCpu->cpum.s.Guest.esi;
|
|---|
| 659 | }
|
|---|
| 660 |
|
|---|
| 661 |
|
|---|
| 662 | VMMDECL(uint32_t) CPUMGetGuestEDI(PCVMCPU pVCpu)
|
|---|
| 663 | {
|
|---|
| 664 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RDI);
|
|---|
| 665 | return pVCpu->cpum.s.Guest.edi;
|
|---|
| 666 | }
|
|---|
| 667 |
|
|---|
| 668 |
|
|---|
| 669 | VMMDECL(uint32_t) CPUMGetGuestESP(PCVMCPU pVCpu)
|
|---|
| 670 | {
|
|---|
| 671 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RSP);
|
|---|
| 672 | return pVCpu->cpum.s.Guest.esp;
|
|---|
| 673 | }
|
|---|
| 674 |
|
|---|
| 675 |
|
|---|
| 676 | VMMDECL(uint32_t) CPUMGetGuestEBP(PCVMCPU pVCpu)
|
|---|
| 677 | {
|
|---|
| 678 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RBP);
|
|---|
| 679 | return pVCpu->cpum.s.Guest.ebp;
|
|---|
| 680 | }
|
|---|
| 681 |
|
|---|
| 682 |
|
|---|
| 683 | VMMDECL(uint32_t) CPUMGetGuestEFlags(PCVMCPU pVCpu)
|
|---|
| 684 | {
|
|---|
| 685 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RFLAGS);
|
|---|
| 686 | return pVCpu->cpum.s.Guest.eflags.u;
|
|---|
| 687 | }
|
|---|
| 688 |
|
|---|
| 689 |
|
|---|
| 690 | VMMDECL(int) CPUMGetGuestCRx(PCVMCPUCC pVCpu, unsigned iReg, uint64_t *pValue)
|
|---|
| 691 | {
|
|---|
| 692 | switch (iReg)
|
|---|
| 693 | {
|
|---|
| 694 | case DISCREG_CR0:
|
|---|
| 695 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
|
|---|
| 696 | *pValue = pVCpu->cpum.s.Guest.cr0;
|
|---|
| 697 | break;
|
|---|
| 698 |
|
|---|
| 699 | case DISCREG_CR2:
|
|---|
| 700 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR2);
|
|---|
| 701 | *pValue = pVCpu->cpum.s.Guest.cr2;
|
|---|
| 702 | break;
|
|---|
| 703 |
|
|---|
| 704 | case DISCREG_CR3:
|
|---|
| 705 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
|
|---|
| 706 | *pValue = pVCpu->cpum.s.Guest.cr3;
|
|---|
| 707 | break;
|
|---|
| 708 |
|
|---|
| 709 | case DISCREG_CR4:
|
|---|
| 710 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
|
|---|
| 711 | *pValue = pVCpu->cpum.s.Guest.cr4;
|
|---|
| 712 | break;
|
|---|
| 713 |
|
|---|
| 714 | case DISCREG_CR8:
|
|---|
| 715 | {
|
|---|
| 716 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
|
|---|
| 717 | uint8_t u8Tpr;
|
|---|
| 718 | int rc = PDMApicGetTpr(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
|
|---|
| 719 | if (RT_FAILURE(rc))
|
|---|
| 720 | {
|
|---|
| 721 | AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
|
|---|
| 722 | *pValue = 0;
|
|---|
| 723 | return rc;
|
|---|
| 724 | }
|
|---|
| 725 | *pValue = u8Tpr >> 4; /* bits 7-4 contain the task priority that go in cr8, bits 3-0 */
|
|---|
| 726 | break;
|
|---|
| 727 | }
|
|---|
| 728 |
|
|---|
| 729 | default:
|
|---|
| 730 | return VERR_INVALID_PARAMETER;
|
|---|
| 731 | }
|
|---|
| 732 | return VINF_SUCCESS;
|
|---|
| 733 | }
|
|---|
| 734 |
|
|---|
| 735 |
|
|---|
| 736 | VMMDECL(uint64_t) CPUMGetGuestDR0(PCVMCPU pVCpu)
|
|---|
| 737 | {
|
|---|
| 738 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
|
|---|
| 739 | return pVCpu->cpum.s.Guest.dr[0];
|
|---|
| 740 | }
|
|---|
| 741 |
|
|---|
| 742 |
|
|---|
| 743 | VMMDECL(uint64_t) CPUMGetGuestDR1(PCVMCPU pVCpu)
|
|---|
| 744 | {
|
|---|
| 745 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
|
|---|
| 746 | return pVCpu->cpum.s.Guest.dr[1];
|
|---|
| 747 | }
|
|---|
| 748 |
|
|---|
| 749 |
|
|---|
| 750 | VMMDECL(uint64_t) CPUMGetGuestDR2(PCVMCPU pVCpu)
|
|---|
| 751 | {
|
|---|
| 752 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
|
|---|
| 753 | return pVCpu->cpum.s.Guest.dr[2];
|
|---|
| 754 | }
|
|---|
| 755 |
|
|---|
| 756 |
|
|---|
| 757 | VMMDECL(uint64_t) CPUMGetGuestDR3(PCVMCPU pVCpu)
|
|---|
| 758 | {
|
|---|
| 759 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
|
|---|
| 760 | return pVCpu->cpum.s.Guest.dr[3];
|
|---|
| 761 | }
|
|---|
| 762 |
|
|---|
| 763 |
|
|---|
| 764 | VMMDECL(uint64_t) CPUMGetGuestDR6(PCVMCPU pVCpu)
|
|---|
| 765 | {
|
|---|
| 766 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR6);
|
|---|
| 767 | return pVCpu->cpum.s.Guest.dr[6];
|
|---|
| 768 | }
|
|---|
| 769 |
|
|---|
| 770 |
|
|---|
| 771 | VMMDECL(uint64_t) CPUMGetGuestDR7(PCVMCPU pVCpu)
|
|---|
| 772 | {
|
|---|
| 773 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR7);
|
|---|
| 774 | return pVCpu->cpum.s.Guest.dr[7];
|
|---|
| 775 | }
|
|---|
| 776 |
|
|---|
| 777 |
|
|---|
| 778 | VMMDECL(int) CPUMGetGuestDRx(PCVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
|
|---|
| 779 | {
|
|---|
| 780 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR_MASK);
|
|---|
| 781 | AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
|
|---|
| 782 | /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
|
|---|
| 783 | if (iReg == 4 || iReg == 5)
|
|---|
| 784 | iReg += 2;
|
|---|
| 785 | *pValue = pVCpu->cpum.s.Guest.dr[iReg];
|
|---|
| 786 | return VINF_SUCCESS;
|
|---|
| 787 | }
|
|---|
| 788 |
|
|---|
| 789 |
|
|---|
| 790 | VMMDECL(uint64_t) CPUMGetGuestEFER(PCVMCPU pVCpu)
|
|---|
| 791 | {
|
|---|
| 792 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
|
|---|
| 793 | return pVCpu->cpum.s.Guest.msrEFER;
|
|---|
| 794 | }
|
|---|
| 795 |
|
|---|
| 796 |
|
|---|
| 797 | /**
|
|---|
| 798 | * Looks up a CPUID leaf in the CPUID leaf array, no subleaf.
|
|---|
| 799 | *
|
|---|
| 800 | * @returns Pointer to the leaf if found, NULL if not.
|
|---|
| 801 | *
|
|---|
| 802 | * @param pVM The cross context VM structure.
|
|---|
| 803 | * @param uLeaf The leaf to get.
|
|---|
| 804 | */
|
|---|
| 805 | PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf)
|
|---|
| 806 | {
|
|---|
| 807 | unsigned iEnd = RT_MIN(pVM->cpum.s.GuestInfo.cCpuIdLeaves, RT_ELEMENTS(pVM->cpum.s.GuestInfo.aCpuIdLeaves));
|
|---|
| 808 | if (iEnd)
|
|---|
| 809 | {
|
|---|
| 810 | unsigned iStart = 0;
|
|---|
| 811 | PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.aCpuIdLeaves;
|
|---|
| 812 | for (;;)
|
|---|
| 813 | {
|
|---|
| 814 | unsigned i = iStart + (iEnd - iStart) / 2U;
|
|---|
| 815 | if (uLeaf < paLeaves[i].uLeaf)
|
|---|
| 816 | {
|
|---|
| 817 | if (i <= iStart)
|
|---|
| 818 | return NULL;
|
|---|
| 819 | iEnd = i;
|
|---|
| 820 | }
|
|---|
| 821 | else if (uLeaf > paLeaves[i].uLeaf)
|
|---|
| 822 | {
|
|---|
| 823 | i += 1;
|
|---|
| 824 | if (i >= iEnd)
|
|---|
| 825 | return NULL;
|
|---|
| 826 | iStart = i;
|
|---|
| 827 | }
|
|---|
| 828 | else
|
|---|
| 829 | {
|
|---|
| 830 | if (RT_LIKELY(paLeaves[i].fSubLeafMask == 0 && paLeaves[i].uSubLeaf == 0))
|
|---|
| 831 | return &paLeaves[i];
|
|---|
| 832 |
|
|---|
| 833 | /* This shouldn't normally happen. But in case the it does due
|
|---|
| 834 | to user configuration overrids or something, just return the
|
|---|
| 835 | first sub-leaf. */
|
|---|
| 836 | AssertMsgFailed(("uLeaf=%#x fSubLeafMask=%#x uSubLeaf=%#x\n",
|
|---|
| 837 | uLeaf, paLeaves[i].fSubLeafMask, paLeaves[i].uSubLeaf));
|
|---|
| 838 | while ( paLeaves[i].uSubLeaf != 0
|
|---|
| 839 | && i > 0
|
|---|
| 840 | && uLeaf == paLeaves[i - 1].uLeaf)
|
|---|
| 841 | i--;
|
|---|
| 842 | return &paLeaves[i];
|
|---|
| 843 | }
|
|---|
| 844 | }
|
|---|
| 845 | }
|
|---|
| 846 |
|
|---|
| 847 | return NULL;
|
|---|
| 848 | }
|
|---|
| 849 |
|
|---|
| 850 |
|
|---|
| 851 | /**
|
|---|
| 852 | * Looks up a CPUID leaf in the CPUID leaf array.
|
|---|
| 853 | *
|
|---|
| 854 | * @returns Pointer to the leaf if found, NULL if not.
|
|---|
| 855 | *
|
|---|
| 856 | * @param pVM The cross context VM structure.
|
|---|
| 857 | * @param uLeaf The leaf to get.
|
|---|
| 858 | * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it
|
|---|
| 859 | * isn't.
|
|---|
| 860 | * @param pfExactSubLeafHit Whether we've got an exact subleaf hit or not.
|
|---|
| 861 | */
|
|---|
| 862 | PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit)
|
|---|
| 863 | {
|
|---|
| 864 | unsigned iEnd = RT_MIN(pVM->cpum.s.GuestInfo.cCpuIdLeaves, RT_ELEMENTS(pVM->cpum.s.GuestInfo.aCpuIdLeaves));
|
|---|
| 865 | if (iEnd)
|
|---|
| 866 | {
|
|---|
| 867 | unsigned iStart = 0;
|
|---|
| 868 | PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.aCpuIdLeaves;
|
|---|
| 869 | for (;;)
|
|---|
| 870 | {
|
|---|
| 871 | unsigned i = iStart + (iEnd - iStart) / 2U;
|
|---|
| 872 | if (uLeaf < paLeaves[i].uLeaf)
|
|---|
| 873 | {
|
|---|
| 874 | if (i <= iStart)
|
|---|
| 875 | return NULL;
|
|---|
| 876 | iEnd = i;
|
|---|
| 877 | }
|
|---|
| 878 | else if (uLeaf > paLeaves[i].uLeaf)
|
|---|
| 879 | {
|
|---|
| 880 | i += 1;
|
|---|
| 881 | if (i >= iEnd)
|
|---|
| 882 | return NULL;
|
|---|
| 883 | iStart = i;
|
|---|
| 884 | }
|
|---|
| 885 | else
|
|---|
| 886 | {
|
|---|
| 887 | uSubLeaf &= paLeaves[i].fSubLeafMask;
|
|---|
| 888 | if (uSubLeaf == paLeaves[i].uSubLeaf)
|
|---|
| 889 | *pfExactSubLeafHit = true;
|
|---|
| 890 | else
|
|---|
| 891 | {
|
|---|
| 892 | /* Find the right subleaf. We return the last one before
|
|---|
| 893 | uSubLeaf if we don't find an exact match. */
|
|---|
| 894 | if (uSubLeaf < paLeaves[i].uSubLeaf)
|
|---|
| 895 | while ( i > 0
|
|---|
| 896 | && uLeaf == paLeaves[i - 1].uLeaf
|
|---|
| 897 | && uSubLeaf <= paLeaves[i - 1].uSubLeaf)
|
|---|
| 898 | i--;
|
|---|
| 899 | else
|
|---|
| 900 | while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
|
|---|
| 901 | && uLeaf == paLeaves[i + 1].uLeaf
|
|---|
| 902 | && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
|
|---|
| 903 | i++;
|
|---|
| 904 | *pfExactSubLeafHit = uSubLeaf == paLeaves[i].uSubLeaf;
|
|---|
| 905 | }
|
|---|
| 906 | return &paLeaves[i];
|
|---|
| 907 | }
|
|---|
| 908 | }
|
|---|
| 909 | }
|
|---|
| 910 |
|
|---|
| 911 | *pfExactSubLeafHit = false;
|
|---|
| 912 | return NULL;
|
|---|
| 913 | }
|
|---|
| 914 |
|
|---|
| 915 |
|
|---|
| 916 | /**
|
|---|
| 917 | * Gets a CPUID leaf.
|
|---|
| 918 | *
|
|---|
| 919 | * @param pVCpu The cross context virtual CPU structure.
|
|---|
| 920 | * @param uLeaf The CPUID leaf to get.
|
|---|
| 921 | * @param uSubLeaf The CPUID sub-leaf to get, if applicable.
|
|---|
| 922 | * @param f64BitMode A tristate indicate if the caller is in 64-bit mode or
|
|---|
| 923 | * not: 1=true, 0=false, 1=whatever. This affect how the
|
|---|
| 924 | * X86_CPUID_EXT_FEATURE_EDX_SYSCALL flag is returned on
|
|---|
| 925 | * Intel CPUs, where it's only returned in 64-bit mode.
|
|---|
| 926 | * @param pEax Where to store the EAX value.
|
|---|
| 927 | * @param pEbx Where to store the EBX value.
|
|---|
| 928 | * @param pEcx Where to store the ECX value.
|
|---|
| 929 | * @param pEdx Where to store the EDX value.
|
|---|
| 930 | */
|
|---|
| 931 | VMMDECL(void) CPUMGetGuestCpuId(PVMCPUCC pVCpu, uint32_t uLeaf, uint32_t uSubLeaf, int f64BitMode,
|
|---|
| 932 | uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
|
|---|
| 933 | {
|
|---|
| 934 | bool fExactSubLeafHit;
|
|---|
| 935 | PVM pVM = pVCpu->CTX_SUFF(pVM);
|
|---|
| 936 | PCCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafEx(pVM, uLeaf, uSubLeaf, &fExactSubLeafHit);
|
|---|
| 937 | if (pLeaf)
|
|---|
| 938 | {
|
|---|
| 939 | AssertMsg(pLeaf->uLeaf == uLeaf, ("%#x %#x\n", pLeaf->uLeaf, uLeaf));
|
|---|
| 940 | if (fExactSubLeafHit)
|
|---|
| 941 | {
|
|---|
| 942 | *pEax = pLeaf->uEax;
|
|---|
| 943 | *pEbx = pLeaf->uEbx;
|
|---|
| 944 | *pEcx = pLeaf->uEcx;
|
|---|
| 945 | *pEdx = pLeaf->uEdx;
|
|---|
| 946 |
|
|---|
| 947 | /*
|
|---|
| 948 | * Deal with CPU specific information.
|
|---|
| 949 | */
|
|---|
| 950 | if (pLeaf->fFlags & ( CPUMCPUIDLEAF_F_CONTAINS_APIC_ID
|
|---|
| 951 | | CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE
|
|---|
| 952 | | CPUMCPUIDLEAF_F_CONTAINS_APIC ))
|
|---|
| 953 | {
|
|---|
| 954 | if (uLeaf == 1)
|
|---|
| 955 | {
|
|---|
| 956 | /* EBX: Bits 31-24: Initial APIC ID. */
|
|---|
| 957 | Assert(pVCpu->idCpu <= 255);
|
|---|
| 958 | AssertMsg((pLeaf->uEbx >> 24) == 0, ("%#x\n", pLeaf->uEbx)); /* raw-mode assumption */
|
|---|
| 959 | *pEbx = (pLeaf->uEbx & UINT32_C(0x00ffffff)) | (pVCpu->idCpu << 24);
|
|---|
| 960 |
|
|---|
| 961 | /* EDX: Bit 9: AND with APICBASE.EN. */
|
|---|
| 962 | if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
|
|---|
| 963 | *pEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
|
|---|
| 964 |
|
|---|
| 965 | /* ECX: Bit 27: CR4.OSXSAVE mirror. */
|
|---|
| 966 | *pEcx = (pLeaf->uEcx & ~X86_CPUID_FEATURE_ECX_OSXSAVE)
|
|---|
| 967 | | (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE ? X86_CPUID_FEATURE_ECX_OSXSAVE : 0);
|
|---|
| 968 | }
|
|---|
| 969 | else if (uLeaf == 0xb)
|
|---|
| 970 | {
|
|---|
| 971 | /* EDX: Initial extended APIC ID. */
|
|---|
| 972 | AssertMsg(pLeaf->uEdx == 0, ("%#x\n", pLeaf->uEdx)); /* raw-mode assumption */
|
|---|
| 973 | *pEdx = pVCpu->idCpu;
|
|---|
| 974 | Assert(!(pLeaf->fFlags & ~(CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)));
|
|---|
| 975 | }
|
|---|
| 976 | else if (uLeaf == UINT32_C(0x8000001e))
|
|---|
| 977 | {
|
|---|
| 978 | /* EAX: Initial extended APIC ID. */
|
|---|
| 979 | AssertMsg(pLeaf->uEax == 0, ("%#x\n", pLeaf->uEax)); /* raw-mode assumption */
|
|---|
| 980 | *pEax = pVCpu->idCpu;
|
|---|
| 981 | Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC_ID));
|
|---|
| 982 | }
|
|---|
| 983 | else if (uLeaf == UINT32_C(0x80000001))
|
|---|
| 984 | {
|
|---|
| 985 | /* EDX: Bit 9: AND with APICBASE.EN. */
|
|---|
| 986 | if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible)
|
|---|
| 987 | *pEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
|
|---|
| 988 | Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC));
|
|---|
| 989 | }
|
|---|
| 990 | else
|
|---|
| 991 | AssertMsgFailed(("uLeaf=%#x\n", uLeaf));
|
|---|
| 992 | }
|
|---|
| 993 |
|
|---|
| 994 | /* Intel CPUs suppress the SYSCALL bit when not executing in 64-bit mode: */
|
|---|
| 995 | if ( uLeaf == UINT32_C(0x80000001)
|
|---|
| 996 | && f64BitMode == false
|
|---|
| 997 | && (*pEdx & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
|
|---|
| 998 | && ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL
|
|---|
| 999 | || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_VIA /*?*/
|
|---|
| 1000 | || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_SHANGHAI /*?*/ ) )
|
|---|
| 1001 | *pEdx &= ~X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
|
|---|
| 1002 |
|
|---|
| 1003 | }
|
|---|
| 1004 | /*
|
|---|
| 1005 | * Out of range sub-leaves aren't quite as easy and pretty as we emulate
|
|---|
| 1006 | * them here, but we do the best we can here...
|
|---|
| 1007 | */
|
|---|
| 1008 | else
|
|---|
| 1009 | {
|
|---|
| 1010 | *pEax = *pEbx = *pEcx = *pEdx = 0;
|
|---|
| 1011 | if (pLeaf->fFlags & CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)
|
|---|
| 1012 | {
|
|---|
| 1013 | *pEcx = uSubLeaf & 0xff;
|
|---|
| 1014 | *pEdx = pVCpu->idCpu;
|
|---|
| 1015 | }
|
|---|
| 1016 | }
|
|---|
| 1017 | }
|
|---|
| 1018 | else
|
|---|
| 1019 | {
|
|---|
| 1020 | /*
|
|---|
| 1021 | * Different CPUs have different ways of dealing with unknown CPUID leaves.
|
|---|
| 1022 | */
|
|---|
| 1023 | switch (pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod)
|
|---|
| 1024 | {
|
|---|
| 1025 | default:
|
|---|
| 1026 | AssertFailed();
|
|---|
| 1027 | RT_FALL_THRU();
|
|---|
| 1028 | case CPUMUNKNOWNCPUID_DEFAULTS:
|
|---|
| 1029 | case CPUMUNKNOWNCPUID_LAST_STD_LEAF: /* ASSUME this is executed */
|
|---|
| 1030 | case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: /** @todo Implement CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX */
|
|---|
| 1031 | *pEax = pVM->cpum.s.GuestInfo.DefCpuId.uEax;
|
|---|
| 1032 | *pEbx = pVM->cpum.s.GuestInfo.DefCpuId.uEbx;
|
|---|
| 1033 | *pEcx = pVM->cpum.s.GuestInfo.DefCpuId.uEcx;
|
|---|
| 1034 | *pEdx = pVM->cpum.s.GuestInfo.DefCpuId.uEdx;
|
|---|
| 1035 | break;
|
|---|
| 1036 | case CPUMUNKNOWNCPUID_PASSTHRU:
|
|---|
| 1037 | *pEax = uLeaf;
|
|---|
| 1038 | *pEbx = 0;
|
|---|
| 1039 | *pEcx = uSubLeaf;
|
|---|
| 1040 | *pEdx = 0;
|
|---|
| 1041 | break;
|
|---|
| 1042 | }
|
|---|
| 1043 | }
|
|---|
| 1044 | Log2(("CPUMGetGuestCpuId: uLeaf=%#010x/%#010x %RX32 %RX32 %RX32 %RX32\n", uLeaf, uSubLeaf, *pEax, *pEbx, *pEcx, *pEdx));
|
|---|
| 1045 | }
|
|---|
| 1046 |
|
|---|
| 1047 |
|
|---|
| 1048 | /**
|
|---|
| 1049 | * Sets the visibility of the X86_CPUID_FEATURE_EDX_APIC and
|
|---|
| 1050 | * X86_CPUID_AMD_FEATURE_EDX_APIC CPUID bits.
|
|---|
| 1051 | *
|
|---|
| 1052 | * @returns Previous value.
|
|---|
| 1053 | * @param pVCpu The cross context virtual CPU structure to make the
|
|---|
| 1054 | * change on. Usually the calling EMT.
|
|---|
| 1055 | * @param fVisible Whether to make it visible (true) or hide it (false).
|
|---|
| 1056 | *
|
|---|
| 1057 | * @remarks This is "VMMDECL" so that it still links with
|
|---|
| 1058 | * the old APIC code which is in VBoxDD2 and not in
|
|---|
| 1059 | * the VMM module.
|
|---|
| 1060 | */
|
|---|
| 1061 | VMMDECL(bool) CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible)
|
|---|
| 1062 | {
|
|---|
| 1063 | bool fOld = pVCpu->cpum.s.fCpuIdApicFeatureVisible;
|
|---|
| 1064 | pVCpu->cpum.s.fCpuIdApicFeatureVisible = fVisible;
|
|---|
| 1065 | return fOld;
|
|---|
| 1066 | }
|
|---|
| 1067 |
|
|---|
| 1068 |
|
|---|
| 1069 | /**
|
|---|
| 1070 | * Gets the host CPU vendor.
|
|---|
| 1071 | *
|
|---|
| 1072 | * @returns CPU vendor.
|
|---|
| 1073 | * @param pVM The cross context VM structure.
|
|---|
| 1074 | */
|
|---|
| 1075 | VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
|
|---|
| 1076 | {
|
|---|
| 1077 | return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.Common.enmCpuVendor;
|
|---|
| 1078 | }
|
|---|
| 1079 |
|
|---|
| 1080 |
|
|---|
| 1081 | /**
|
|---|
| 1082 | * Gets the host CPU microarchitecture.
|
|---|
| 1083 | *
|
|---|
| 1084 | * @returns CPU microarchitecture.
|
|---|
| 1085 | * @param pVM The cross context VM structure.
|
|---|
| 1086 | */
|
|---|
| 1087 | VMMDECL(CPUMMICROARCH) CPUMGetHostMicroarch(PCVM pVM)
|
|---|
| 1088 | {
|
|---|
| 1089 | return pVM->cpum.s.HostFeatures.Common.enmMicroarch;
|
|---|
| 1090 | }
|
|---|
| 1091 |
|
|---|
| 1092 |
|
|---|
| 1093 | /**
|
|---|
| 1094 | * Gets the guest CPU vendor.
|
|---|
| 1095 | *
|
|---|
| 1096 | * @returns CPU vendor.
|
|---|
| 1097 | * @param pVM The cross context VM structure.
|
|---|
| 1098 | */
|
|---|
| 1099 | VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
|
|---|
| 1100 | {
|
|---|
| 1101 | return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
|
|---|
| 1102 | }
|
|---|
| 1103 |
|
|---|
| 1104 |
|
|---|
| 1105 | /**
|
|---|
| 1106 | * Gets the guest CPU architecture.
|
|---|
| 1107 | *
|
|---|
| 1108 | * @returns CPU architecture.
|
|---|
| 1109 | * @param pVM The cross context VM structure.
|
|---|
| 1110 | */
|
|---|
| 1111 | VMMDECL(CPUMARCH) CPUMGetGuestArch(PCVM pVM)
|
|---|
| 1112 | {
|
|---|
| 1113 | RT_NOREF(pVM);
|
|---|
| 1114 | return kCpumArch_X86; /* Static as we are in the x86 VMM module here. */
|
|---|
| 1115 | }
|
|---|
| 1116 |
|
|---|
| 1117 |
|
|---|
| 1118 | /**
|
|---|
| 1119 | * Gets the guest CPU microarchitecture.
|
|---|
| 1120 | *
|
|---|
| 1121 | * @returns CPU microarchitecture.
|
|---|
| 1122 | * @param pVM The cross context VM structure.
|
|---|
| 1123 | */
|
|---|
| 1124 | VMMDECL(CPUMMICROARCH) CPUMGetGuestMicroarch(PCVM pVM)
|
|---|
| 1125 | {
|
|---|
| 1126 | return pVM->cpum.s.GuestFeatures.enmMicroarch;
|
|---|
| 1127 | }
|
|---|
| 1128 |
|
|---|
| 1129 |
|
|---|
| 1130 | /**
|
|---|
| 1131 | * Gets the maximum number of physical and linear address bits supported by the
|
|---|
| 1132 | * guest.
|
|---|
| 1133 | *
|
|---|
| 1134 | * @param pVM The cross context VM structure.
|
|---|
| 1135 | * @param pcPhysAddrWidth Where to store the physical address width.
|
|---|
| 1136 | * @param pcLinearAddrWidth Where to store the linear address width.
|
|---|
| 1137 | */
|
|---|
| 1138 | VMMDECL(void) CPUMGetGuestAddrWidths(PCVM pVM, uint8_t *pcPhysAddrWidth, uint8_t *pcLinearAddrWidth)
|
|---|
| 1139 | {
|
|---|
| 1140 | AssertPtr(pVM);
|
|---|
| 1141 | AssertReturnVoid(pcPhysAddrWidth);
|
|---|
| 1142 | AssertReturnVoid(pcLinearAddrWidth);
|
|---|
| 1143 | *pcPhysAddrWidth = pVM->cpum.s.GuestFeatures.cMaxPhysAddrWidth;
|
|---|
| 1144 | *pcLinearAddrWidth = pVM->cpum.s.GuestFeatures.cMaxLinearAddrWidth;
|
|---|
| 1145 | }
|
|---|
| 1146 |
|
|---|
| 1147 |
|
|---|
| 1148 | VMMDECL(int) CPUMSetGuestDR0(PVMCPUCC pVCpu, uint64_t uDr0)
|
|---|
| 1149 | {
|
|---|
| 1150 | pVCpu->cpum.s.Guest.dr[0] = uDr0;
|
|---|
| 1151 | return CPUMRecalcHyperDRx(pVCpu, 0);
|
|---|
| 1152 | }
|
|---|
| 1153 |
|
|---|
| 1154 |
|
|---|
| 1155 | VMMDECL(int) CPUMSetGuestDR1(PVMCPUCC pVCpu, uint64_t uDr1)
|
|---|
| 1156 | {
|
|---|
| 1157 | pVCpu->cpum.s.Guest.dr[1] = uDr1;
|
|---|
| 1158 | return CPUMRecalcHyperDRx(pVCpu, 1);
|
|---|
| 1159 | }
|
|---|
| 1160 |
|
|---|
| 1161 |
|
|---|
| 1162 | VMMDECL(int) CPUMSetGuestDR2(PVMCPUCC pVCpu, uint64_t uDr2)
|
|---|
| 1163 | {
|
|---|
| 1164 | pVCpu->cpum.s.Guest.dr[2] = uDr2;
|
|---|
| 1165 | return CPUMRecalcHyperDRx(pVCpu, 2);
|
|---|
| 1166 | }
|
|---|
| 1167 |
|
|---|
| 1168 |
|
|---|
| 1169 | VMMDECL(int) CPUMSetGuestDR3(PVMCPUCC pVCpu, uint64_t uDr3)
|
|---|
| 1170 | {
|
|---|
| 1171 | pVCpu->cpum.s.Guest.dr[3] = uDr3;
|
|---|
| 1172 | return CPUMRecalcHyperDRx(pVCpu, 3);
|
|---|
| 1173 | }
|
|---|
| 1174 |
|
|---|
| 1175 |
|
|---|
| 1176 | VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
|
|---|
| 1177 | {
|
|---|
| 1178 | pVCpu->cpum.s.Guest.dr[6] = uDr6;
|
|---|
| 1179 | pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_DR6;
|
|---|
| 1180 | return VINF_SUCCESS; /* No need to recalc. */
|
|---|
| 1181 | }
|
|---|
| 1182 |
|
|---|
| 1183 |
|
|---|
| 1184 | VMMDECL(int) CPUMSetGuestDR7(PVMCPUCC pVCpu, uint64_t uDr7)
|
|---|
| 1185 | {
|
|---|
| 1186 | pVCpu->cpum.s.Guest.dr[7] = uDr7;
|
|---|
| 1187 | pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_DR7;
|
|---|
| 1188 | return CPUMRecalcHyperDRx(pVCpu, 7);
|
|---|
| 1189 | }
|
|---|
| 1190 |
|
|---|
| 1191 |
|
|---|
| 1192 | VMMDECL(int) CPUMSetGuestDRx(PVMCPUCC pVCpu, uint32_t iReg, uint64_t Value)
|
|---|
| 1193 | {
|
|---|
| 1194 | AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
|
|---|
| 1195 | /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
|
|---|
| 1196 | if (iReg == 4 || iReg == 5)
|
|---|
| 1197 | iReg += 2;
|
|---|
| 1198 | pVCpu->cpum.s.Guest.dr[iReg] = Value;
|
|---|
| 1199 | return CPUMRecalcHyperDRx(pVCpu, iReg);
|
|---|
| 1200 | }
|
|---|
| 1201 |
|
|---|
| 1202 |
|
|---|
| 1203 | /**
|
|---|
| 1204 | * Recalculates the hypervisor DRx register values based on current guest
|
|---|
| 1205 | * registers and DBGF breakpoints, updating changed registers depending on the
|
|---|
| 1206 | * context.
|
|---|
| 1207 | *
|
|---|
| 1208 | * This is called whenever a guest DRx register is modified (any context) and
|
|---|
| 1209 | * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
|
|---|
| 1210 | *
|
|---|
| 1211 | * In raw-mode context this function will reload any (hyper) DRx registers which
|
|---|
| 1212 | * comes out with a different value. It may also have to save the host debug
|
|---|
| 1213 | * registers if that haven't been done already. In this context though, we'll
|
|---|
| 1214 | * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
|
|---|
| 1215 | * are only important when breakpoints are actually enabled.
|
|---|
| 1216 | *
|
|---|
| 1217 | * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
|
|---|
| 1218 | * reloaded by the HM code if it changes. Further more, we will only use the
|
|---|
| 1219 | * combined register set when the VBox debugger is actually using hardware BPs,
|
|---|
| 1220 | * when it isn't we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
|
|---|
| 1221 | * concern us here).
|
|---|
| 1222 | *
|
|---|
| 1223 | * In ring-3 we won't be loading anything, so well calculate hypervisor values
|
|---|
| 1224 | * all the time.
|
|---|
| 1225 | *
|
|---|
| 1226 | * @returns VINF_SUCCESS.
|
|---|
| 1227 | * @param pVCpu The cross context virtual CPU structure.
|
|---|
| 1228 | * @param iGstReg The guest debug register number that was modified.
|
|---|
| 1229 | * UINT8_MAX if not guest register.
|
|---|
| 1230 | */
|
|---|
| 1231 | VMMDECL(int) CPUMRecalcHyperDRx(PVMCPUCC pVCpu, uint8_t iGstReg)
|
|---|
| 1232 | {
|
|---|
| 1233 | PVM pVM = pVCpu->CTX_SUFF(pVM);
|
|---|
| 1234 | #ifndef IN_RING0
|
|---|
| 1235 | RT_NOREF_PV(iGstReg);
|
|---|
| 1236 | #endif
|
|---|
| 1237 |
|
|---|
| 1238 | /*
|
|---|
| 1239 | * Compare the DR7s first.
|
|---|
| 1240 | *
|
|---|
| 1241 | * We only care about the enabled flags. GD is virtualized when we
|
|---|
| 1242 | * dispatch the #DB, we never enable it. The DBGF DR7 value is will
|
|---|
| 1243 | * always have the LE and GE bits set, so no need to check and disable
|
|---|
| 1244 | * stuff if they're cleared like we have to for the guest DR7.
|
|---|
| 1245 | */
|
|---|
| 1246 | const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
|
|---|
| 1247 | if (uDbgfDr7 & X86_DR7_ENABLED_MASK)
|
|---|
| 1248 | {
|
|---|
| 1249 | Assert(!CPUMIsGuestDebugStateActive(pVCpu));
|
|---|
| 1250 |
|
|---|
| 1251 | RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
|
|---|
| 1252 | /** @todo This isn't correct. BPs work without setting LE and GE under AMD-V. They are also documented as unsupported by P6+. */
|
|---|
| 1253 | if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
|
|---|
| 1254 | uGstDr7 = 0;
|
|---|
| 1255 | else if (!(uGstDr7 & X86_DR7_LE))
|
|---|
| 1256 | uGstDr7 &= ~X86_DR7_LE_ALL;
|
|---|
| 1257 | else if (!(uGstDr7 & X86_DR7_GE))
|
|---|
| 1258 | uGstDr7 &= ~X86_DR7_GE_ALL;
|
|---|
| 1259 |
|
|---|
| 1260 | /*
|
|---|
| 1261 | * Ok, something is enabled. Recalc each of the breakpoints, taking
|
|---|
| 1262 | * the VM debugger ones of the guest ones. In raw-mode context we will
|
|---|
| 1263 | * not allow breakpoints with values inside the hypervisor area.
|
|---|
| 1264 | */
|
|---|
| 1265 | RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
|
|---|
| 1266 |
|
|---|
| 1267 | /* bp 0 */
|
|---|
| 1268 | RTGCUINTREG uNewDr0;
|
|---|
| 1269 | if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
|
|---|
| 1270 | {
|
|---|
| 1271 | uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
|
|---|
| 1272 | uNewDr0 = DBGFBpGetDR0(pVM);
|
|---|
| 1273 | }
|
|---|
| 1274 | else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
|
|---|
| 1275 | {
|
|---|
| 1276 | uNewDr0 = CPUMGetGuestDR0(pVCpu);
|
|---|
| 1277 | uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
|
|---|
| 1278 | }
|
|---|
| 1279 | else
|
|---|
| 1280 | uNewDr0 = 0;
|
|---|
| 1281 |
|
|---|
| 1282 | /* bp 1 */
|
|---|
| 1283 | RTGCUINTREG uNewDr1;
|
|---|
| 1284 | if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
|
|---|
| 1285 | {
|
|---|
| 1286 | uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
|
|---|
| 1287 | uNewDr1 = DBGFBpGetDR1(pVM);
|
|---|
| 1288 | }
|
|---|
| 1289 | else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
|
|---|
| 1290 | {
|
|---|
| 1291 | uNewDr1 = CPUMGetGuestDR1(pVCpu);
|
|---|
| 1292 | uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
|
|---|
| 1293 | }
|
|---|
| 1294 | else
|
|---|
| 1295 | uNewDr1 = 0;
|
|---|
| 1296 |
|
|---|
| 1297 | /* bp 2 */
|
|---|
| 1298 | RTGCUINTREG uNewDr2;
|
|---|
| 1299 | if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
|
|---|
| 1300 | {
|
|---|
| 1301 | uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
|
|---|
| 1302 | uNewDr2 = DBGFBpGetDR2(pVM);
|
|---|
| 1303 | }
|
|---|
| 1304 | else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
|
|---|
| 1305 | {
|
|---|
| 1306 | uNewDr2 = CPUMGetGuestDR2(pVCpu);
|
|---|
| 1307 | uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
|
|---|
| 1308 | }
|
|---|
| 1309 | else
|
|---|
| 1310 | uNewDr2 = 0;
|
|---|
| 1311 |
|
|---|
| 1312 | /* bp 3 */
|
|---|
| 1313 | RTGCUINTREG uNewDr3;
|
|---|
| 1314 | if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
|
|---|
| 1315 | {
|
|---|
| 1316 | uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
|
|---|
| 1317 | uNewDr3 = DBGFBpGetDR3(pVM);
|
|---|
| 1318 | }
|
|---|
| 1319 | else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
|
|---|
| 1320 | {
|
|---|
| 1321 | uNewDr3 = CPUMGetGuestDR3(pVCpu);
|
|---|
| 1322 | uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
|
|---|
| 1323 | }
|
|---|
| 1324 | else
|
|---|
| 1325 | uNewDr3 = 0;
|
|---|
| 1326 |
|
|---|
| 1327 | /*
|
|---|
| 1328 | * Apply the updates.
|
|---|
| 1329 | */
|
|---|
| 1330 | pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
|
|---|
| 1331 | if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
|
|---|
| 1332 | cpumSetHyperDR3(pVCpu, uNewDr3);
|
|---|
| 1333 | if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
|
|---|
| 1334 | cpumSetHyperDR2(pVCpu, uNewDr2);
|
|---|
| 1335 | if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
|
|---|
| 1336 | cpumSetHyperDR1(pVCpu, uNewDr1);
|
|---|
| 1337 | if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
|
|---|
| 1338 | cpumSetHyperDR0(pVCpu, uNewDr0);
|
|---|
| 1339 | if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
|
|---|
| 1340 | CPUMSetHyperDR7(pVCpu, uNewDr7);
|
|---|
| 1341 | }
|
|---|
| 1342 | #ifdef IN_RING0
|
|---|
| 1343 | else if (CPUMIsGuestDebugStateActive(pVCpu))
|
|---|
| 1344 | {
|
|---|
| 1345 | /*
|
|---|
| 1346 | * Reload the register that was modified. Normally this won't happen
|
|---|
| 1347 | * as we won't intercept DRx writes when not having the hyper debug
|
|---|
| 1348 | * state loaded, but in case we do for some reason we'll simply deal
|
|---|
| 1349 | * with it.
|
|---|
| 1350 | */
|
|---|
| 1351 | switch (iGstReg)
|
|---|
| 1352 | {
|
|---|
| 1353 | case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
|
|---|
| 1354 | case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
|
|---|
| 1355 | case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
|
|---|
| 1356 | case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
|
|---|
| 1357 | default:
|
|---|
| 1358 | AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
|
|---|
| 1359 | }
|
|---|
| 1360 | }
|
|---|
| 1361 | #endif
|
|---|
| 1362 | else
|
|---|
| 1363 | {
|
|---|
| 1364 | /*
|
|---|
| 1365 | * No active debug state any more. In raw-mode this means we have to
|
|---|
| 1366 | * make sure DR7 has everything disabled now, if we armed it already.
|
|---|
| 1367 | * In ring-0 we might end up here when just single stepping.
|
|---|
| 1368 | */
|
|---|
| 1369 | #ifdef IN_RING0
|
|---|
| 1370 | if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
|
|---|
| 1371 | {
|
|---|
| 1372 | if (pVCpu->cpum.s.Hyper.dr[0])
|
|---|
| 1373 | ASMSetDR0(0);
|
|---|
| 1374 | if (pVCpu->cpum.s.Hyper.dr[1])
|
|---|
| 1375 | ASMSetDR1(0);
|
|---|
| 1376 | if (pVCpu->cpum.s.Hyper.dr[2])
|
|---|
| 1377 | ASMSetDR2(0);
|
|---|
| 1378 | if (pVCpu->cpum.s.Hyper.dr[3])
|
|---|
| 1379 | ASMSetDR3(0);
|
|---|
| 1380 | pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
|
|---|
| 1381 | }
|
|---|
| 1382 | #endif
|
|---|
| 1383 | pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
|
|---|
| 1384 |
|
|---|
| 1385 | /* Clear all the registers. */
|
|---|
| 1386 | pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
|
|---|
| 1387 | pVCpu->cpum.s.Hyper.dr[3] = 0;
|
|---|
| 1388 | pVCpu->cpum.s.Hyper.dr[2] = 0;
|
|---|
| 1389 | pVCpu->cpum.s.Hyper.dr[1] = 0;
|
|---|
| 1390 | pVCpu->cpum.s.Hyper.dr[0] = 0;
|
|---|
| 1391 |
|
|---|
| 1392 | }
|
|---|
| 1393 | Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
|
|---|
| 1394 | pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
|
|---|
| 1395 | pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
|
|---|
| 1396 | pVCpu->cpum.s.Hyper.dr[7]));
|
|---|
| 1397 |
|
|---|
| 1398 | return VINF_SUCCESS;
|
|---|
| 1399 | }
|
|---|
| 1400 |
|
|---|
| 1401 |
|
|---|
| 1402 | /**
|
|---|
| 1403 | * Set the guest XCR0 register.
|
|---|
| 1404 | *
|
|---|
| 1405 | * Will load additional state if the FPU state is already loaded (in ring-0 &
|
|---|
| 1406 | * raw-mode context).
|
|---|
| 1407 | *
|
|---|
| 1408 | * @returns VINF_SUCCESS on success, VERR_CPUM_RAISE_GP_0 on invalid input
|
|---|
| 1409 | * value.
|
|---|
| 1410 | * @param pVCpu The cross context virtual CPU structure of the calling EMT.
|
|---|
| 1411 | * @param uNewValue The new value.
|
|---|
| 1412 | * @thread EMT(pVCpu)
|
|---|
| 1413 | */
|
|---|
| 1414 | VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPUCC pVCpu, uint64_t uNewValue)
|
|---|
| 1415 | {
|
|---|
| 1416 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_XCRx);
|
|---|
| 1417 | if ( (uNewValue & ~pVCpu->CTX_SUFF(pVM)->cpum.s.fXStateGuestMask) == 0
|
|---|
| 1418 | /* The X87 bit cannot be cleared. */
|
|---|
| 1419 | && (uNewValue & XSAVE_C_X87)
|
|---|
| 1420 | /* AVX requires SSE. */
|
|---|
| 1421 | && (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM)) != XSAVE_C_YMM
|
|---|
| 1422 | /* AVX-512 requires YMM, SSE and all of its three components to be enabled. */
|
|---|
| 1423 | && ( (uNewValue & (XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI)) == 0
|
|---|
| 1424 | || (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI))
|
|---|
| 1425 | == (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI) )
|
|---|
| 1426 | )
|
|---|
| 1427 | {
|
|---|
| 1428 | pVCpu->cpum.s.Guest.aXcr[0] = uNewValue;
|
|---|
| 1429 |
|
|---|
| 1430 | /* If more state components are enabled, we need to take care to load
|
|---|
| 1431 | them if the FPU/SSE state is already loaded. May otherwise leak
|
|---|
| 1432 | host state to the guest. */
|
|---|
| 1433 | uint64_t fNewComponents = ~pVCpu->cpum.s.Guest.fXStateMask & uNewValue;
|
|---|
| 1434 | if (fNewComponents)
|
|---|
| 1435 | {
|
|---|
| 1436 | #ifdef IN_RING0
|
|---|
| 1437 | if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
|
|---|
| 1438 | {
|
|---|
| 1439 | if (pVCpu->cpum.s.Guest.fXStateMask != 0)
|
|---|
| 1440 | /* Adding more components. */
|
|---|
| 1441 | ASMXRstor(&pVCpu->cpum.s.Guest.XState, fNewComponents);
|
|---|
| 1442 | else
|
|---|
| 1443 | {
|
|---|
| 1444 | /* We're switching from FXSAVE/FXRSTOR to XSAVE/XRSTOR. */
|
|---|
| 1445 | pVCpu->cpum.s.Guest.fXStateMask |= XSAVE_C_X87 | XSAVE_C_SSE;
|
|---|
| 1446 | if (uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE))
|
|---|
| 1447 | ASMXRstor(&pVCpu->cpum.s.Guest.XState, uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE));
|
|---|
| 1448 | }
|
|---|
| 1449 | }
|
|---|
| 1450 | #endif
|
|---|
| 1451 | pVCpu->cpum.s.Guest.fXStateMask |= uNewValue;
|
|---|
| 1452 | }
|
|---|
| 1453 | return VINF_SUCCESS;
|
|---|
| 1454 | }
|
|---|
| 1455 | return VERR_CPUM_RAISE_GP_0;
|
|---|
| 1456 | }
|
|---|
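|  | /*
|  |  * Editorial usage sketch (not part of the original source): how a hypothetical
|  |  * XSETBV handler might call CPUMSetGuestXcr0 above. The constants and return
|  |  * codes are the ones used in the function; pVCpu is assumed to be the calling EMT.
|  |  *
|  |  *     // Valid: x87 + SSE + AVX (YMM requires SSE, which is set).
|  |  *     int rc = CPUMSetGuestXcr0(pVCpu, XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM);
|  |  *     Assert(rc == VINF_SUCCESS);
|  |  *
|  |  *     // Invalid: YMM without SSE (or a cleared x87 bit) is rejected and the
|  |  *     // caller is expected to raise #GP(0) in the guest.
|  |  *     rc = CPUMSetGuestXcr0(pVCpu, XSAVE_C_X87 | XSAVE_C_YMM);
|  |  *     Assert(rc == VERR_CPUM_RAISE_GP_0);
|  |  */
|
|---|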
| 1457 |
|
|---|
| 1458 |
|
|---|
| 1459 | /**
|
|---|
| 1460 | * Tests if the guest has No-Execute Page Protection Enabled (NXE).
|
|---|
| 1461 | *
|
|---|
| 1462 | * @returns true if NXE is enabled, otherwise false.
|
|---|
| 1463 | * @param pVCpu The cross context virtual CPU structure.
|
|---|
| 1464 | */
|
|---|
| 1465 | VMMDECL(bool) CPUMIsGuestNXEnabled(PCVMCPU pVCpu)
|
|---|
| 1466 | {
|
|---|
| 1467 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
|
|---|
| 1468 | return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
|
|---|
| 1469 | }
|
|---|
| 1470 |
|
|---|
| 1471 |
|
|---|
| 1472 | /**
|
|---|
| 1473 | * Tests if the guest has the Page Size Extension enabled (PSE).
|
|---|
| 1474 | *
|
|---|
| 1475 | * @returns true if PSE is enabled, otherwise false.
|
|---|
| 1476 | * @param pVCpu The cross context virtual CPU structure.
|
|---|
| 1477 | */
|
|---|
| 1478 | VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PCVMCPU pVCpu)
|
|---|
| 1479 | {
|
|---|
| 1480 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4);
|
|---|
| 1481 | /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
|
|---|
| 1482 | return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
|
|---|
| 1483 | }
|
|---|
| 1484 |
|
|---|
| 1485 |
|
|---|
| 1486 | /**
|
|---|
| 1487 | * Tests if the guest has paging enabled (PG).
|
|---|
| 1488 | *
|
|---|
| 1489 | * @returns true if paging is enabled, otherwise false.
|
|---|
| 1490 | * @param pVCpu The cross context virtual CPU structure.
|
|---|
| 1491 | */
|
|---|
| 1492 | VMMDECL(bool) CPUMIsGuestPagingEnabled(PCVMCPU pVCpu)
|
|---|
| 1493 | {
|
|---|
| 1494 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
|
|---|
| 1495 | return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
|
|---|
| 1496 | }
|
|---|
| 1497 |
|
|---|
| 1498 |
|
|---|
| 1499 | /**
|
|---|
| 1500 | * Tests if the guest has ring-0 write protection enabled (CR0.WP).
|
|---|
| 1501 | *
|
|---|
| 1502 | * @returns true if CR0.WP is set, otherwise false.
|
|---|
| 1503 | * @param pVCpu The cross context virtual CPU structure.
|
|---|
| 1504 | */
|
|---|
| 1505 | VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PCVMCPU pVCpu)
|
|---|
| 1506 | {
|
|---|
| 1507 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
|
|---|
| 1508 | return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
|
|---|
| 1509 | }
|
|---|
| 1510 |
|
|---|
| 1511 |
|
|---|
| 1512 | /**
|
|---|
| 1513 | * Tests if the guest is running in real mode or not.
|
|---|
| 1514 | *
|
|---|
| 1515 | * @returns true if in real mode, otherwise false.
|
|---|
| 1516 | * @param pVCpu The cross context virtual CPU structure.
|
|---|
| 1517 | */
|
|---|
| 1518 | VMMDECL(bool) CPUMIsGuestInRealMode(PCVMCPU pVCpu)
|
|---|
| 1519 | {
|
|---|
| 1520 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
|
|---|
| 1521 | return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
|
|---|
| 1522 | }
|
|---|
| 1523 |
|
|---|
| 1524 |
|
|---|
| 1525 | /**
|
|---|
| 1526 | * Tests if the guest is running in real or virtual 8086 mode.
|
|---|
| 1527 | *
|
|---|
| 1528 | * @returns @c true if it is, @c false if not.
|
|---|
| 1529 | * @param pVCpu The cross context virtual CPU structure.
|
|---|
| 1530 | */
|
|---|
| 1531 | VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PCVMCPU pVCpu)
|
|---|
| 1532 | {
|
|---|
| 1533 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS);
|
|---|
| 1534 | return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
|
|---|
| 1535 | || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
|
|---|
| 1536 | }
|
|---|
| 1537 |
|
|---|
| 1538 |
|
|---|
| 1539 | /**
|
|---|
| 1540 | * Tests if the guest is running in protected mode or not.
|
|---|
| 1541 | *
|
|---|
| 1542 | * @returns true if in protected mode, otherwise false.
|
|---|
| 1543 | * @param pVCpu The cross context virtual CPU structure.
|
|---|
| 1544 | */
|
|---|
| 1545 | VMMDECL(bool) CPUMIsGuestInProtectedMode(PCVMCPU pVCpu)
|
|---|
| 1546 | {
|
|---|
| 1547 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
|
|---|
| 1548 | return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
|
|---|
| 1549 | }
|
|---|
| 1550 |
|
|---|
| 1551 |
|
|---|
| 1552 | /**
|
|---|
| 1553 | * Tests if the guest is running in paged protected mode or not.
|
|---|
| 1554 | *
|
|---|
| 1555 | * @returns true if in paged protected mode, otherwise false.
|
|---|
| 1556 | * @param pVCpu The cross context virtual CPU structure.
|
|---|
| 1557 | */
|
|---|
| 1558 | VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PCVMCPU pVCpu)
|
|---|
| 1559 | {
|
|---|
| 1560 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0);
|
|---|
| 1561 | return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
|
|---|
| 1562 | }
|
|---|
| 1563 |
|
|---|
| 1564 |
|
|---|
| 1565 | /**
|
|---|
| 1566 | * Tests if the guest is running in long mode or not.
|
|---|
| 1567 | *
|
|---|
| 1568 | * @returns true if in long mode, otherwise false.
|
|---|
| 1569 | * @param pVCpu The cross context virtual CPU structure.
|
|---|
| 1570 | */
|
|---|
| 1571 | VMMDECL(bool) CPUMIsGuestInLongMode(PCVMCPU pVCpu)
|
|---|
| 1572 | {
|
|---|
| 1573 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_EFER);
|
|---|
| 1574 | return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
|
|---|
| 1575 | }
|
|---|
| 1576 |
|
|---|
| 1577 |
|
|---|
| 1578 | /**
|
|---|
| 1579 | * Tests if the guest is running in PAE mode or not.
|
|---|
| 1580 | *
|
|---|
| 1581 | * @returns true if in PAE mode, otherwise false.
|
|---|
| 1582 | * @param pVCpu The cross context virtual CPU structure.
|
|---|
| 1583 | */
|
|---|
| 1584 | VMMDECL(bool) CPUMIsGuestInPAEMode(PCVMCPU pVCpu)
|
|---|
| 1585 | {
|
|---|
| 1586 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
|
|---|
| 1587 | /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
|
|---|
| 1588 | than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
|
|---|
| 1589 | return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
|
|---|
| 1590 | && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
|
|---|
| 1591 | && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
|
|---|
| 1592 | }
|
|---|
| 1593 |
|
|---|
| 1594 |
|
|---|
| 1595 | /**
|
|---|
| 1596 | * Tests if the guest is running in 64-bit mode or not.
|
|---|
| 1597 | *
|
|---|
| 1598 | * @returns true if in 64-bit protected mode, otherwise false.
|
|---|
| 1599 | * @param pVCpu The cross context virtual CPU structure of the calling EMT.
|
|---|
| 1600 | */
|
|---|
| 1601 | VMMDECL(bool) CPUMIsGuestIn64BitCode(PCVMCPU pVCpu)
|
|---|
| 1602 | {
|
|---|
| 1603 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
|
|---|
| 1604 | if (!CPUMIsGuestInLongMode(pVCpu))
|
|---|
| 1605 | return false;
|
|---|
| 1606 | CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
|
|---|
| 1607 | return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
|
|---|
| 1608 | }
|
|---|
| 1609 |
|
|---|
| 1610 |
|
|---|
| 1611 | /**
|
|---|
| 1612 | * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
|
|---|
| 1613 | * registers.
|
|---|
| 1614 | *
|
|---|
| 1615 | * @returns true if in 64-bit protected mode, otherwise false.
|
|---|
| 1616 | * @param pCtx Pointer to the current guest CPU context.
|
|---|
| 1617 | */
|
|---|
| 1618 | VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCCPUMCTX pCtx)
|
|---|
| 1619 | {
|
|---|
| 1620 | return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
|
|---|
| 1621 | }
|
|---|
| 1622 |
|
|---|
| 1623 |
|
|---|
| 1624 | /**
|
|---|
| 1625 | * Sets the specified changed flags (CPUM_CHANGED_*).
|
|---|
| 1626 | *
|
|---|
| 1627 | * @param pVCpu The cross context virtual CPU structure of the calling EMT.
|
|---|
| 1628 | * @param fChangedAdd The changed flags to add.
|
|---|
| 1629 | */
|
|---|
| 1630 | VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd)
|
|---|
| 1631 | {
|
|---|
| 1632 | pVCpu->cpum.s.fChanged |= fChangedAdd;
|
|---|
| 1633 | }
|
|---|
| 1634 |
|
|---|
| 1635 |
|
|---|
| 1636 | /**
|
|---|
| 1637 | * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
|
|---|
| 1638 | * @returns true if used.
|
|---|
| 1639 | * @returns false if not used.
|
|---|
| 1640 | * @param pVM The cross context VM structure.
|
|---|
| 1641 | */
|
|---|
| 1642 | VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
|
|---|
| 1643 | {
|
|---|
| 1644 | return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
|
|---|
| 1645 | }
|
|---|
| 1646 |
|
|---|
| 1647 |
|
|---|
| 1648 | /**
|
|---|
| 1649 | * Checks if the host OS uses the SYSCALL / SYSRET instructions.
|
|---|
| 1650 | * @returns true if used.
|
|---|
| 1651 | * @returns false if not used.
|
|---|
| 1652 | * @param pVM The cross context VM structure.
|
|---|
| 1653 | */
|
|---|
| 1654 | VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
|
|---|
| 1655 | {
|
|---|
| 1656 | return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
|
|---|
| 1657 | }
|
|---|
| 1658 |
|
|---|
| 1659 |
|
|---|
| 1660 | /**
|
|---|
| 1661 | * Checks if we activated the FPU/XMM state of the guest OS.
|
|---|
| 1662 | *
|
|---|
| 1663 | * Obsolete: This differs from CPUMIsGuestFPUStateLoaded() in that it refers to
|
|---|
| 1664 | * the next time we'll be executing guest code, so it may return true for
|
|---|
| 1665 | * 64-on-32 when we still haven't actually loaded the FPU state, just scheduled
|
|---|
| 1666 | * it to be loaded the next time we go through the world switcher
|
|---|
| 1667 | * (CPUM_SYNC_FPU_STATE).
|
|---|
| 1668 | *
|
|---|
| 1669 | * @returns true / false.
|
|---|
| 1670 | * @param pVCpu The cross context virtual CPU structure.
|
|---|
| 1671 | */
|
|---|
| 1672 | VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
|
|---|
| 1673 | {
|
|---|
| 1674 | bool fRet = RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
|
|---|
| 1675 | AssertMsg(fRet == pVCpu->cpum.s.Guest.fUsedFpuGuest, ("fRet=%d\n", fRet));
|
|---|
| 1676 | return fRet;
|
|---|
| 1677 | }
|
|---|
| 1678 |
|
|---|
| 1679 |
|
|---|
| 1680 | /**
|
|---|
| 1681 | * Checks if we've really loaded the FPU/XMM state of the guest OS.
|
|---|
| 1682 | *
|
|---|
| 1683 | * @returns true / false.
|
|---|
| 1684 | * @param pVCpu The cross context virtual CPU structure.
|
|---|
| 1685 | */
|
|---|
| 1686 | VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu)
|
|---|
| 1687 | {
|
|---|
| 1688 | bool fRet = RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
|
|---|
| 1689 | AssertMsg(fRet == pVCpu->cpum.s.Guest.fUsedFpuGuest, ("fRet=%d\n", fRet));
|
|---|
| 1690 | return fRet;
|
|---|
| 1691 | }
|
|---|
| 1692 |
|
|---|
| 1693 |
|
|---|
| 1694 | /**
|
|---|
| 1695 | * Checks if we saved the FPU/XMM state of the host OS.
|
|---|
| 1696 | *
|
|---|
| 1697 | * @returns true / false.
|
|---|
| 1698 | * @param pVCpu The cross context virtual CPU structure.
|
|---|
| 1699 | */
|
|---|
| 1700 | VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu)
|
|---|
| 1701 | {
|
|---|
| 1702 | return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_HOST);
|
|---|
| 1703 | }
|
|---|
| 1704 |
|
|---|
| 1705 |
|
|---|
| 1706 | /**
|
|---|
| 1707 | * Checks if the guest debug state is active.
|
|---|
| 1708 | *
|
|---|
| 1709 | * @returns boolean
|
|---|
| 1710 | * @param pVCpu The cross context virtual CPU structure of the calling EMT.
|
|---|
| 1711 | */
|
|---|
| 1712 | VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
|
|---|
| 1713 | {
|
|---|
| 1714 | return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
|
|---|
| 1715 | }
|
|---|
| 1716 |
|
|---|
| 1717 |
|
|---|
| 1718 | /**
|
|---|
| 1719 | * Checks if the hyper debug state is active.
|
|---|
| 1720 | *
|
|---|
| 1721 | * @returns boolean
|
|---|
| 1722 | * @param pVCpu The cross context virtual CPU structure of the calling EMT.
|
|---|
| 1723 | */
|
|---|
| 1724 | VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
|
|---|
| 1725 | {
|
|---|
| 1726 | return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
|
|---|
| 1727 | }
|
|---|
| 1728 |
|
|---|
| 1729 |
|
|---|
| 1730 | /**
|
|---|
| 1731 | * Mark the guest's debug state as inactive.
|
|---|
| 1732 | *
|
|---|
| 1733 | * @param pVCpu The cross context virtual CPU structure of the calling EMT.
|
|---|
| 1734 | * @todo This API doesn't make sense any more.
|
|---|
| 1735 | */
|
|---|
| 1736 | VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
|
|---|
| 1737 | {
|
|---|
| 1738 | Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
|
|---|
| 1739 | NOREF(pVCpu);
|
|---|
| 1740 | }
|
|---|
| 1741 |
|
|---|
| 1742 |
|
|---|
| 1743 | /**
|
|---|
| 1744 | * Get the current privilege level of the guest.
|
|---|
| 1745 | *
|
|---|
| 1746 | * @returns CPL
|
|---|
| 1747 | * @param pVCpu The cross context virtual CPU structure of the calling EMT.
|
|---|
| 1748 | */
|
|---|
| 1749 | VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
|
|---|
| 1750 | {
|
|---|
| 1751 | /*
|
|---|
| 1752 | * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
|
|---|
| 1753 | *
|
|---|
| 1754 | * Note! We used to check CS.DPL here, assuming it was always equal to
|
|---|
| 1755 | * CPL even if a conforming segment was loaded. But this turned out to
|
|---|
| 1756 | * only apply to older AMD-V. With VT-x we had an ACP2 regression
|
|---|
| 1757 | * during install after a far call to ring 2 with VT-x. Then on newer
|
|---|
| 1758 | * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
|
|---|
| 1759 | * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
|
|---|
| 1760 | *
|
|---|
| 1761 | * So, forget CS.DPL, always use SS.DPL.
|
|---|
| 1762 | *
|
|---|
| 1763 | * Note! The SS RPL is always equal to the CPL, while the CS RPL
|
|---|
| 1764 | * isn't necessarily equal if the segment is conforming.
|
|---|
| 1765 | * See section 4.11.1 in the AMD manual.
|
|---|
| 1766 | *
|
|---|
| 1767 | * Update: Where the heck does it say CS.RPL can differ from CPL other than
|
|---|
| 1768 | * right after real->prot mode switch and when in V8086 mode? That
|
|---|
| 1769 | * section says the RPL specified in a direct transfer (call, jmp,
|
|---|
| 1770 | * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
|
|---|
| 1771 | * it would be impossible for an exception handler or the iret
|
|---|
| 1772 | * instruction to figure out whether SS:ESP are part of the frame
|
|---|
| 1773 | * or not. A VBox or qemu bug must've led to this misconception.
|
|---|
| 1774 | *
|
|---|
| 1775 | * Update2: On an AMD bulldozer system here, I've no trouble loading a null
|
|---|
| 1776 | * selector into SS with an RPL other than the CPL when CPL != 3 and
|
|---|
| 1777 | * we're in 64-bit mode. The intel dev box doesn't allow this and
|
|---|
| 1778 | * insists on RPL = CPL. Weird.
|
|---|
| 1779 | */
|
|---|
| 1780 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
|
|---|
| 1781 | uint32_t uCpl;
|
|---|
| 1782 | if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
|
|---|
| 1783 | {
|
|---|
| 1784 | if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
|
|---|
| 1785 | {
|
|---|
| 1786 | if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
|
|---|
| 1787 | uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
|
|---|
| 1788 | else
|
|---|
| 1789 | uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
|
|---|
| 1790 | }
|
|---|
| 1791 | else
|
|---|
| 1792 | uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
|
|---|
| 1793 | }
|
|---|
| 1794 | else
|
|---|
| 1795 | uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
|
|---|
| 1796 | return uCpl;
|
|---|
| 1797 | }
|
|---|
| 1798 |
|
|---|
| 1799 |
|
|---|
| 1800 | /**
|
|---|
| 1801 | * Gets the current guest CPU mode.
|
|---|
| 1802 | *
|
|---|
| 1803 | * If paging mode is what you need, check out PGMGetGuestMode().
|
|---|
| 1804 | *
|
|---|
| 1805 | * @returns The CPU mode.
|
|---|
| 1806 | * @param pVCpu The cross context virtual CPU structure.
|
|---|
| 1807 | */
|
|---|
| 1808 | VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
|
|---|
| 1809 | {
|
|---|
| 1810 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
|
|---|
| 1811 | CPUMMODE enmMode;
|
|---|
| 1812 | if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
|
|---|
| 1813 | enmMode = CPUMMODE_REAL;
|
|---|
| 1814 | else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
|
|---|
| 1815 | enmMode = CPUMMODE_PROTECTED;
|
|---|
| 1816 | else
|
|---|
| 1817 | enmMode = CPUMMODE_LONG;
|
|---|
| 1818 |
|
|---|
| 1819 | return enmMode;
|
|---|
| 1820 | }
|
|---|
| 1821 |
|
|---|
| 1822 |
|
|---|
| 1823 | /**
|
|---|
| 1824 | * Figure whether the CPU is currently executing 16, 32 or 64 bit code.
|
|---|
| 1825 | *
|
|---|
| 1826 | * @returns 16, 32 or 64.
|
|---|
| 1827 | * @param pVCpu The cross context virtual CPU structure of the calling EMT.
|
|---|
| 1828 | */
|
|---|
| 1829 | VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
|
|---|
| 1830 | {
|
|---|
| 1831 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS);
|
|---|
| 1832 |
|
|---|
| 1833 | if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
|
|---|
| 1834 | return 16;
|
|---|
| 1835 |
|
|---|
| 1836 | if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
|
|---|
| 1837 | {
|
|---|
| 1838 | Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
|
|---|
| 1839 | return 16;
|
|---|
| 1840 | }
|
|---|
| 1841 |
|
|---|
| 1842 | CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
|
|---|
| 1843 | if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
|
|---|
| 1844 | && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
|
|---|
| 1845 | return 64;
|
|---|
| 1846 |
|
|---|
| 1847 | if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
|
|---|
| 1848 | return 32;
|
|---|
| 1849 |
|
|---|
| 1850 | return 16;
|
|---|
| 1851 | }
|
|---|
| 1852 |
|
|---|
| 1853 |
|
|---|
| 1854 | VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
|
|---|
| 1855 | {
|
|---|
| 1856 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS);
|
|---|
| 1857 |
|
|---|
| 1858 | if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
|
|---|
| 1859 | return DISCPUMODE_16BIT;
|
|---|
| 1860 |
|
|---|
| 1861 | if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
|
|---|
| 1862 | {
|
|---|
| 1863 | Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
|
|---|
| 1864 | return DISCPUMODE_16BIT;
|
|---|
| 1865 | }
|
|---|
| 1866 |
|
|---|
| 1867 | CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
|
|---|
| 1868 | if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
|
|---|
| 1869 | && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
|
|---|
| 1870 | return DISCPUMODE_64BIT;
|
|---|
| 1871 |
|
|---|
| 1872 | if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
|
|---|
| 1873 | return DISCPUMODE_32BIT;
|
|---|
| 1874 |
|
|---|
| 1875 | return DISCPUMODE_16BIT;
|
|---|
| 1876 | }
|
|---|
| 1877 |
|
|---|
| 1878 |
|
|---|
| 1879 | /**
|
|---|
| 1880 | * Gets the guest MXCSR_MASK value.
|
|---|
| 1881 | *
|
|---|
| 1882 | * This does not access the x87 state, but the value we determined at VM
|
|---|
| 1883 | * initialization.
|
|---|
| 1884 | *
|
|---|
| 1885 | * @returns MXCSR mask.
|
|---|
| 1886 | * @param pVM The cross context VM structure.
|
|---|
| 1887 | */
|
|---|
| 1888 | VMMDECL(uint32_t) CPUMGetGuestMxCsrMask(PVM pVM)
|
|---|
| 1889 | {
|
|---|
| 1890 | return pVM->cpum.s.GuestInfo.fMxCsrMask;
|
|---|
| 1891 | }
|
|---|
| 1892 |
|
|---|
| 1893 |
|
|---|
| 1894 | /**
|
|---|
| 1895 | * Returns whether the guest has physical interrupts enabled.
|
|---|
| 1896 | *
|
|---|
| 1897 | * @returns @c true if interrupts are enabled, @c false otherwise.
|
|---|
| 1898 | * @param pVCpu The cross context virtual CPU structure.
|
|---|
| 1899 | *
|
|---|
| 1900 | * @remarks Warning! This function does -not- take into account the global-interrupt
|
|---|
| 1901 | * flag (GIF).
|
|---|
| 1902 | */
|
|---|
| 1903 | VMM_INT_DECL(bool) CPUMIsGuestPhysIntrEnabled(PVMCPU pVCpu)
|
|---|
| 1904 | {
|
|---|
| 1905 | switch (CPUMGetGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest))
|
|---|
| 1906 | {
|
|---|
| 1907 | case CPUMHWVIRT_NONE:
|
|---|
| 1908 | default:
|
|---|
| 1909 | return pVCpu->cpum.s.Guest.eflags.Bits.u1IF;
|
|---|
| 1910 | case CPUMHWVIRT_VMX:
|
|---|
| 1911 | return CPUMIsGuestVmxPhysIntrEnabled(&pVCpu->cpum.s.Guest);
|
|---|
| 1912 | case CPUMHWVIRT_SVM:
|
|---|
| 1913 | return CPUMIsGuestSvmPhysIntrEnabled(pVCpu, &pVCpu->cpum.s.Guest);
|
|---|
| 1914 | }
|
|---|
| 1915 | }
|
|---|
| 1916 |
|
|---|
| 1917 |
|
|---|
| 1918 | /**
|
|---|
| 1919 | * Returns whether the nested-guest has virtual interrupts enabled.
|
|---|
| 1920 | *
|
|---|
| 1921 | * @returns @c true if interrupts are enabled, @c false otherwise.
|
|---|
| 1922 | * @param pVCpu The cross context virtual CPU structure.
|
|---|
| 1923 | *
|
|---|
| 1924 | * @remarks Warning! This function does -not- take into account the global-interrupt
|
|---|
| 1925 | * flag (GIF).
|
|---|
| 1926 | */
|
|---|
| 1927 | VMM_INT_DECL(bool) CPUMIsGuestVirtIntrEnabled(PVMCPU pVCpu)
|
|---|
| 1928 | {
|
|---|
| 1929 | PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
|
|---|
| 1930 | Assert(CPUMIsGuestInNestedHwvirtMode(pCtx));
|
|---|
| 1931 |
|
|---|
| 1932 | if (CPUMIsGuestInVmxNonRootMode(pCtx))
|
|---|
| 1933 | return CPUMIsGuestVmxVirtIntrEnabled(pCtx);
|
|---|
| 1934 |
|
|---|
| 1935 | Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
|
|---|
| 1936 | return CPUMIsGuestSvmVirtIntrEnabled(pVCpu, pCtx);
|
|---|
| 1937 | }
|
|---|
| 1938 |
|
|---|
| 1939 |
|
|---|
| 1940 | /**
|
|---|
| 1941 | * Calculates the interruptiblity of the guest.
|
|---|
| 1942 | *
|
|---|
| 1943 | * @returns Interruptibility level.
|
|---|
| 1944 | * @param pVCpu The cross context virtual CPU structure.
|
|---|
| 1945 | */
|
|---|
| 1946 | VMM_INT_DECL(CPUMINTERRUPTIBILITY) CPUMGetGuestInterruptibility(PVMCPU pVCpu)
|
|---|
| 1947 | {
|
|---|
| 1948 | #if 1
|
|---|
| 1949 | /* Global-interrupt flag blocks pretty much everything we care about here. */
|
|---|
| 1950 | if (CPUMGetGuestGif(&pVCpu->cpum.s.Guest))
|
|---|
| 1951 | {
|
|---|
| 1952 | /*
|
|---|
| 1953 | * Physical interrupts are primarily blocked using EFLAGS. However, we cannot access
|
|---|
| 1954 | * it directly here. If and how EFLAGS are used depends on the context (nested-guest
|
|---|
| 1955 | * or raw-mode). Hence we use the function below which handles the details.
|
|---|
| 1956 | */
|
|---|
| 1957 | if ( !(pVCpu->cpum.s.Guest.eflags.uBoth & CPUMCTX_INHIBIT_ALL_MASK)
|
|---|
| 1958 | || ( !(pVCpu->cpum.s.Guest.eflags.uBoth & CPUMCTX_INHIBIT_NMI)
|
|---|
| 1959 | && pVCpu->cpum.s.Guest.uRipInhibitInt != pVCpu->cpum.s.Guest.rip))
|
|---|
| 1960 | {
|
|---|
| 1961 | /** @todo OPT: this next call should be inlined! */
|
|---|
| 1962 | if (CPUMIsGuestPhysIntrEnabled(pVCpu))
|
|---|
| 1963 | {
|
|---|
| 1964 | /** @todo OPT: type this out as it repeats tests. */
|
|---|
| 1965 | if ( !CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest)
|
|---|
| 1966 | || CPUMIsGuestVirtIntrEnabled(pVCpu))
|
|---|
| 1967 | return CPUMINTERRUPTIBILITY_UNRESTRAINED;
|
|---|
| 1968 |
|
|---|
| 1969 | /* Physical interrupts are enabled, but nested-guest virtual interrupts are disabled. */
|
|---|
| 1970 | return CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED;
|
|---|
| 1971 | }
|
|---|
| 1972 | return CPUMINTERRUPTIBILITY_INT_DISABLED;
|
|---|
| 1973 | }
|
|---|
| 1974 |
|
|---|
| 1975 | /*
|
|---|
| 1976 | * Blocking the delivery of NMIs during an interrupt shadow is CPU implementation
|
|---|
| 1977 | * specific. Therefore, in practice, we can't deliver an NMI in an interrupt shadow.
|
|---|
| 1978 | * However, there is some uncertainty regarding the converse, i.e. whether
|
|---|
| 1979 | * NMI-blocking until IRET blocks delivery of physical interrupts.
|
|---|
| 1980 | *
|
|---|
| 1981 | * See Intel spec. 25.4.1 "Event Blocking".
|
|---|
| 1982 | */
|
|---|
| 1983 | /** @todo r=bird: The above comment mixes up VMX root-mode and non-root. Section
|
|---|
| 1984 | * 25.4.1 is only applicable to VMX non-root mode. In root mode /
|
|---|
| 1985 | * non-VMX mode, I have not seen any evidence in the intel manuals that
|
|---|
| 1986 | * NMIs are not blocked when in an interrupt shadow. Section "6.7
|
|---|
| 1987 | * NONMASKABLE INTERRUPT (NMI)" in SDM 3A seems pretty clear to me.
|
|---|
| 1988 | */
|
|---|
| 1989 | if (!(pVCpu->cpum.s.Guest.eflags.uBoth & CPUMCTX_INHIBIT_NMI))
|
|---|
| 1990 | return CPUMINTERRUPTIBILITY_INT_INHIBITED;
|
|---|
| 1991 | return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
|
|---|
| 1992 | }
|
|---|
| 1993 | return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
|
|---|
| 1994 | #else
|
|---|
| 1995 | if (pVCpu->cpum.s.Guest.rflags.Bits.u1IF)
|
|---|
| 1996 | {
|
|---|
| 1997 | if (pVCpu->cpum.s.Guest.hwvirt.fGif)
|
|---|
| 1998 | {
|
|---|
| 1999 | if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS | VMCPU_FF_INHIBIT_INTERRUPTS))
|
|---|
| 2000 | return CPUMINTERRUPTIBILITY_UNRESTRAINED;
|
|---|
| 2001 |
|
|---|
| 2002 | /** @todo does blocking NMIs mean interrupts are also inhibited? */
|
|---|
| 2003 | if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
|
|---|
| 2004 | {
|
|---|
| 2005 | if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
|
|---|
| 2006 | return CPUMINTERRUPTIBILITY_INT_INHIBITED;
|
|---|
| 2007 | return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
|
|---|
| 2008 | }
|
|---|
| 2009 | AssertFailed();
|
|---|
| 2010 | return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
|
|---|
| 2011 | }
|
|---|
| 2012 | return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
|
|---|
| 2013 | }
|
|---|
| 2014 | else
|
|---|
| 2015 | {
|
|---|
| 2016 | if (pVCpu->cpum.s.Guest.hwvirt.fGif)
|
|---|
| 2017 | {
|
|---|
| 2018 | if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
|
|---|
| 2019 | return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
|
|---|
| 2020 | return CPUMINTERRUPTIBILITY_INT_DISABLED;
|
|---|
| 2021 | }
|
|---|
| 2022 | return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
|
|---|
| 2023 | }
|
|---|
| 2024 | #endif
|
|---|
| 2025 | }
|
|---|
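|  | /*
|  |  * Editorial sketch (not part of the original source): a hypothetical event
|  |  * injection loop interpreting the result of CPUMGetGuestInterruptibility.
|  |  * The enum values are the ones returned above; pVCpu is assumed in scope.
|  |  *
|  |  *     switch (CPUMGetGuestInterruptibility(pVCpu))
|  |  *     {
|  |  *         case CPUMINTERRUPTIBILITY_UNRESTRAINED:      // interrupts and NMIs deliverable
|  |  *             break;
|  |  *         case CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED: // nested-guest virtual ints masked
|  |  *         case CPUMINTERRUPTIBILITY_INT_DISABLED:      // EFLAGS.IF is clear
|  |  *         case CPUMINTERRUPTIBILITY_INT_INHIBITED:     // interrupt shadow (STI / MOV SS)
|  |  *         case CPUMINTERRUPTIBILITY_NMI_INHIBIT:       // NMI blocking until IRET
|  |  *         case CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT:    // GIF is clear
|  |  *         default:
|  |  *             break;                                   // re-evaluate later
|  |  *     }
|  |  */
|
|---|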
| 2026 |
|
|---|
| 2027 |
|
|---|
| 2028 | /**
|
|---|
| 2029 | * Checks whether the SVM nested-guest has physical interrupts enabled.
|
|---|
| 2030 | *
|
|---|
| 2031 | * @returns true if interrupts are enabled, false otherwise.
|
|---|
| 2032 | * @param pVCpu The cross context virtual CPU structure of the calling EMT.
|
|---|
| 2033 | * @param pCtx The guest-CPU context.
|
|---|
| 2034 | *
|
|---|
| 2035 | * @remarks This does -not- take into account the global-interrupt flag.
|
|---|
| 2036 | */
|
|---|
| 2037 | VMM_INT_DECL(bool) CPUMIsGuestSvmPhysIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
|
|---|
| 2038 | {
|
|---|
| 2039 | /** @todo Optimization: Avoid this function call and use a pointer to the
|
|---|
| 2040 | * relevant eflags instead (setup during VMRUN instruction emulation). */
|
|---|
| 2041 | Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
|
|---|
| 2042 |
|
|---|
| 2043 | X86EFLAGS fEFlags;
|
|---|
| 2044 | if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, pCtx))
|
|---|
| 2045 | fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
|
|---|
| 2046 | else
|
|---|
| 2047 | fEFlags.u = pCtx->eflags.u;
|
|---|
| 2048 |
|
|---|
| 2049 | return fEFlags.Bits.u1IF;
|
|---|
| 2050 | }
|
|---|
| 2051 |
|
|---|
| 2052 |
|
|---|
| 2053 | /**
|
|---|
| 2054 | * Checks whether the SVM nested-guest is in a state to receive virtual (setup
|
|---|
| 2055 | * for injection by VMRUN instruction) interrupts.
|
|---|
| 2056 | *
|
|---|
| 2057 | * @returns true if the nested-guest is in a state to receive virtual interrupts,
|
|---|
| 2058 | * false otherwise.
|
|---|
| 2059 | *
|
|---|
| 2060 | * @param pVCpu The cross context virtual CPU structure of the calling EMT.
|
|---|
| 2061 | * @param pCtx The guest-CPU context.
|
|---|
| 2062 | */
|
|---|
| 2063 | VMM_INT_DECL(bool) CPUMIsGuestSvmVirtIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
|
|---|
| 2064 | {
|
|---|
| 2065 | RT_NOREF(pVCpu);
|
|---|
| 2066 | Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
|
|---|
| 2067 |
|
|---|
| 2068 | PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.Vmcb.ctrl;
|
|---|
| 2069 | PCSVMINTCTRL pVmcbIntCtrl = &pVmcbCtrl->IntCtrl;
|
|---|
| 2070 | Assert(!pVmcbIntCtrl->n.u1VGifEnable); /* We don't support passing virtual-GIF feature to the guest yet. */
|
|---|
| 2071 | if ( !pVmcbIntCtrl->n.u1IgnoreTPR
|
|---|
| 2072 | && pVmcbIntCtrl->n.u4VIntrPrio <= pVmcbIntCtrl->n.u8VTPR)
|
|---|
| 2073 | return false;
|
|---|
| 2074 |
|
|---|
| 2075 | return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
|
|---|
| 2076 | }
|
|---|
| 2077 |
|
|---|
| 2078 |
|
|---|
| 2079 | /**
|
|---|
| 2080 | * Gets the pending SVM nested-guest interrupt vector.
|
|---|
| 2081 | *
|
|---|
| 2082 | * @returns The nested-guest interrupt to inject.
|
|---|
| 2083 | * @param pCtx The guest-CPU context.
|
|---|
| 2084 | */
|
|---|
| 2085 | VMM_INT_DECL(uint8_t) CPUMGetGuestSvmVirtIntrVector(PCCPUMCTX pCtx)
|
|---|
| 2086 | {
|
|---|
| 2087 | return pCtx->hwvirt.svm.Vmcb.ctrl.IntCtrl.n.u8VIntrVector;
|
|---|
| 2088 | }
|
|---|
| 2089 |
|
|---|
| 2090 |
|
|---|
| 2091 | /**
|
|---|
| 2092 | * Restores the host-state from the host-state save area as part of a \#VMEXIT.
|
|---|
| 2093 | *
|
|---|
| 2094 | * @param pVCpu The cross context virtual CPU structure of the calling EMT.
|
|---|
| 2095 | * @param pCtx The guest-CPU context.
|
|---|
| 2096 | */
|
|---|
| 2097 | VMM_INT_DECL(void) CPUMSvmVmExitRestoreHostState(PVMCPUCC pVCpu, PCPUMCTX pCtx)
|
|---|
| 2098 | {
|
|---|
| 2099 | /*
|
|---|
| 2100 | * Reload the guest's "host state".
|
|---|
| 2101 | */
|
|---|
| 2102 | PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
|
|---|
| 2103 | pCtx->es = pHostState->es;
|
|---|
| 2104 | pCtx->cs = pHostState->cs;
|
|---|
| 2105 | pCtx->ss = pHostState->ss;
|
|---|
| 2106 | pCtx->ds = pHostState->ds;
|
|---|
| 2107 | pCtx->gdtr = pHostState->gdtr;
|
|---|
| 2108 | pCtx->idtr = pHostState->idtr;
|
|---|
| 2109 | CPUMSetGuestEferMsrNoChecks(pVCpu, pCtx->msrEFER, pHostState->uEferMsr);
|
|---|
| 2110 | CPUMSetGuestCR0(pVCpu, pHostState->uCr0 | X86_CR0_PE);
|
|---|
| 2111 | pCtx->cr3 = pHostState->uCr3;
|
|---|
| 2112 | CPUMSetGuestCR4(pVCpu, pHostState->uCr4);
|
|---|
| 2113 | pCtx->rflags.u = pHostState->rflags.u;
|
|---|
| 2114 | pCtx->rflags.Bits.u1VM = 0;
|
|---|
| 2115 | pCtx->rip = pHostState->uRip;
|
|---|
| 2116 | pCtx->rsp = pHostState->uRsp;
|
|---|
| 2117 | pCtx->rax = pHostState->uRax;
|
|---|
| 2118 | pCtx->dr[7] &= ~(X86_DR7_ENABLED_MASK | X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
|
|---|
| 2119 | pCtx->dr[7] |= X86_DR7_RA1_MASK;
|
|---|
| 2120 | Assert(pCtx->ss.Attr.n.u2Dpl == 0);
|
|---|
| 2121 |
|
|---|
| 2122 | /** @todo if RIP is not canonical or outside the CS segment limit, we need to
|
|---|
| 2123 | * raise \#GP(0) in the guest. */
|
|---|
| 2124 |
|
|---|
| 2125 | /** @todo check the loaded host-state for consistency. Figure out what
|
|---|
| 2126 | * exactly this involves? */
|
|---|
| 2127 | }
|
|---|
| 2128 |
|
|---|
| 2129 |
|
|---|
| 2130 | /**
|
|---|
| 2131 | * Saves the host-state to the host-state save area as part of a VMRUN.
|
|---|
| 2132 | *
|
|---|
| 2133 | * @param pCtx The guest-CPU context.
|
|---|
| 2134 | * @param cbInstr The length of the VMRUN instruction in bytes.
|
|---|
| 2135 | */
|
|---|
| 2136 | VMM_INT_DECL(void) CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr)
|
|---|
| 2137 | {
|
|---|
| 2138 | PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
|
|---|
| 2139 | pHostState->es = pCtx->es;
|
|---|
| 2140 | pHostState->cs = pCtx->cs;
|
|---|
| 2141 | pHostState->ss = pCtx->ss;
|
|---|
| 2142 | pHostState->ds = pCtx->ds;
|
|---|
| 2143 | pHostState->gdtr = pCtx->gdtr;
|
|---|
| 2144 | pHostState->idtr = pCtx->idtr;
|
|---|
| 2145 | pHostState->uEferMsr = pCtx->msrEFER;
|
|---|
| 2146 | pHostState->uCr0 = pCtx->cr0;
|
|---|
| 2147 | pHostState->uCr3 = pCtx->cr3;
|
|---|
| 2148 | pHostState->uCr4 = pCtx->cr4;
|
|---|
| 2149 | pHostState->rflags.u = pCtx->rflags.u;
|
|---|
| 2150 | pHostState->uRip = pCtx->rip + cbInstr;
|
|---|
| 2151 | pHostState->uRsp = pCtx->rsp;
|
|---|
| 2152 | pHostState->uRax = pCtx->rax;
|
|---|
| 2153 | }
|
|---|
| 2154 |
|
|---|
| 2155 |
|
|---|
| 2156 | /**
|
|---|
| 2157 | * Applies the TSC offset of a nested-guest if any and returns the TSC value for the
|
|---|
| 2158 | * nested-guest.
|
|---|
| 2159 | *
|
|---|
| 2160 | * @returns The TSC value after applying any nested-guest TSC offset.
|
|---|
| 2161 | * @param pVCpu The cross context virtual CPU structure of the calling EMT.
|
|---|
| 2162 | * @param uTscValue The guest TSC.
|
|---|
| 2163 | *
|
|---|
| 2164 | * @sa CPUMRemoveNestedGuestTscOffset.
|
|---|
| 2165 | */
|
|---|
| 2166 | VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue)
|
|---|
| 2167 | {
|
|---|
| 2168 | PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
|
|---|
| 2169 | if (CPUMIsGuestInVmxNonRootMode(pCtx))
|
|---|
| 2170 | {
|
|---|
| 2171 | if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
|
|---|
| 2172 | return uTscValue + pCtx->hwvirt.vmx.Vmcs.u64TscOffset.u;
|
|---|
| 2173 | return uTscValue;
|
|---|
| 2174 | }
|
|---|
| 2175 |
|
|---|
| 2176 | if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
|
|---|
| 2177 | {
|
|---|
| 2178 | uint64_t offTsc;
|
|---|
| 2179 | if (!HMGetGuestSvmTscOffset(pVCpu, &offTsc))
|
|---|
| 2180 | offTsc = pCtx->hwvirt.svm.Vmcb.ctrl.u64TSCOffset;
|
|---|
| 2181 | return uTscValue + offTsc;
|
|---|
| 2182 | }
|
|---|
| 2183 | return uTscValue;
|
|---|
| 2184 | }
|
|---|
| 2185 |
|
|---|
| 2186 |
|
|---|
| 2187 | /**
|
|---|
| 2188 | * Removes the TSC offset of a nested-guest if any and returns the TSC value for the
|
|---|
| 2189 | * guest.
|
|---|
| 2190 | *
|
|---|
| 2191 | * @returns The TSC value after removing any nested-guest TSC offset.
|
|---|
| 2192 | * @param pVCpu The cross context virtual CPU structure of the calling EMT.
|
|---|
| 2193 | * @param uTscValue The nested-guest TSC.
|
|---|
| 2194 | *
|
|---|
| 2195 | * @sa CPUMApplyNestedGuestTscOffset.
|
|---|
| 2196 | */
|
|---|
| 2197 | VMM_INT_DECL(uint64_t) CPUMRemoveNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue)
|
|---|
| 2198 | {
|
|---|
| 2199 | PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
|
|---|
| 2200 | if (CPUMIsGuestInVmxNonRootMode(pCtx))
|
|---|
| 2201 | {
|
|---|
| 2202 | if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
|
|---|
| 2203 | return uTscValue - pCtx->hwvirt.vmx.Vmcs.u64TscOffset.u;
|
|---|
| 2204 | return uTscValue;
|
|---|
| 2205 | }
|
|---|
| 2206 |
|
|---|
| 2207 | if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
|
|---|
| 2208 | {
|
|---|
| 2209 | uint64_t offTsc;
|
|---|
| 2210 | if (!HMGetGuestSvmTscOffset(pVCpu, &offTsc))
|
|---|
| 2211 | offTsc = pCtx->hwvirt.svm.Vmcb.ctrl.u64TSCOffset;
|
|---|
| 2212 | return uTscValue - offTsc;
|
|---|
| 2213 | }
|
|---|
| 2214 | return uTscValue;
|
|---|
| 2215 | }
|
|---|
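|  | /*
|  |  * Editorial sketch (not part of the original source): while the nested-guest
|  |  * configuration stays unchanged, the two helpers above are inverses of each
|  |  * other. Assuming uGuestTsc holds a guest TSC reading:
|  |  *
|  |  *     uint64_t const uNestedTsc = CPUMApplyNestedGuestTscOffset(pVCpu, uGuestTsc);
|  |  *     Assert(CPUMRemoveNestedGuestTscOffset(pVCpu, uNestedTsc) == uGuestTsc);
|  |  */
|
|---|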
| 2216 |
|
|---|
| 2217 |
|
|---|
| 2218 | /**
|
|---|
| 2219 | * Used to dynamically import state residing in NEM or HM.
|
|---|
| 2220 | *
|
|---|
| 2221 | * This is a worker for the CPUM_IMPORT_EXTRN_RET() macro and various IEM ones.
|
|---|
| 2222 | *
|
|---|
| 2223 | * @returns VBox status code.
|
|---|
| 2224 | * @param pVCpu The cross context virtual CPU structure of the calling thread.
|
|---|
| 2225 | * @param fExtrnImport The fields to import.
|
|---|
| 2226 | * @thread EMT(pVCpu)
|
|---|
| 2227 | */
|
|---|
| 2228 | VMM_INT_DECL(int) CPUMImportGuestStateOnDemand(PVMCPUCC pVCpu, uint64_t fExtrnImport)
|
|---|
| 2229 | {
|
|---|
| 2230 | VMCPU_ASSERT_EMT(pVCpu);
|
|---|
| 2231 | if (pVCpu->cpum.s.Guest.fExtrn & fExtrnImport)
|
|---|
| 2232 | {
|
|---|
| 2233 | switch (pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_KEEPER_MASK)
|
|---|
| 2234 | {
|
|---|
| 2235 | case CPUMCTX_EXTRN_KEEPER_NEM:
|
|---|
| 2236 | {
|
|---|
| 2237 | int rc = NEMImportStateOnDemand(pVCpu, fExtrnImport);
|
|---|
| 2238 | Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
|
|---|
| 2239 | return rc;
|
|---|
| 2240 | }
|
|---|
| 2241 |
|
|---|
| 2242 | case CPUMCTX_EXTRN_KEEPER_HM:
|
|---|
| 2243 | {
|
|---|
| 2244 | #ifdef IN_RING0
|
|---|
| 2245 | int rc = HMR0ImportStateOnDemand(pVCpu, fExtrnImport);
|
|---|
| 2246 | Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
|
|---|
| 2247 | return rc;
|
|---|
| 2248 | #else
|
|---|
| 2249 | AssertLogRelMsgFailed(("TODO Fetch HM state: %#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport));
|
|---|
| 2250 | return VINF_SUCCESS;
|
|---|
| 2251 | #endif
|
|---|
| 2252 | }
|
|---|
| 2253 | default:
|
|---|
| 2254 | AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2);
|
|---|
| 2255 | }
|
|---|
| 2256 | }
|
|---|
| 2257 | return VINF_SUCCESS;
|
|---|
| 2258 | }
|
|---|
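|  | /*
|  |  * Editorial sketch (not part of the original source): callers normally go through
|  |  * the CPUM_IMPORT_EXTRN_RET() macro mentioned above, but a direct call with some
|  |  * illustrative CPUMCTX_EXTRN_XXX bits would look like this on the EMT:
|  |  *
|  |  *     int rc = CPUMImportGuestStateOnDemand(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4);
|  |  *     AssertRCReturn(rc, rc);
|  |  */
|
|---|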
| 2259 |
|
|---|
| 2260 |
|
|---|
| 2261 | /**
|
|---|
| 2262 | * Gets valid CR4 bits for the guest.
|
|---|
| 2263 | *
|
|---|
| 2264 | * @returns Valid CR4 bits.
|
|---|
| 2265 | * @param pVM The cross context VM structure.
|
|---|
| 2266 | */
|
|---|
| 2267 | VMM_INT_DECL(uint64_t) CPUMGetGuestCR4ValidMask(PVM pVM)
|
|---|
| 2268 | {
|
|---|
| 2269 | PCCPUMFEATURES pGuestFeatures = &pVM->cpum.s.GuestFeatures;
|
|---|
| 2270 | uint64_t fMask = X86_CR4_VME | X86_CR4_PVI
|
|---|
| 2271 | | X86_CR4_TSD | X86_CR4_DE
|
|---|
| 2272 | | X86_CR4_MCE | X86_CR4_PCE;
|
|---|
| 2273 | if (pGuestFeatures->fPae)
|
|---|
| 2274 | fMask |= X86_CR4_PAE;
|
|---|
| 2275 | if (pGuestFeatures->fPge)
|
|---|
| 2276 | fMask |= X86_CR4_PGE;
|
|---|
| 2277 | if (pGuestFeatures->fPse)
|
|---|
| 2278 | fMask |= X86_CR4_PSE;
|
|---|
| 2279 | if (pGuestFeatures->fFxSaveRstor)
|
|---|
| 2280 | fMask |= X86_CR4_OSFXSR;
|
|---|
| 2281 | if (pGuestFeatures->fVmx)
|
|---|
| 2282 | fMask |= X86_CR4_VMXE;
|
|---|
| 2283 | if (pGuestFeatures->fXSaveRstor)
|
|---|
| 2284 | fMask |= X86_CR4_OSXSAVE;
|
|---|
| 2285 | if (pGuestFeatures->fPcid)
|
|---|
| 2286 | fMask |= X86_CR4_PCIDE;
|
|---|
| 2287 | if (pGuestFeatures->fFsGsBase)
|
|---|
| 2288 | fMask |= X86_CR4_FSGSBASE;
|
|---|
| 2289 | if (pGuestFeatures->fSse)
|
|---|
| 2290 | fMask |= X86_CR4_OSXMMEEXCPT;
|
|---|
| 2291 | return fMask;
|
|---|
| 2292 | }
|
|---|
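|  | /*
|  |  * Editorial sketch (not part of the original source): a hypothetical handler for a
|  |  * guest CR4 write could use the mask above to decide whether to raise #GP(0);
|  |  * uNewCr4 is an assumed value coming from the guest.
|  |  *
|  |  *     if (uNewCr4 & ~CPUMGetGuestCR4ValidMask(pVM))
|  |  *         return VERR_CPUM_RAISE_GP_0;
|  |  */
|
|---|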
| 2293 |
|
|---|
| 2294 |
|
|---|
| 2295 | /**
|
|---|
| 2296 | * Sets the PAE PDPEs for the guest.
|
|---|
| 2297 | *
|
|---|
| 2298 | * @param pVCpu The cross context virtual CPU structure of the calling thread.
|
|---|
| 2299 | * @param paPaePdpes The PAE PDPEs to set.
|
|---|
| 2300 | */
|
|---|
| 2301 | VMM_INT_DECL(void) CPUMSetGuestPaePdpes(PVMCPU pVCpu, PCX86PDPE paPaePdpes)
|
|---|
| 2302 | {
|
|---|
| 2303 | Assert(paPaePdpes);
|
|---|
| 2304 | for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->cpum.s.Guest.aPaePdpes); i++)
|
|---|
| 2305 | pVCpu->cpum.s.Guest.aPaePdpes[i].u = paPaePdpes[i].u;
|
|---|
| 2306 | pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR3;
|
|---|
| 2307 | }
|
|---|
| 2308 |
|
|---|
| 2309 |
|
|---|
| 2310 | /**
|
|---|
| 2311 | * Gets the PAE PDPTEs for the guest.
|
|---|
| 2312 | *
|
|---|
| 2313 | * @param pVCpu The cross context virtual CPU structure of the calling thread.
|
|---|
| 2314 | * @param paPaePdpes Where to store the PAE PDPEs.
|
|---|
| 2315 | */
|
|---|
| 2316 | VMM_INT_DECL(void) CPUMGetGuestPaePdpes(PVMCPU pVCpu, PX86PDPE paPaePdpes)
|
|---|
| 2317 | {
|
|---|
| 2318 | Assert(paPaePdpes);
|
|---|
| 2319 | CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
|
|---|
| 2320 | for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->cpum.s.Guest.aPaePdpes); i++)
|
|---|
| 2321 | paPaePdpes[i].u = pVCpu->cpum.s.Guest.aPaePdpes[i].u;
|
|---|
| 2322 | }
|
|---|
| 2323 |
|
|---|
| 2324 |
|
|---|
| 2325 | /**
|
|---|
| 2326 | * Starts a VMX-preemption timer to expire as specified by the nested hypervisor.
|
|---|
| 2327 | *
|
|---|
| 2328 | * @returns VBox status code.
|
|---|
| 2329 | * @param pVCpu The cross context virtual CPU structure of the calling thread.
|
|---|
| 2330 | * @param uTimer The VMCS preemption timer value.
|
|---|
| 2331 | * @param cShift The VMX-preemption timer shift (usually based on guest
|
|---|
| 2332 | * VMX MSR rate).
|
|---|
| 2333 | * @param pu64EntryTick Where to store the current tick when the timer is
|
|---|
| 2334 | * programmed.
|
|---|
| 2335 | * @thread EMT(pVCpu)
|
|---|
| 2336 | */
|
|---|
| 2337 | VMM_INT_DECL(int) CPUMStartGuestVmxPremptTimer(PVMCPUCC pVCpu, uint32_t uTimer, uint8_t cShift, uint64_t *pu64EntryTick)
|
|---|
| 2338 | {
|
|---|
| 2339 | Assert(uTimer);
|
|---|
| 2340 | Assert(cShift <= 31);
|
|---|
| 2341 | Assert(pu64EntryTick);
|
|---|
| 2342 | VMCPU_ASSERT_EMT(pVCpu);
|
|---|
| 2343 | uint64_t const cTicksToNext = uTimer << cShift;
|
|---|
| 2344 | return TMTimerSetRelative(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.s.hNestedVmxPreemptTimer, cTicksToNext, pu64EntryTick);
|
|---|
| 2345 | }
|
|---|
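|  | /*
|  |  * Editorial note (not part of the original source): the timer is armed for
|  |  * uTimer << cShift TM ticks relative to now, e.g.:
|  |  *
|  |  *     uint64_t uEntryTick;
|  |  *     int rc = CPUMStartGuestVmxPremptTimer(pVCpu, 1000, 5, &uEntryTick);
|  |  *     // arms the timer for 1000 << 5 = 32000 ticks; uEntryTick receives the
|  |  *     // tick count at the moment the timer was programmed.
|  |  */
|
|---|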
| 2346 |
|
|---|
| 2347 |
|
|---|
| 2348 | /**
|
|---|
| 2349 | * Stops the VMX-preemption timer from firing.
|
|---|
| 2350 | *
|
|---|
| 2351 | * @returns VBox status code.
|
|---|
| 2352 | * @param pVCpu The cross context virtual CPU structure of the calling thread.
|
|---|
| 2353 | * @thread EMT.
|
|---|
| 2354 | *
|
|---|
| 2355 | * @remarks This can be called during VM reset, so we cannot assume it will be on
|
|---|
| 2356 | * the EMT corresponding to @c pVCpu.
|
|---|
| 2357 | */
|
|---|
| 2358 | VMM_INT_DECL(int) CPUMStopGuestVmxPremptTimer(PVMCPUCC pVCpu)
|
|---|
| 2359 | {
|
|---|
| 2360 | /*
|
|---|
| 2361 | * CPUM gets initialized before TM, so we defer creation of timers till CPUMR3InitCompleted().
|
|---|
| 2362 | * However, we still get called during CPUMR3Init() and hence we need to check if we have
|
|---|
| 2363 | * a valid timer object before trying to stop it.
|
|---|
| 2364 | */
|
|---|
| 2365 | int rc;
|
|---|
| 2366 | TMTIMERHANDLE hTimer = pVCpu->cpum.s.hNestedVmxPreemptTimer;
|
|---|
| 2367 | if (hTimer != NIL_TMTIMERHANDLE)
|
|---|
| 2368 | {
|
|---|
| 2369 | PVMCC pVM = pVCpu->CTX_SUFF(pVM);
|
|---|
| 2370 | rc = TMTimerLock(pVM, hTimer, VERR_IGNORED);
|
|---|
| 2371 | if (rc == VINF_SUCCESS)
|
|---|
| 2372 | {
|
|---|
| 2373 | if (TMTimerIsActive(pVM, hTimer))
|
|---|
| 2374 | TMTimerStop(pVM, hTimer);
|
|---|
| 2375 | TMTimerUnlock(pVM, hTimer);
|
|---|
| 2376 | }
|
|---|
| 2377 | }
|
|---|
| 2378 | else
|
|---|
| 2379 | rc = VERR_NOT_FOUND;
|
|---|
| 2380 | return rc;
|
|---|
| 2381 | }
|
|---|
| 2382 |
|
|---|
| 2383 |
|
|---|
| 2384 | /**
|
|---|
| 2385 | * Gets the read and write permission bits for an MSR in an MSR bitmap.
|
|---|
| 2386 | *
|
|---|
| 2387 | * @returns VMXMSRPM_XXX - the MSR permission.
|
|---|
| 2388 | * @param pvMsrBitmap Pointer to the MSR bitmap.
|
|---|
| 2389 | * @param idMsr The MSR to get permissions for.
|
|---|
| 2390 | *
|
|---|
| 2391 | * @sa hmR0VmxSetMsrPermission.
|
|---|
| 2392 | */
|
|---|
| 2393 | VMM_INT_DECL(uint32_t) CPUMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr)
|
|---|
| 2394 | {
|
|---|
| 2395 | AssertPtrReturn(pvMsrBitmap, VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR);
|
|---|
| 2396 |
|
|---|
| 2397 | uint8_t const * const pbMsrBitmap = (uint8_t const * const)pvMsrBitmap;
|
|---|
| 2398 |
|
|---|
| 2399 | /*
|
|---|
| 2400 | * MSR Layout:
|
|---|
| 2401 | * Byte index MSR range Interpreted as
|
|---|
| 2402 | * 0x000 - 0x3ff 0x00000000 - 0x00001fff Low MSR read bits.
|
|---|
| 2403 | * 0x400 - 0x7ff 0xc0000000 - 0xc0001fff High MSR read bits.
|
|---|
| 2404 | * 0x800 - 0xbff 0x00000000 - 0x00001fff Low MSR write bits.
|
|---|
| 2405 | * 0xc00 - 0xfff 0xc0000000 - 0xc0001fff High MSR write bits.
|
|---|
| 2406 | *
|
|---|
| 2407 | * A bit corresponding to an MSR within the above range causes a VM-exit
|
|---|
| 2408 | * if the bit is 1 on executions of RDMSR/WRMSR. If an MSR falls out of
|
|---|
| 2409 | * the MSR range, it always causes a VM-exit.
|
|---|
| 2410 | *
|
|---|
| 2411 | * See Intel spec. 24.6.9 "MSR-Bitmap Address".
|
|---|
| 2412 | */
|
|---|
| 2413 | uint32_t const offBitmapRead = 0;
|
|---|
| 2414 | uint32_t const offBitmapWrite = 0x800;
|
|---|
| 2415 | uint32_t offMsr;
|
|---|
| 2416 | uint32_t iBit;
|
|---|
| 2417 | if (idMsr <= UINT32_C(0x00001fff))
|
|---|
| 2418 | {
|
|---|
| 2419 | offMsr = 0;
|
|---|
| 2420 | iBit = idMsr;
|
|---|
| 2421 | }
|
|---|
| 2422 | else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
|
|---|
| 2423 | {
|
|---|
| 2424 | offMsr = 0x400;
|
|---|
| 2425 | iBit = idMsr - UINT32_C(0xc0000000);
|
|---|
| 2426 | }
|
|---|
| 2427 | else
|
|---|
| 2428 | {
|
|---|
| 2429 | LogFunc(("Warning! Out of range MSR %#RX32\n", idMsr));
|
|---|
| 2430 | return VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR;
|
|---|
| 2431 | }
|
|---|
| 2432 |
|
|---|
| 2433 | /*
|
|---|
| 2434 | * Get the MSR read permissions.
|
|---|
| 2435 | */
|
|---|
| 2436 | uint32_t fRet;
|
|---|
| 2437 | uint32_t const offMsrRead = offBitmapRead + offMsr;
|
|---|
| 2438 | Assert(offMsrRead + (iBit >> 3) < offBitmapWrite);
|
|---|
| 2439 | if (ASMBitTest(pbMsrBitmap, (offMsrRead << 3) + iBit))
|
|---|
| 2440 | fRet = VMXMSRPM_EXIT_RD;
|
|---|
| 2441 | else
|
|---|
| 2442 | fRet = VMXMSRPM_ALLOW_RD;
|
|---|
| 2443 |
|
|---|
| 2444 | /*
|
|---|
| 2445 | * Get the MSR write permissions.
|
|---|
| 2446 | */
|
|---|
| 2447 | uint32_t const offMsrWrite = offBitmapWrite + offMsr;
|
|---|
| 2448 | Assert(offMsrWrite + (iBit >> 3) < X86_PAGE_4K_SIZE);
|
|---|
| 2449 | if (ASMBitTest(pbMsrBitmap, (offMsrWrite << 3) + iBit))
|
|---|
| 2450 | fRet |= VMXMSRPM_EXIT_WR;
|
|---|
| 2451 | else
|
|---|
| 2452 | fRet |= VMXMSRPM_ALLOW_WR;
|
|---|
| 2453 |
|
|---|
| 2454 | Assert(VMXMSRPM_IS_FLAG_VALID(fRet));
|
|---|
| 2455 | return fRet;
|
|---|
| 2456 | }
|
|---|
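|  | /*
|  |  * Editorial worked example (not part of the original source) of the bitmap layout
|  |  * documented above: for MSR 0xc0000080 (EFER), offMsr = 0x400 and iBit = 0x80, so
|  |  * the read permission bit lives at byte 0x410, bit 0, and the write permission bit
|  |  * at byte 0xc10, bit 0, of the 4 KB bitmap.
|  |  *
|  |  *     uint32_t const fMsrpm = CPUMGetVmxMsrPermission(pvMsrBitmap, 0xc0000080);
|  |  *     if (fMsrpm & VMXMSRPM_EXIT_WR)
|  |  *     { } // nested-guest WRMSR to EFER must cause a VM-exit
|  |  */
|
|---|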
| 2457 |
|
|---|
| 2458 |
|
|---|
| 2459 | /**
|
|---|
| 2460 | * Checks the permission bits for the specified I/O port from the given I/O bitmap
|
|---|
| 2461 | * to see if it causes a VM-exit.
|
|---|
| 2462 | *
|
|---|
| 2463 | * @returns @c true if the I/O port access must cause a VM-exit, @c false otherwise.
|
|---|
| 2464 | * @param pbIoBitmap Pointer to I/O bitmap.
|
|---|
| 2465 | * @param uPort The I/O port being accessed.
|
|---|
| 2466 | * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
|
|---|
| 2467 | */
|
|---|
| 2468 | static bool cpumGetVmxIoBitmapPermission(uint8_t const *pbIoBitmap, uint16_t uPort, uint8_t cbAccess)
|
|---|
| 2469 | {
|
|---|
| 2470 | Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
|
|---|
| 2471 |
|
|---|
| 2472 | /*
|
|---|
| 2473 | * If the I/O port access wraps around the 16-bit port I/O space, we must cause a
|
|---|
| 2474 | * VM-exit.
|
|---|
| 2475 | *
|
|---|
| 2476 | * Reading 1, 2, 4 bytes at ports 0xffff, 0xfffe and 0xfffc are valid and do not
|
|---|
| 2477 | * constitute a wrap around. However, reading 2 bytes at port 0xffff or 4 bytes
|
|---|
| 2478 | * from port 0xffff/0xfffe/0xfffd constitute a wrap around. In other words, any
|
|---|
| 2479 | * access to -both- ports 0xffff and port 0 is a wrap around.
|
|---|
| 2480 | *
|
|---|
| 2481 | * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
|
|---|
| 2482 | */
|
|---|
| 2483 | uint32_t const uPortLast = uPort + cbAccess;
|
|---|
| 2484 | if (uPortLast > 0x10000)
|
|---|
| 2485 | return true;
|
|---|
| 2486 |
|
|---|
| 2487 | /*
|
|---|
| 2488 | * If any bit corresponding to the I/O access is set, we must cause a VM-exit.
|
|---|
| 2489 | */
|
|---|
| 2490 | uint16_t const offPerm = uPort >> 3; /* Byte offset of the port. */
|
|---|
| 2491 | uint16_t const idxPermBit = uPort - (offPerm << 3); /* Bit offset within byte. */
|
|---|
| 2492 | Assert(idxPermBit < 8);
|
|---|
| 2493 | static const uint8_t s_afMask[] = { 0x0, 0x1, 0x3, 0x7, 0xf }; /* Bit-mask for all access sizes. */
|
|---|
| 2494 | uint16_t const fMask = s_afMask[cbAccess] << idxPermBit; /* Bit-mask of the access. */
|
|---|
| 2495 |
|
|---|
| 2496 | /* Fetch 8 or 16-bits depending on whether the access spans 8-bit boundary. */
|
|---|
| 2497 | RTUINT16U uPerm;
|
|---|
| 2498 | uPerm.s.Lo = pbIoBitmap[offPerm];
|
|---|
| 2499 | if (idxPermBit + cbAccess > 8)
|
|---|
| 2500 | uPerm.s.Hi = pbIoBitmap[offPerm + 1];
|
|---|
| 2501 | else
|
|---|
| 2502 | uPerm.s.Hi = 0;
|
|---|
| 2503 |
|
|---|
| 2504 | /* If any bit for the access is 1, we must cause a VM-exit. */
|
|---|
| 2505 | if (uPerm.u & fMask)
|
|---|
| 2506 | return true;
|
|---|
| 2507 |
|
|---|
| 2508 | return false;
|
|---|
| 2509 | }
|
|---|
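|  | /*
|  |  * Editorial worked example (not part of the original source) of the wrap-around
|  |  * rule above: a 1-byte access to port 0xffff has uPortLast = 0x10000 and is
|  |  * checked against the bitmap, while a 2-byte access to port 0xffff has
|  |  * uPortLast = 0x10001 and therefore always causes a VM-exit:
|  |  *
|  |  *     Assert(cpumGetVmxIoBitmapPermission(pbIoBitmap, 0xffff, 2));
|  |  */
|
|---|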
| 2510 |
|
|---|
| 2511 |
|
|---|
| 2512 | /**
|
|---|
| 2513 | * Returns whether the given VMCS field is valid and supported for the guest.
|
|---|
| 2514 | *
|
|---|
| 2515 | * @param pVM The cross context VM structure.
|
|---|
| 2516 | * @param u64VmcsField The VMCS field.
|
|---|
| 2517 | *
|
|---|
| 2518 | * @remarks This takes into account the CPU features exposed to the guest.
|
|---|
| 2519 | */
|
|---|
| 2520 | VMM_INT_DECL(bool) CPUMIsGuestVmxVmcsFieldValid(PVMCC pVM, uint64_t u64VmcsField)
|
|---|
| 2521 | {
|
|---|
| 2522 | uint32_t const uFieldEncHi = RT_HI_U32(u64VmcsField);
|
|---|
| 2523 | uint32_t const uFieldEncLo = RT_LO_U32(u64VmcsField);
|
|---|
| 2524 | if (!uFieldEncHi)
|
|---|
| 2525 | { /* likely */ }
|
|---|
| 2526 | else
|
|---|
| 2527 | return false;
|
|---|
| 2528 |
|
|---|
| 2529 | PCCPUMFEATURES pFeat = &pVM->cpum.s.GuestFeatures;
|
|---|
| 2530 | switch (uFieldEncLo)
|
|---|
| 2531 | {
|
|---|
| 2532 | /*
|
|---|
| 2533 | * 16-bit fields.
|
|---|
| 2534 | */
|
|---|
| 2535 | /* Control fields. */
|
|---|
| 2536 | case VMX_VMCS16_VPID: return pFeat->fVmxVpid;
|
|---|
| 2537 | case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR: return pFeat->fVmxPostedInt;
|
|---|
| 2538 | case VMX_VMCS16_EPTP_INDEX: return pFeat->fVmxEptXcptVe;
|
|---|
| 2539 |
|
|---|
| 2540 | /* Guest-state fields. */
|
|---|
| 2541 | case VMX_VMCS16_GUEST_ES_SEL:
|
|---|
| 2542 | case VMX_VMCS16_GUEST_CS_SEL:
|
|---|
| 2543 | case VMX_VMCS16_GUEST_SS_SEL:
|
|---|
| 2544 | case VMX_VMCS16_GUEST_DS_SEL:
|
|---|
| 2545 | case VMX_VMCS16_GUEST_FS_SEL:
|
|---|
| 2546 | case VMX_VMCS16_GUEST_GS_SEL:
|
|---|
| 2547 | case VMX_VMCS16_GUEST_LDTR_SEL:
|
|---|
| 2548 | case VMX_VMCS16_GUEST_TR_SEL: return true;
|
|---|
| 2549 | case VMX_VMCS16_GUEST_INTR_STATUS: return pFeat->fVmxVirtIntDelivery;
|
|---|
| 2550 | case VMX_VMCS16_GUEST_PML_INDEX: return pFeat->fVmxPml;
|
|---|
| 2551 |
|
|---|
| 2552 | /* Host-state fields. */
|
|---|
| 2553 | case VMX_VMCS16_HOST_ES_SEL:
|
|---|
| 2554 | case VMX_VMCS16_HOST_CS_SEL:
|
|---|
| 2555 | case VMX_VMCS16_HOST_SS_SEL:
|
|---|
| 2556 | case VMX_VMCS16_HOST_DS_SEL:
|
|---|
| 2557 | case VMX_VMCS16_HOST_FS_SEL:
|
|---|
| 2558 | case VMX_VMCS16_HOST_GS_SEL:
|
|---|
| 2559 | case VMX_VMCS16_HOST_TR_SEL: return true;
|
|---|
| 2560 |
|
        /*
         * 64-bit fields.
         */
        /* Control fields. */
        case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
        case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
        case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
        case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH: return pFeat->fVmxUseIoBitmaps;
        case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
        case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH: return pFeat->fVmxUseMsrBitmaps;
        case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
        case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
        case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
        case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
        case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
        case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
        case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
        case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH: return true;
        case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
        case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH: return pFeat->fVmxPml;
        case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
        case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH: return true;
        case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
        case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
        case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
        case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH: return pFeat->fVmxVirtApicAccess;
        case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
        case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
        case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
        case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH: return pFeat->fVmxVmFunc;
        case VMX_VMCS64_CTRL_EPTP_FULL:
        case VMX_VMCS64_CTRL_EPTP_HIGH: return pFeat->fVmxEpt;
        case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
        case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
        case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
        case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
        case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
        case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
        case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
        case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH: return pFeat->fVmxVirtIntDelivery;
        case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
        case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
        {
            PCVMCPU pVCpu = pVM->CTX_SUFF(apCpus)[0];
            uint64_t const uVmFuncMsr = pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64VmFunc;
            return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
        }
        case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
        case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
        case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
        case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH: return pFeat->fVmxVmcsShadowing;
        case VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL:
        case VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
        case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
        case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
        case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
        case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH: return pFeat->fVmxUseTscScaling;
        case VMX_VMCS64_CTRL_PROC_EXEC3_FULL:
        case VMX_VMCS64_CTRL_PROC_EXEC3_HIGH: return pFeat->fVmxTertiaryExecCtls;

        /* Read-only data fields. */
        case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
        case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH: return pFeat->fVmxEpt;

        /* Guest-state fields. */
        case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
        case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
        case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
        case VMX_VMCS64_GUEST_DEBUGCTL_HIGH: return true;
        case VMX_VMCS64_GUEST_PAT_FULL:
        case VMX_VMCS64_GUEST_PAT_HIGH: return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
        case VMX_VMCS64_GUEST_EFER_FULL:
        case VMX_VMCS64_GUEST_EFER_HIGH: return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
        case VMX_VMCS64_GUEST_PDPTE0_FULL:
        case VMX_VMCS64_GUEST_PDPTE0_HIGH:
        case VMX_VMCS64_GUEST_PDPTE1_FULL:
        case VMX_VMCS64_GUEST_PDPTE1_HIGH:
        case VMX_VMCS64_GUEST_PDPTE2_FULL:
        case VMX_VMCS64_GUEST_PDPTE2_HIGH:
        case VMX_VMCS64_GUEST_PDPTE3_FULL:
        case VMX_VMCS64_GUEST_PDPTE3_HIGH: return pFeat->fVmxEpt;

        /* Host-state fields. */
        case VMX_VMCS64_HOST_PAT_FULL:
        case VMX_VMCS64_HOST_PAT_HIGH: return pFeat->fVmxExitLoadPatMsr;
        case VMX_VMCS64_HOST_EFER_FULL:
        case VMX_VMCS64_HOST_EFER_HIGH: return pFeat->fVmxExitLoadEferMsr;

        /*
         * 32-bit fields.
         */
        /* Control fields. */
        case VMX_VMCS32_CTRL_PIN_EXEC:
        case VMX_VMCS32_CTRL_PROC_EXEC:
        case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
        case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
        case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
        case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
        case VMX_VMCS32_CTRL_EXIT:
        case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
        case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
        case VMX_VMCS32_CTRL_ENTRY:
        case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
        case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
        case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
        case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH: return true;
        case VMX_VMCS32_CTRL_TPR_THRESHOLD: return pFeat->fVmxUseTprShadow;
        case VMX_VMCS32_CTRL_PROC_EXEC2: return pFeat->fVmxSecondaryExecCtls;
        case VMX_VMCS32_CTRL_PLE_GAP:
        case VMX_VMCS32_CTRL_PLE_WINDOW: return pFeat->fVmxPauseLoopExit;

        /* Read-only data fields. */
        case VMX_VMCS32_RO_VM_INSTR_ERROR:
        case VMX_VMCS32_RO_EXIT_REASON:
        case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
        case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
        case VMX_VMCS32_RO_IDT_VECTORING_INFO:
        case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
        case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
        case VMX_VMCS32_RO_EXIT_INSTR_INFO: return true;

        /* Guest-state fields. */
        case VMX_VMCS32_GUEST_ES_LIMIT:
        case VMX_VMCS32_GUEST_CS_LIMIT:
        case VMX_VMCS32_GUEST_SS_LIMIT:
        case VMX_VMCS32_GUEST_DS_LIMIT:
        case VMX_VMCS32_GUEST_FS_LIMIT:
        case VMX_VMCS32_GUEST_GS_LIMIT:
        case VMX_VMCS32_GUEST_LDTR_LIMIT:
        case VMX_VMCS32_GUEST_TR_LIMIT:
        case VMX_VMCS32_GUEST_GDTR_LIMIT:
        case VMX_VMCS32_GUEST_IDTR_LIMIT:
        case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
        case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
        case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
        case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
        case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
        case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
        case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
        case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
        case VMX_VMCS32_GUEST_INT_STATE:
        case VMX_VMCS32_GUEST_ACTIVITY_STATE:
        case VMX_VMCS32_GUEST_SMBASE:
        case VMX_VMCS32_GUEST_SYSENTER_CS: return true;
        case VMX_VMCS32_PREEMPT_TIMER_VALUE: return pFeat->fVmxPreemptTimer;

        /* Host-state fields. */
        case VMX_VMCS32_HOST_SYSENTER_CS: return true;

        /*
         * Natural-width fields.
         */
        /* Control fields. */
        case VMX_VMCS_CTRL_CR0_MASK:
        case VMX_VMCS_CTRL_CR4_MASK:
        case VMX_VMCS_CTRL_CR0_READ_SHADOW:
        case VMX_VMCS_CTRL_CR4_READ_SHADOW:
        case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
        case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
        case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
        case VMX_VMCS_CTRL_CR3_TARGET_VAL3: return true;

        /* Read-only data fields. */
        case VMX_VMCS_RO_EXIT_QUALIFICATION:
        case VMX_VMCS_RO_IO_RCX:
        case VMX_VMCS_RO_IO_RSI:
        case VMX_VMCS_RO_IO_RDI:
        case VMX_VMCS_RO_IO_RIP:
        case VMX_VMCS_RO_GUEST_LINEAR_ADDR: return true;

        /* Guest-state fields. */
        case VMX_VMCS_GUEST_CR0:
        case VMX_VMCS_GUEST_CR3:
        case VMX_VMCS_GUEST_CR4:
        case VMX_VMCS_GUEST_ES_BASE:
        case VMX_VMCS_GUEST_CS_BASE:
        case VMX_VMCS_GUEST_SS_BASE:
        case VMX_VMCS_GUEST_DS_BASE:
        case VMX_VMCS_GUEST_FS_BASE:
        case VMX_VMCS_GUEST_GS_BASE:
        case VMX_VMCS_GUEST_LDTR_BASE:
        case VMX_VMCS_GUEST_TR_BASE:
        case VMX_VMCS_GUEST_GDTR_BASE:
        case VMX_VMCS_GUEST_IDTR_BASE:
        case VMX_VMCS_GUEST_DR7:
        case VMX_VMCS_GUEST_RSP:
        case VMX_VMCS_GUEST_RIP:
        case VMX_VMCS_GUEST_RFLAGS:
        case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
        case VMX_VMCS_GUEST_SYSENTER_ESP:
        case VMX_VMCS_GUEST_SYSENTER_EIP: return true;

        /* Host-state fields. */
        case VMX_VMCS_HOST_CR0:
        case VMX_VMCS_HOST_CR3:
        case VMX_VMCS_HOST_CR4:
        case VMX_VMCS_HOST_FS_BASE:
        case VMX_VMCS_HOST_GS_BASE:
        case VMX_VMCS_HOST_TR_BASE:
        case VMX_VMCS_HOST_GDTR_BASE:
        case VMX_VMCS_HOST_IDTR_BASE:
        case VMX_VMCS_HOST_SYSENTER_ESP:
        case VMX_VMCS_HOST_SYSENTER_EIP:
        case VMX_VMCS_HOST_RSP:
        case VMX_VMCS_HOST_RIP: return true;
    }

    return false;
}
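
/*
 * Illustrative note, not part of the original source: the predicate above gates VMCS
 * field encodings on the features exposed to the guest.  For example, with a guest
 * feature set that reports VPID but not EPT, VMX_VMCS16_VPID is accepted while
 * VMX_VMCS64_CTRL_EPTP_FULL is rejected, so a nested hypervisor touching the latter
 * via VMREAD/VMWRITE sees it treated as an unsupported VMCS component.
 */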


/**
 * Checks whether the given I/O access should cause a nested-guest VM-exit.
 *
 * @returns @c true if it causes a VM-exit, @c false otherwise.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   u16Port     The I/O port being accessed.
 * @param   cbAccess    The size of the I/O access in bytes (1, 2 or 4).
 */
VMM_INT_DECL(bool) CPUMIsGuestVmxIoInterceptSet(PCVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess)
{
    PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
    if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_UNCOND_IO_EXIT))
        return true;

    if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_USE_IO_BITMAPS))
        return cpumGetVmxIoBitmapPermission(pCtx->hwvirt.vmx.abIoBitmap, u16Port, cbAccess);

    return false;
}


/**
 * Checks whether the Mov-to-CR3 instruction causes a nested-guest VM-exit.
 *
 * @returns @c true if it causes a VM-exit, @c false otherwise.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   uNewCr3     The CR3 value being written.
 */
VMM_INT_DECL(bool) CPUMIsGuestVmxMovToCr3InterceptSet(PVMCPU pVCpu, uint64_t uNewCr3)
{
    /*
     * If the CR3-load exiting control is set and the new CR3 value does not
     * match any of the CR3-target values in the VMCS, we must cause a VM-exit.
     *
     * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
     */
    PCCPUMCTX const pCtx = &pVCpu->cpum.s.Guest;
    if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_CR3_LOAD_EXIT))
    {
        uint32_t const uCr3TargetCount = pCtx->hwvirt.vmx.Vmcs.u32Cr3TargetCount;
        Assert(uCr3TargetCount <= VMX_V_CR3_TARGET_COUNT);

        /* If the CR3-target count is 0, cause a VM-exit. */
        if (uCr3TargetCount == 0)
            return true;

        /* If the CR3 being written doesn't match any of the target values, cause a VM-exit. */
        AssertCompile(VMX_V_CR3_TARGET_COUNT == 4);
        if (   uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target0.u
            && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target1.u
            && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target2.u
            && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target3.u)
            return true;
    }
    return false;
}
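
/*
 * Worked example, not part of the original source: assume the nested hypervisor sets
 * VMX_PROC_CTLS_CR3_LOAD_EXIT, a CR3-target count of 2, u64Cr3Target0 = 0x1000 and
 * u64Cr3Target1 = 0x2000 (and the remaining target fields hold other values).  A
 * nested-guest "mov cr3, 0x2000" then matches a target value and does not VM-exit,
 * whereas "mov cr3, 0x3000" matches none of them and is intercepted per the logic
 * above.
 */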


/**
 * Checks whether a VMREAD or VMWRITE instruction for the given VMCS field causes a
 * VM-exit or not.
 *
 * @returns @c true if the VMREAD/VMWRITE is intercepted, @c false otherwise.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uExitReason     The VM-exit reason (VMX_EXIT_VMREAD or
 *                          VMX_EXIT_VMWRITE).
 * @param   u64VmcsField    The VMCS field.
 */
VMM_INT_DECL(bool) CPUMIsGuestVmxVmreadVmwriteInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint64_t u64VmcsField)
{
    Assert(CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest));
    Assert(   uExitReason == VMX_EXIT_VMREAD
           || uExitReason == VMX_EXIT_VMWRITE);

    /*
     * Without VMCS shadowing, all VMREAD and VMWRITE instructions are intercepted.
     */
    if (!CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.s.Guest, VMX_PROC_CTLS2_VMCS_SHADOWING))
        return true;

    /*
     * If any reserved bit in the 64-bit VMCS field encoding is set, the VMREAD/VMWRITE
     * is intercepted. This excludes any reserved bits in the valid parts of the field
     * encoding (i.e. bit 12).
     */
    if (u64VmcsField & VMX_VMCSFIELD_RSVD_MASK)
        return true;

    /*
     * Finally, consult the VMREAD/VMWRITE bitmap to determine whether the instruction
     * is intercepted or not.
     */
    uint32_t const u32VmcsField = RT_LO_U32(u64VmcsField);
    uint8_t const * const pbBitmap = uExitReason == VMX_EXIT_VMREAD
                                   ? &pVCpu->cpum.s.Guest.hwvirt.vmx.abVmreadBitmap[0]
                                   : &pVCpu->cpum.s.Guest.hwvirt.vmx.abVmwriteBitmap[0];
    Assert(pbBitmap);
    Assert(u32VmcsField >> 3 < VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
    return ASMBitTest(pbBitmap, (u32VmcsField << 3) + (u32VmcsField & 7));
}
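
/*
 * Minimal usage sketch, not part of the original source: an emulator handling VMREAD
 * in VMX non-root mode would typically query this predicate first and only touch the
 * shadow VMCS when no intercept is signalled, along the lines of:
 *
 *     if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, VMX_EXIT_VMREAD, u64FieldEnc))
 *     {
 *         // reflect a VMREAD VM-exit to the nested hypervisor
 *     }
 *     else
 *     {
 *         // read the field from the shadow VMCS
 *     }
 *
 * u64FieldEnc and the two branch bodies are placeholders for this sketch.
 */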



/**
 * Determines whether the given I/O access should cause a nested-guest \#VMEXIT.
 *
 * @param   pvIoBitmap      Pointer to the nested-guest IO bitmap.
 * @param   u16Port         The IO port being accessed.
 * @param   enmIoType       The type of IO access.
 * @param   cbReg           The IO operand size in bytes.
 * @param   cAddrSizeBits   The address size bits (16, 32 or 64).
 * @param   iEffSeg         The effective segment number.
 * @param   fRep            Whether this is a repeating IO instruction (REP prefix).
 * @param   fStrIo          Whether this is a string IO instruction.
 * @param   pIoExitInfo     Pointer to the SVMIOIOEXITINFO struct to be filled.
 *                          Optional, can be NULL.
 */
VMM_INT_DECL(bool) CPUMIsSvmIoInterceptSet(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
                                           uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
                                           PSVMIOIOEXITINFO pIoExitInfo)
{
    Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
    Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);

    /*
     * The IOPM layout:
     * Each bit represents one 8-bit port. That makes a total of 0..65535 bits or
     * two 4K pages.
     *
     * For IO instructions that access more than a single byte, the permission bits
     * for all bytes are checked; if any bit is set to 1, the IO access is intercepted.
     *
     * Since it's possible to do a 32-bit IO access at port 65534 (accessing 4 bytes),
     * we need 3 extra bits beyond the second 4K page.
     */
    static const uint16_t s_auSizeMasks[] = { 0, 1, 3, 0, 0xf, 0, 0, 0 };

    uint16_t const offIopm   = u16Port >> 3;
    uint16_t const fSizeMask = s_auSizeMasks[(cAddrSizeBits >> SVM_IOIO_OP_SIZE_SHIFT) & 7];
    uint8_t  const cShift    = u16Port - (offIopm << 3);
    uint16_t const fIopmMask = (1 << cShift) | (fSizeMask << cShift);

    uint8_t const *pbIopm = (uint8_t *)pvIoBitmap;
    Assert(pbIopm);
    pbIopm += offIopm;
    uint16_t const u16Iopm = *(uint16_t *)pbIopm;
    if (u16Iopm & fIopmMask)
    {
        if (pIoExitInfo)
        {
            static const uint32_t s_auIoOpSize[] =
            { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };

            static const uint32_t s_auIoAddrSize[] =
            { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };

            pIoExitInfo->u         = s_auIoOpSize[cbReg & 7];
            pIoExitInfo->u        |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
            pIoExitInfo->n.u1Str   = fStrIo;
            pIoExitInfo->n.u1Rep   = fRep;
            pIoExitInfo->n.u3Seg   = iEffSeg & 7;
            pIoExitInfo->n.u1Type  = enmIoType;
            pIoExitInfo->n.u16Port = u16Port;
        }
        return true;
    }

    /** @todo remove later (for debugging as VirtualBox always traps all IO
     *        intercepts). */
    AssertMsgFailed(("CPUMSvmIsIOInterceptActive: We expect an IO intercept here!\n"));
    return false;
}
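
/*
 * Worked example for the IOPM layout described above, not part of the original
 * source: port 0x3f8 maps to IOPM byte 0x3f8 / 8 = 0x7f, bit 0x3f8 % 8 = 0.  Per the
 * layout, a 2-byte access at that port must also be permitted for port 0x3f9, so the
 * bit for that port (bit 1 of the same byte) is checked as well; if either bit is
 * set, the access is intercepted.
 */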


/**
 * Gets the MSR permission bitmap byte and bit offset for the specified MSR.
 *
 * @returns VBox status code.
 * @param   idMsr           The MSR being requested.
 * @param   pbOffMsrpm      Where to store the byte offset in the MSR permission
 *                          bitmap for @a idMsr.
 * @param   puMsrpmBit      Where to store the bit offset starting at the byte
 *                          returned in @a pbOffMsrpm.
 */
VMM_INT_DECL(int) CPUMGetSvmMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit)
{
    Assert(pbOffMsrpm);
    Assert(puMsrpmBit);

    /*
     * MSRPM Layout:
     * Byte offset          MSR range
     * 0x000  - 0x7ff       0x00000000 - 0x00001fff
     * 0x800  - 0xfff       0xc0000000 - 0xc0001fff
     * 0x1000 - 0x17ff      0xc0010000 - 0xc0011fff
     * 0x1800 - 0x1fff      Reserved
     *
     * Each MSR is represented by 2 permission bits (read and write).
     */
    if (idMsr <= 0x00001fff)
    {
        /* Pentium-compatible MSRs. */
        uint32_t const bitoffMsr = idMsr << 1;
        *pbOffMsrpm = bitoffMsr >> 3;
        *puMsrpmBit = bitoffMsr & 7;
        return VINF_SUCCESS;
    }

    if (   idMsr >= 0xc0000000
        && idMsr <= 0xc0001fff)
    {
        /* AMD Sixth Generation x86 Processor MSRs. */
        uint32_t const bitoffMsr = (idMsr - 0xc0000000) << 1;
        *pbOffMsrpm = 0x800 + (bitoffMsr >> 3);
        *puMsrpmBit = bitoffMsr & 7;
        return VINF_SUCCESS;
    }

    if (   idMsr >= 0xc0010000
        && idMsr <= 0xc0011fff)
    {
        /* AMD Seventh and Eighth Generation Processor MSRs. */
        uint32_t const bitoffMsr = (idMsr - 0xc0010000) << 1;
        *pbOffMsrpm = 0x1000 + (bitoffMsr >> 3);
        *puMsrpmBit = bitoffMsr & 7;
        return VINF_SUCCESS;
    }

    *pbOffMsrpm = 0;
    *puMsrpmBit = 0;
    return VERR_OUT_OF_RANGE;
}
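
/*
 * Worked example, not part of the original source: for MSR 0xc0000080 (EFER) the bit
 * offset is (0xc0000080 - 0xc0000000) * 2 = 0x100, so *pbOffMsrpm = 0x800 + 0x100 / 8
 * = 0x820 and *puMsrpmBit = 0x100 % 8 = 0; the read-intercept bit sits at that
 * position and the write-intercept bit immediately follows it.
 */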


/**
 * Checks whether the guest is in VMX non-root mode and using EPT paging.
 *
 * @returns @c true if in VMX non-root operation with EPT, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(bool) CPUMIsGuestVmxEptPagingEnabled(PCVMCPUCC pVCpu)
{
    return CPUMIsGuestVmxEptPagingEnabledEx(&pVCpu->cpum.s.Guest);
}


/**
 * Checks whether the guest is in VMX non-root mode and using EPT paging and the
 * nested-guest is in PAE mode.
 *
 * @returns @c true if in VMX non-root operation with EPT and the nested-guest is in
 *          PAE mode, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(bool) CPUMIsGuestVmxEptPaePagingEnabled(PCVMCPUCC pVCpu)
{
    return CPUMIsGuestVmxEptPagingEnabledEx(&pVCpu->cpum.s.Guest)
        && CPUMIsGuestInPAEModeEx(&pVCpu->cpum.s.Guest);
}


/**
 * Returns the guest-physical address of the APIC-access page when executing a
 * nested-guest.
 *
 * @returns The APIC-access page guest-physical address.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(uint64_t) CPUMGetGuestVmxApicAccessPageAddr(PCVMCPUCC pVCpu)
{
    return CPUMGetGuestVmxApicAccessPageAddrEx(&pVCpu->cpum.s.Guest);
}
