VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp@43657

Last change on this file since 43657 was 43657, checked in by vboxsync, 12 years ago

VMM: APIC refactor. Moved APIC base MSR to the VCPU (where it belongs) for lockless accesses.

/* $Id: CPUMAllRegs.cpp 43657 2012-10-16 15:34:05Z vboxsync $ */
/** @file
 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
 */

/*
 * Copyright (C) 2006-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_CPUM
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/mm.h>
#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
# include <VBox/vmm/selm.h>
#endif
#include "CPUMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/err.h>
#include <VBox/dis.h>
#include <VBox/log.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/tm.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#ifdef IN_RING3
# include <iprt/thread.h>
#endif

/** Disable stack frame pointer generation here. */
#if defined(_MSC_VER) && !defined(DEBUG)
# pragma optimize("y", off)
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/**
 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
 *
 * @returns Pointer to the Virtual CPU.
 * @param   a_pGuestCtx     Pointer to the guest context.
 */
#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)

/**
 * Lazily loads the hidden parts of a selector register when using raw-mode.
 */
#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
    do \
    { \
        if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)) \
            cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg); \
    } while (0)
#else
# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg));
#endif


#ifdef VBOX_WITH_RAW_MODE_NOT_R0

/**
 * Does the lazy hidden selector register loading.
 *
 * @param   pVCpu       The current Virtual CPU.
 * @param   pSReg       The selector register to lazily load hidden parts of.
 */
static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
{
    Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
    Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM)));
    Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT);

    if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
    {
        /* V8086 mode - Tightly controlled environment, no question about the limit or flags. */
        pSReg->Attr.u            = 0;
        pSReg->Attr.n.u4Type     = pSReg == &pVCpu->cpum.s.Guest.cs ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
        pSReg->Attr.n.u1DescType = 1; /* code/data segment */
        pSReg->Attr.n.u2Dpl      = 3;
        pSReg->Attr.n.u1Present  = 1;
        pSReg->u32Limit          = 0x0000ffff;
        pSReg->u64Base           = (uint32_t)pSReg->Sel << 4;
        pSReg->ValidSel          = pSReg->Sel;
        pSReg->fFlags            = CPUMSELREG_FLAGS_VALID;
        /** @todo Check what the accessed bit should be (VT-x and AMD-V). */
    }
    else if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
    {
        /* Real mode - leave the limit and flags alone here, at least for now. */
        pSReg->u64Base  = (uint32_t)pSReg->Sel << 4;
        pSReg->ValidSel = pSReg->Sel;
        pSReg->fFlags   = CPUMSELREG_FLAGS_VALID;
    }
    else
    {
        /* Protected mode - get it from the selector descriptor tables. */
        if (!(pSReg->Sel & X86_SEL_MASK_OFF_RPL))
        {
            Assert(!CPUMIsGuestInLongMode(pVCpu));
            pSReg->Sel      = 0;
            pSReg->u64Base  = 0;
            pSReg->u32Limit = 0;
            pSReg->Attr.u   = 0;
            pSReg->ValidSel = 0;
            pSReg->fFlags   = CPUMSELREG_FLAGS_VALID;
            /** @todo see todo in iemHlpLoadNullDataSelectorProt. */
        }
        else
            SELMLoadHiddenSelectorReg(pVCpu, &pVCpu->cpum.s.Guest, pSReg);
    }
}
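

/*
 * Worked example for the real-mode/V8086 base computation above (sketch only,
 * not part of the original file; cpumExampleRealModeBase is a made-up name):
 * the 16-bit selector is shifted left by four bits, so selector 0x1234 yields
 * base 0x00012340, and with the 0xffff limit the segment spans
 * 0x12340..0x2233f.
 */
#if 0 /* illustrative sketch, not built */
static uint64_t cpumExampleRealModeBase(uint16_t uSel)
{
    /* Same arithmetic as pSReg->u64Base = (uint32_t)pSReg->Sel << 4 above. */
    return (uint64_t)((uint32_t)uSel << 4);
}
#endif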


/**
 * Makes sure the hidden CS and SS selector registers are valid, loading them if
 * necessary.
 *
 * @param   pVCpu       The current virtual CPU.
 */
VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu)
{
    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
}


/**
 * Loads the hidden parts of a selector register.
 *
 * @param   pVCpu       The current virtual CPU.
 * @param   pSReg       The selector register to load the hidden parts of.
 */
VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
{
    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg);
}

#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */


/**
 * Obsolete.
 *
 * We don't support nested hypervisor context interrupts or traps. Life is much
 * simpler when we don't. It's also slightly faster at times.
 *
 * @param   pVCpu       Handle to the virtual CPU.
 */
VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
{
    return CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
}


/**
 * Gets the pointer to the hypervisor CPU context structure of a virtual CPU.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 */
VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu)
{
    return &pVCpu->cpum.s.Hyper;
}


VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
{
    pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
    pVCpu->cpum.s.Hyper.gdtr.pGdt  = addr;
}


VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
{
    pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
    pVCpu->cpum.s.Hyper.idtr.pIdt  = addr;
}


VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
{
    pVCpu->cpum.s.Hyper.cr3 = cr3;

#ifdef IN_RC
    /* Update the current CR3. */
    ASMSetCR3(cr3);
#endif
}

VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.cr3;
}


VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
{
    pVCpu->cpum.s.Hyper.cs.Sel = SelCS;
}


VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
{
    pVCpu->cpum.s.Hyper.ds.Sel = SelDS;
}


VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
{
    pVCpu->cpum.s.Hyper.es.Sel = SelES;
}


VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
{
    pVCpu->cpum.s.Hyper.fs.Sel = SelFS;
}


VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
{
    pVCpu->cpum.s.Hyper.gs.Sel = SelGS;
}


VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
{
    pVCpu->cpum.s.Hyper.ss.Sel = SelSS;
}


VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
{
    pVCpu->cpum.s.Hyper.esp = u32ESP;
}


VMMDECL(void) CPUMSetHyperEDX(PVMCPU pVCpu, uint32_t u32EDX)
{
    pVCpu->cpum.s.Hyper.edx = u32EDX;
}


VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
{
    pVCpu->cpum.s.Hyper.eflags.u32 = Efl;
    return VINF_SUCCESS;
}


VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
{
    pVCpu->cpum.s.Hyper.eip = u32EIP;
}


/**
 * Used by VMMR3RawRunGC to reinitialize the general raw-mode context registers,
 * EFLAGS and EIP prior to resuming guest execution.
 *
 * All general registers not given as parameters will be set to 0. The EFLAGS
 * register will be set to sane values for C/C++ code execution with interrupts
 * disabled and IOPL 0.
 *
 * @param   pVCpu       The current virtual CPU.
 * @param   u32EIP      The EIP value.
 * @param   u32ESP      The ESP value.
 * @param   u32EAX      The EAX value.
 * @param   u32EDX      The EDX value.
 */
VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX)
{
    pVCpu->cpum.s.Hyper.eip      = u32EIP;
    pVCpu->cpum.s.Hyper.esp      = u32ESP;
    pVCpu->cpum.s.Hyper.eax      = u32EAX;
    pVCpu->cpum.s.Hyper.edx      = u32EDX;
    pVCpu->cpum.s.Hyper.ecx      = 0;
    pVCpu->cpum.s.Hyper.ebx      = 0;
    pVCpu->cpum.s.Hyper.ebp      = 0;
    pVCpu->cpum.s.Hyper.esi      = 0;
    pVCpu->cpum.s.Hyper.edi      = 0;
    pVCpu->cpum.s.Hyper.eflags.u = X86_EFL_1;
}
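

/*
 * Hypothetical caller sketch for CPUMSetHyperState (illustrative only; the
 * function name and the entry/stack values below are made up): everything not
 * passed in is zeroed and EFLAGS becomes X86_EFL_1, i.e. interrupts disabled
 * and IOPL 0, as the doc comment above describes.
 */
#if 0 /* illustrative sketch, not built */
static void vmmExampleReinitRawModeCtx(PVMCPU pVCpu)
{
    uint32_t const u32Eip = 0xc0100000; /* hypothetical raw-mode entry point */
    uint32_t const u32Esp = 0xc0200000; /* hypothetical raw-mode stack top */
    CPUMSetHyperState(pVCpu, u32Eip, u32Esp, 0 /*u32EAX*/, 0 /*u32EDX*/);
    Assert(CPUMGetHyperEFlags(pVCpu) == X86_EFL_1);
}
#endif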


VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
{
    pVCpu->cpum.s.Hyper.tr.Sel = SelTR;
}


VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
{
    pVCpu->cpum.s.Hyper.ldtr.Sel = SelLDTR;
}


VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
{
    pVCpu->cpum.s.Hyper.dr[0] = uDr0;
    /** @todo in GC we must load it! */
}


VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
{
    pVCpu->cpum.s.Hyper.dr[1] = uDr1;
    /** @todo in GC we must load it! */
}


VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
{
    pVCpu->cpum.s.Hyper.dr[2] = uDr2;
    /** @todo in GC we must load it! */
}


VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
{
    pVCpu->cpum.s.Hyper.dr[3] = uDr3;
    /** @todo in GC we must load it! */
}


VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
{
    pVCpu->cpum.s.Hyper.dr[6] = uDr6;
    /** @todo in GC we must load it! */
}


VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
{
    pVCpu->cpum.s.Hyper.dr[7] = uDr7;
    /** @todo in GC we must load it! */
}


VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.cs.Sel;
}


VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.ds.Sel;
}


VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.es.Sel;
}


VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.fs.Sel;
}


VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.gs.Sel;
}


VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.ss.Sel;
}


VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.eax;
}


VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.ebx;
}


VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.ecx;
}


VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.edx;
}


VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.esi;
}


VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.edi;
}


VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.ebp;
}


VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.esp;
}


VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.eflags.u32;
}


VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.eip;
}


VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.rip;
}


VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
{
    if (pcbLimit)
        *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
    return pVCpu->cpum.s.Hyper.idtr.pIdt;
}


VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
{
    if (pcbLimit)
        *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
    return pVCpu->cpum.s.Hyper.gdtr.pGdt;
}


VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.ldtr.Sel;
}


VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.dr[0];
}


VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.dr[1];
}


VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.dr[2];
}


VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.dr[3];
}


VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.dr[6];
}


VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Hyper.dr[7];
}


/**
 * Gets the pointer to the internal CPUMCTXCORE structure.
 * This is only for reading in order to save a few calls.
 *
 * @param   pVCpu       Handle to the virtual cpu.
 */
VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
{
    return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
}


/**
 * Queries the pointer to the internal CPUMCTX structure.
 *
 * @returns The CPUMCTX pointer.
 * @param   pVCpu       Handle to the virtual cpu.
 */
VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
{
    return &pVCpu->cpum.s.Guest;
}

VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
{
#ifdef VBOX_WITH_IEM
# ifdef VBOX_WITH_RAW_MODE_NOT_R0
    if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
# endif
#endif
    pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
    pVCpu->cpum.s.Guest.gdtr.pGdt  = GCPtrBase;
    pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
    return VINF_SUCCESS; /* formality, consider it void. */
}

VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
{
#ifdef VBOX_WITH_IEM
# ifdef VBOX_WITH_RAW_MODE_NOT_R0
    if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
# endif
#endif
    pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
    pVCpu->cpum.s.Guest.idtr.pIdt  = GCPtrBase;
    pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
    return VINF_SUCCESS; /* formality, consider it void. */
}

VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
{
#ifdef VBOX_WITH_IEM
# ifdef VBOX_WITH_RAW_MODE_NOT_R0
    if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
# endif
#endif
    pVCpu->cpum.s.Guest.tr.Sel = tr;
    pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
    return VINF_SUCCESS; /* formality, consider it void. */
}

VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
{
#ifdef VBOX_WITH_IEM
# ifdef VBOX_WITH_RAW_MODE_NOT_R0
    if (   (   ldtr != 0
            || pVCpu->cpum.s.Guest.ldtr.Sel != 0)
        && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
# endif
#endif
    pVCpu->cpum.s.Guest.ldtr.Sel      = ldtr;
    /* The caller will set more hidden bits if it has them. */
    pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
    pVCpu->cpum.s.Guest.ldtr.fFlags   = 0;
    pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
    return VINF_SUCCESS; /* formality, consider it void. */
}


/**
 * Set the guest CR0.
 *
 * When called in GC, the hyper CR0 may be updated if that is
 * required. The caller only has to take special action if AM,
 * WP, PG or PE changes.
 *
 * @returns VINF_SUCCESS (consider it void).
 * @param   pVCpu   Handle to the virtual cpu.
 * @param   cr0     The new CR0 value.
 */
VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
{
#ifdef IN_RC
    /*
     * Check if we need to change hypervisor CR0 because
     * of math stuff.
     */
    if (    (cr0                     & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
        !=  (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
    {
        if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU))
        {
            /*
             * We haven't saved the host FPU state yet, so TS and MT are both set
             * and EM should be reflecting the guest EM (it always does this).
             */
            if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
            {
                uint32_t HyperCR0 = ASMGetCR0();
                AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
                AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
                HyperCR0 &= ~X86_CR0_EM;
                HyperCR0 |= cr0 & X86_CR0_EM;
                Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
                ASMSetCR0(HyperCR0);
            }
# ifdef VBOX_STRICT
            else
            {
                uint32_t HyperCR0 = ASMGetCR0();
                AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
                AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
            }
# endif
        }
        else
        {
            /*
             * Already saved the state, so we're just mirroring
             * the guest flags.
             */
            uint32_t HyperCR0 = ASMGetCR0();
            AssertMsg(   (HyperCR0                  & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
                      == (pVCpu->cpum.s.Guest.cr0   & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
                      ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
            HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
            HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
            Log(("CPUM New HyperCR0=%#x\n", HyperCR0));
            ASMSetCR0(HyperCR0);
        }
    }
#endif /* IN_RC */

    /*
     * Check for changes causing TLB flushes (for REM).
     * The caller is responsible for calling PGM when appropriate.
     */
    if (    (cr0                     & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
        !=  (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
        pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
    pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;

    pVCpu->cpum.s.Guest.cr0 = cr0 | X86_CR0_ET;
    return VINF_SUCCESS;
}
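

/*
 * The raw-mode CR0 juggling above reduces to a mask-merge; an illustrative
 * restatement (sketch only; cpumExampleMergeCr0 is a made-up helper):
 * fMask is X86_CR0_EM while the host FPU state is still unsaved, and
 * X86_CR0_TS | X86_CR0_EM | X86_CR0_MP once CPUM_USED_FPU is set.
 */
#if 0 /* illustrative sketch, not built */
static uint32_t cpumExampleMergeCr0(uint32_t uHyperCr0, uint64_t uGuestCr0, uint32_t fMask)
{
    /* Keep the hyper bits outside fMask, take the guest bits inside it. */
    return (uHyperCr0 & ~fMask) | ((uint32_t)uGuestCr0 & fMask);
}
#endif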


VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
{
    pVCpu->cpum.s.Guest.cr2 = cr2;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
{
    pVCpu->cpum.s.Guest.cr3 = cr3;
    pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
{
    if (    (cr4                     & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
        !=  (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
        pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
    pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
    if (!CPUMSupportsFXSR(pVCpu->CTX_SUFF(pVM)))
        cr4 &= ~X86_CR4_OSFSXR;
    pVCpu->cpum.s.Guest.cr4 = cr4;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
{
    pVCpu->cpum.s.Guest.eflags.u32 = eflags;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
{
    pVCpu->cpum.s.Guest.eip = eip;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
{
    pVCpu->cpum.s.Guest.eax = eax;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
{
    pVCpu->cpum.s.Guest.ebx = ebx;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
{
    pVCpu->cpum.s.Guest.ecx = ecx;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
{
    pVCpu->cpum.s.Guest.edx = edx;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
{
    pVCpu->cpum.s.Guest.esp = esp;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
{
    pVCpu->cpum.s.Guest.ebp = ebp;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
{
    pVCpu->cpum.s.Guest.esi = esi;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
{
    pVCpu->cpum.s.Guest.edi = edi;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
{
    pVCpu->cpum.s.Guest.ss.Sel = ss;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
{
    pVCpu->cpum.s.Guest.cs.Sel = cs;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
{
    pVCpu->cpum.s.Guest.ds.Sel = ds;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
{
    pVCpu->cpum.s.Guest.es.Sel = es;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
{
    pVCpu->cpum.s.Guest.fs.Sel = fs;
    return VINF_SUCCESS;
}


VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
{
    pVCpu->cpum.s.Guest.gs.Sel = gs;
    return VINF_SUCCESS;
}


VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
{
    pVCpu->cpum.s.Guest.msrEFER = val;
}


/**
 * Query an MSR.
 *
 * The caller is responsible for checking privilege if the call is the result
 * of a RDMSR instruction. We'll do the rest.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_CPUM_RAISE_GP_0 on failure (invalid MSR), the caller is
 *          expected to take the appropriate actions. @a *puValue is set to 0.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   idMsr       The MSR.
 * @param   puValue     Where to return the value.
 *
 * @remarks This will always return the right values, even when we're in the
 *          recompiler.
 */
VMMDECL(int) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue)
{
    /*
     * If we don't indicate MSR support in the CPUID feature bits, indicate
     * that a #GP(0) should be raised.
     */
    if (!(pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_MSR))
    {
        *puValue = 0;
        return VERR_CPUM_RAISE_GP_0; /** @todo isn't \#UD more correct if not supported? */
    }

    int rc = VINF_SUCCESS;
    uint8_t const u8Multiplier = 4;
    switch (idMsr)
    {
        case MSR_IA32_TSC:
            *puValue = TMCpuTickGet(pVCpu);
            break;

        case MSR_IA32_APICBASE:
        {
            PVM pVM = pVCpu->CTX_SUFF(pVM);
            if (   (    pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1
                    && (pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_APIC))
                || (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
                    &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD
                    && (pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_APIC)))
            {
                *puValue = pVCpu->cpum.s.Guest.msrApicBase;
            }
            else
            {
                *puValue = 0;
                rc = VERR_CPUM_RAISE_GP_0;
            }
            break;
        }

        case MSR_IA32_CR_PAT:
            *puValue = pVCpu->cpum.s.Guest.msrPAT;
            break;

        case MSR_IA32_SYSENTER_CS:
            *puValue = pVCpu->cpum.s.Guest.SysEnter.cs;
            break;

        case MSR_IA32_SYSENTER_EIP:
            *puValue = pVCpu->cpum.s.Guest.SysEnter.eip;
            break;

        case MSR_IA32_SYSENTER_ESP:
            *puValue = pVCpu->cpum.s.Guest.SysEnter.esp;
            break;

        case MSR_IA32_MTRR_CAP:
        {
            /* This is currently a bit weird. :-) */
            uint8_t const   cVariableRangeRegs              = 0;
            bool const      fSystemManagementRangeRegisters = false;
            bool const      fFixedRangeRegisters            = false;
            bool const      fWriteCombiningType             = false;
            *puValue = cVariableRangeRegs
                     | (fFixedRangeRegisters            ? RT_BIT_64(8)  : 0)
                     | (fWriteCombiningType             ? RT_BIT_64(10) : 0)
                     | (fSystemManagementRangeRegisters ? RT_BIT_64(11) : 0);
            break;
        }

        case MSR_IA32_MTRR_DEF_TYPE:
            *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType;
            break;

        case IA32_MTRR_FIX64K_00000:
            *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix64K_00000;
            break;
        case IA32_MTRR_FIX16K_80000:
            *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_80000;
            break;
        case IA32_MTRR_FIX16K_A0000:
            *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_A0000;
            break;
        case IA32_MTRR_FIX4K_C0000:
            *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C0000;
            break;
        case IA32_MTRR_FIX4K_C8000:
            *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C8000;
            break;
        case IA32_MTRR_FIX4K_D0000:
            *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D0000;
            break;
        case IA32_MTRR_FIX4K_D8000:
            *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D8000;
            break;
        case IA32_MTRR_FIX4K_E0000:
            *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E0000;
            break;
        case IA32_MTRR_FIX4K_E8000:
            *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E8000;
            break;
        case IA32_MTRR_FIX4K_F0000:
            *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F0000;
            break;
        case IA32_MTRR_FIX4K_F8000:
            *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F8000;
            break;

        case MSR_K6_EFER:
            *puValue = pVCpu->cpum.s.Guest.msrEFER;
            break;

        case MSR_K8_SF_MASK:
            *puValue = pVCpu->cpum.s.Guest.msrSFMASK;
            break;

        case MSR_K6_STAR:
            *puValue = pVCpu->cpum.s.Guest.msrSTAR;
            break;

        case MSR_K8_LSTAR:
            *puValue = pVCpu->cpum.s.Guest.msrLSTAR;
            break;

        case MSR_K8_CSTAR:
            *puValue = pVCpu->cpum.s.Guest.msrCSTAR;
            break;

        case MSR_K8_FS_BASE:
            *puValue = pVCpu->cpum.s.Guest.fs.u64Base;
            break;

        case MSR_K8_GS_BASE:
            *puValue = pVCpu->cpum.s.Guest.gs.u64Base;
            break;

        case MSR_K8_KERNEL_GS_BASE:
            *puValue = pVCpu->cpum.s.Guest.msrKERNELGSBASE;
            break;

        case MSR_K8_TSC_AUX:
            *puValue = pVCpu->cpum.s.GuestMsrs.msr.TscAux;
            break;

        case MSR_IA32_PERF_STATUS:
            /** @todo Could be not exactly correct; maybe use the host's values. */
            *puValue = UINT64_C(1000)                 /* TSC increment by tick */
                     | ((uint64_t)u8Multiplier << 24) /* CPU multiplier (aka bus ratio) min */
                     | ((uint64_t)u8Multiplier << 40) /* CPU multiplier (aka bus ratio) max */;
            break;

        case MSR_IA32_FSB_CLOCK_STS:
            /*
             * Encoded as:
             *     0 - 266
             *     1 - 133
             *     2 - 200
             *     3 - 166
             *     5 - 100
             */
            *puValue = (2 << 4);
            break;

        case MSR_IA32_PLATFORM_INFO:
            *puValue = (u8Multiplier << 8)            /* Flex ratio max */
                     | ((uint64_t)u8Multiplier << 40) /* Flex ratio min */;
            break;

        case MSR_IA32_THERM_STATUS:
            /* CPU temperature relative to TCC; to actually activate, CPUID leaf 6 EAX[0] must be set. */
            *puValue = RT_BIT(31)           /* validity bit */
                     | (UINT64_C(20) << 16) /* degrees till TCC */;
            break;

        case MSR_IA32_MISC_ENABLE:
#if 0
            /* Needs to be tested more before enabling. */
            *puValue = pVCpu->cpum.s.GuestMsrs.msr.MiscEnable;
#else
            /* Currently we don't allow guests to modify enable MSRs. */
            *puValue = MSR_IA32_MISC_ENABLE_FAST_STRINGS /* by default */;

            if ((pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_MONITOR) != 0)
                *puValue |= MSR_IA32_MISC_ENABLE_MONITOR /* if mwait/monitor available */;
            /** @todo add more CPUID-controlled features this way. */
#endif
            break;

#if 0 /*def IN_RING0 */
        case MSR_IA32_PLATFORM_ID:
        case MSR_IA32_BIOS_SIGN_ID:
            if (CPUMGetCPUVendor(pVM) == CPUMCPUVENDOR_INTEL)
            {
                /* Available since the P6 family. VT-x implies that this feature is present. */
                if (idMsr == MSR_IA32_PLATFORM_ID)
                    *puValue = ASMRdMsr(MSR_IA32_PLATFORM_ID);
                else if (idMsr == MSR_IA32_BIOS_SIGN_ID)
                    *puValue = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID);
                break;
            }
            /* no break */
#endif

        /*
         * Intel-specific MSRs:
         */
        case MSR_IA32_PLATFORM_ID:          /* fam/mod >= 6_01 */
        case MSR_IA32_BIOS_SIGN_ID:         /* fam/mod >= 6_01 */
        /*case MSR_IA32_BIOS_UPDT_TRIG: - write-only? */
        case MSR_IA32_MCP_CAP:              /* fam/mod >= 6_01 */
        /*case MSR_IA32_MCP_STATUS:    - indicated as not present in CAP */
        /*case MSR_IA32_MCP_CTRL:      - indicated as not present in CAP */
        case MSR_IA32_MC0_CTL:
        case MSR_IA32_MC0_STATUS:
            *puValue = 0;
            if (CPUMGetGuestCpuVendor(pVCpu->CTX_SUFF(pVM)) != CPUMCPUVENDOR_INTEL)
            {
                Log(("MSR %#x is Intel, the virtual CPU isn't an Intel one -> #GP\n", idMsr));
                rc = VERR_CPUM_RAISE_GP_0;
            }
            break;

        default:
            /*
             * Hand the X2APIC range to PDM and the APIC.
             */
            if (    idMsr >= MSR_IA32_APIC_START
                &&  idMsr <  MSR_IA32_APIC_END)
            {
                rc = PDMApicReadMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, puValue);
                if (RT_SUCCESS(rc))
                    rc = VINF_SUCCESS;
                else
                {
                    *puValue = 0;
                    rc = VERR_CPUM_RAISE_GP_0;
                }
            }
            else
            {
                *puValue = 0;
                rc = VERR_CPUM_RAISE_GP_0;
            }
            break;
    }

    return rc;
}
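

/*
 * Hypothetical RDMSR emulation sketch (illustrative only; exampleEmulateRdMsr
 * is a made-up name) showing how a caller is expected to consume
 * CPUMQueryGuestMsr: ECX selects the MSR, the 64-bit result is split into
 * EDX:EAX, and VERR_CPUM_RAISE_GP_0 means the caller must raise #GP(0).
 */
#if 0 /* illustrative sketch, not built */
static int exampleEmulateRdMsr(PVMCPU pVCpu)
{
    uint64_t uValue;
    int rc = CPUMQueryGuestMsr(pVCpu, CPUMGetGuestECX(pVCpu), &uValue);
    if (rc == VINF_SUCCESS)
    {
        CPUMSetGuestEAX(pVCpu, (uint32_t)uValue);         /* low half */
        CPUMSetGuestEDX(pVCpu, (uint32_t)(uValue >> 32)); /* high half */
        return VINF_SUCCESS;
    }
    Assert(rc == VERR_CPUM_RAISE_GP_0);
    return rc; /* the caller injects #GP(0) */
}
#endif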


/**
 * Sets the MSR.
 *
 * The caller is responsible for checking privilege if the call is the result
 * of a WRMSR instruction. We'll do the rest.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_CPUM_RAISE_GP_0 on failure, the caller is expected to take the
 *          appropriate actions.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   idMsr       The MSR id.
 * @param   uValue      The value to set.
 *
 * @remarks Everyone changing MSR values, including the recompiler, shall do it
 *          by calling this method. This makes sure we have current values and
 *          that we trigger all the right actions when something changes.
 */
VMMDECL(int) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue)
{
    /*
     * If we don't indicate MSR support in the CPUID feature bits, indicate
     * that a #GP(0) should be raised.
     */
    if (!(pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_MSR))
        return VERR_CPUM_RAISE_GP_0; /** @todo isn't \#UD more correct if not supported? */

    int rc = VINF_SUCCESS;
    switch (idMsr)
    {
        case MSR_IA32_MISC_ENABLE:
            pVCpu->cpum.s.GuestMsrs.msr.MiscEnable = uValue;
            break;

        case MSR_IA32_TSC:
            TMCpuTickSet(pVCpu->CTX_SUFF(pVM), pVCpu, uValue);
            break;

        case MSR_IA32_APICBASE:
            rc = PDMApicSetBase(pVCpu, uValue);
            if (rc != VINF_SUCCESS)
                rc = VERR_CPUM_RAISE_GP_0;
            break;

        case MSR_IA32_CR_PAT:
            pVCpu->cpum.s.Guest.msrPAT = uValue;
            break;

        case MSR_IA32_SYSENTER_CS:
            pVCpu->cpum.s.Guest.SysEnter.cs = uValue & 0xffff; /* 16 bits selector */
            break;

        case MSR_IA32_SYSENTER_EIP:
            pVCpu->cpum.s.Guest.SysEnter.eip = uValue;
            break;

        case MSR_IA32_SYSENTER_ESP:
            pVCpu->cpum.s.Guest.SysEnter.esp = uValue;
            break;

        case MSR_IA32_MTRR_CAP:
            return VERR_CPUM_RAISE_GP_0;

        case MSR_IA32_MTRR_DEF_TYPE:
            if (   (uValue & UINT64_C(0xfffffffffffff300))
                || (    (uValue & 0xff) != 0
                    &&  (uValue & 0xff) != 1
                    &&  (uValue & 0xff) != 4
                    &&  (uValue & 0xff) != 5
                    &&  (uValue & 0xff) != 6) )
            {
                Log(("MSR_IA32_MTRR_DEF_TYPE: #GP(0) - writing reserved value (%#llx)\n", uValue));
                return VERR_CPUM_RAISE_GP_0;
            }
            pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType = uValue;
            break;

        case IA32_MTRR_FIX64K_00000:
            pVCpu->cpum.s.GuestMsrs.msr.MtrrFix64K_00000 = uValue;
            break;
        case IA32_MTRR_FIX16K_80000:
            pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_80000 = uValue;
            break;
        case IA32_MTRR_FIX16K_A0000:
            pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_A0000 = uValue;
            break;
        case IA32_MTRR_FIX4K_C0000:
            pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C0000 = uValue;
            break;
        case IA32_MTRR_FIX4K_C8000:
            pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C8000 = uValue;
            break;
        case IA32_MTRR_FIX4K_D0000:
            pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D0000 = uValue;
            break;
        case IA32_MTRR_FIX4K_D8000:
            pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D8000 = uValue;
            break;
        case IA32_MTRR_FIX4K_E0000:
            pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E0000 = uValue;
            break;
        case IA32_MTRR_FIX4K_E8000:
            pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E8000 = uValue;
            break;
        case IA32_MTRR_FIX4K_F0000:
            pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F0000 = uValue;
            break;
        case IA32_MTRR_FIX4K_F8000:
            pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F8000 = uValue;
            break;

        /*
         * AMD64 MSRs.
         */
        case MSR_K6_EFER:
        {
            PVM             pVM          = pVCpu->CTX_SUFF(pVM);
            uint64_t const  uOldEFER     = pVCpu->cpum.s.Guest.msrEFER;
            uint32_t const  fExtFeatures = pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
                                         ? pVM->cpum.s.aGuestCpuIdExt[1].edx
                                         : 0;
            uint64_t        fMask        = 0;

            /* Filter out those bits the guest is allowed to change. (e.g. LMA is read-only) */
            if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_NX)
                fMask |= MSR_K6_EFER_NXE;
            if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
                fMask |= MSR_K6_EFER_LME;
            if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
                fMask |= MSR_K6_EFER_SCE;
            if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
                fMask |= MSR_K6_EFER_FFXSR;

            /* Check for illegal MSR_K6_EFER_LME transitions: not allowed to change LME if
               paging is enabled. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
            if (    (uOldEFER & MSR_K6_EFER_LME) != (uValue & fMask & MSR_K6_EFER_LME)
                &&  (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG))
            {
                Log(("Illegal MSR_K6_EFER_LME change: paging is enabled!!\n"));
                return VERR_CPUM_RAISE_GP_0;
            }

            /* There are a few more: e.g. MSR_K6_EFER_LMSLE */
            AssertMsg(!(uValue & ~(MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA /* ignored anyway */ | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)),
                      ("Unexpected value %RX64\n", uValue));
            pVCpu->cpum.s.Guest.msrEFER = (uOldEFER & ~fMask) | (uValue & fMask);

            /* AMD64 Architecture Programmer's Manual: 15.15 TLB Control; flush the TLB
               if MSR_K6_EFER_NXE, MSR_K6_EFER_LME or MSR_K6_EFER_LMA are changed. */
            if (    (uOldEFER                    & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA))
                !=  (pVCpu->cpum.s.Guest.msrEFER & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA)))
            {
                /// @todo PGMFlushTLB(pVCpu, cr3, true /*fGlobal*/);
                HMFlushTLB(pVCpu);

                /* Notify PGM about NXE changes. */
                if (    (uOldEFER                    & MSR_K6_EFER_NXE)
                    !=  (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE))
                    PGMNotifyNxeChanged(pVCpu, !(uOldEFER & MSR_K6_EFER_NXE));
            }
            break;
        }

        case MSR_K8_SF_MASK:
            pVCpu->cpum.s.Guest.msrSFMASK = uValue;
            break;

        case MSR_K6_STAR:
            pVCpu->cpum.s.Guest.msrSTAR = uValue;
            break;

        case MSR_K8_LSTAR:
            pVCpu->cpum.s.Guest.msrLSTAR = uValue;
            break;

        case MSR_K8_CSTAR:
            pVCpu->cpum.s.Guest.msrCSTAR = uValue;
            break;

        case MSR_K8_FS_BASE:
            pVCpu->cpum.s.Guest.fs.u64Base = uValue;
            break;

        case MSR_K8_GS_BASE:
            pVCpu->cpum.s.Guest.gs.u64Base = uValue;
            break;

        case MSR_K8_KERNEL_GS_BASE:
            pVCpu->cpum.s.Guest.msrKERNELGSBASE = uValue;
            break;

        case MSR_K8_TSC_AUX:
            pVCpu->cpum.s.GuestMsrs.msr.TscAux = uValue;
            break;

        /*
         * Intel-specific MSRs:
         */
        /*case MSR_IA32_PLATFORM_ID: - read-only */
        case MSR_IA32_BIOS_SIGN_ID:         /* fam/mod >= 6_01 */
        case MSR_IA32_BIOS_UPDT_TRIG:       /* fam/mod >= 6_01 */
        /*case MSR_IA32_MCP_CAP:     - read-only */
        /*case MSR_IA32_MCP_STATUS:  - read-only */
        /*case MSR_IA32_MCP_CTRL:    - indicated as not present in CAP */
        /*case MSR_IA32_MC0_CTL:     - read-only? */
        /*case MSR_IA32_MC0_STATUS:  - read-only? */
            if (CPUMGetGuestCpuVendor(pVCpu->CTX_SUFF(pVM)) != CPUMCPUVENDOR_INTEL)
            {
                Log(("MSR %#x is Intel, the virtual CPU isn't an Intel one -> #GP\n", idMsr));
                return VERR_CPUM_RAISE_GP_0;
            }
            /* ignored */
            break;

        default:
            /*
             * Hand the X2APIC range to PDM and the APIC.
             */
            if (    idMsr >= MSR_IA32_APIC_START
                &&  idMsr <  MSR_IA32_APIC_END)
            {
                rc = PDMApicWriteMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, uValue);
                if (rc != VINF_SUCCESS)
                    rc = VERR_CPUM_RAISE_GP_0;
            }
            else
            {
                /* We should actually trigger a #GP here, but don't as that might cause more trouble. */
                /** @todo rc = VERR_CPUM_RAISE_GP_0 */
                Log(("CPUMSetGuestMsr: Unknown MSR %#x attempted set to %#llx\n", idMsr, uValue));
            }
            break;
    }
    return rc;
}
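

/*
 * The WRMSR counterpart of the sketch after CPUMQueryGuestMsr (illustrative
 * only; exampleEmulateWrMsr is a made-up name): EDX:EAX are combined into the
 * 64-bit value and CPUMSetGuestMsr triggers the right side effects.
 */
#if 0 /* illustrative sketch, not built */
static int exampleEmulateWrMsr(PVMCPU pVCpu)
{
    uint64_t uValue = ((uint64_t)CPUMGetGuestEDX(pVCpu) << 32)
                    | CPUMGetGuestEAX(pVCpu);
    int rc = CPUMSetGuestMsr(pVCpu, CPUMGetGuestECX(pVCpu), uValue);
    Assert(rc == VINF_SUCCESS || rc == VERR_CPUM_RAISE_GP_0);
    return rc;
}
#endif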


VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
{
    if (pcbLimit)
        *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
    return pVCpu->cpum.s.Guest.idtr.pIdt;
}


VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
{
    if (pHidden)
        *pHidden = pVCpu->cpum.s.Guest.tr;
    return pVCpu->cpum.s.Guest.tr.Sel;
}


VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.cs.Sel;
}


VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.ds.Sel;
}


VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.es.Sel;
}


VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.fs.Sel;
}


VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.gs.Sel;
}


VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.ss.Sel;
}


VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.ldtr.Sel;
}


VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
{
    *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
    *pcbLimit   = pVCpu->cpum.s.Guest.ldtr.u32Limit;
    return pVCpu->cpum.s.Guest.ldtr.Sel;
}


VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.cr0;
}


VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.cr2;
}


VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.cr3;
}


VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.cr4;
}


VMMDECL(uint64_t) CPUMGetGuestCR8(PVMCPU pVCpu)
{
    uint64_t u64;
    int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
    if (RT_FAILURE(rc))
        u64 = 0;
    return u64;
}


VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
{
    *pGDTR = pVCpu->cpum.s.Guest.gdtr;
}


VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.eip;
}


VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.rip;
}


VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.eax;
}


VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.ebx;
}


VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.ecx;
}


VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.edx;
}


VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.esi;
}


VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.edi;
}


VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.esp;
}


VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.ebp;
}


VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.eflags.u32;
}


VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
{
    switch (iReg)
    {
        case DISCREG_CR0:
            *pValue = pVCpu->cpum.s.Guest.cr0;
            break;

        case DISCREG_CR2:
            *pValue = pVCpu->cpum.s.Guest.cr2;
            break;

        case DISCREG_CR3:
            *pValue = pVCpu->cpum.s.Guest.cr3;
            break;

        case DISCREG_CR4:
            *pValue = pVCpu->cpum.s.Guest.cr4;
            break;

        case DISCREG_CR8:
        {
            uint8_t u8Tpr;
            int rc = PDMApicGetTPR(pVCpu, &u8Tpr, NULL /*pfPending*/);
            if (RT_FAILURE(rc))
            {
                AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
                *pValue = 0;
                return rc;
            }
            *pValue = u8Tpr >> 4; /* Bits 7-4 contain the task priority that goes into CR8; bits 3-0 are not represented in CR8. */
            break;
        }

        default:
            return VERR_INVALID_PARAMETER;
    }
    return VINF_SUCCESS;
}


VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.dr[0];
}


VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.dr[1];
}


VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.dr[2];
}


VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.dr[3];
}


VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.dr[6];
}


VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.dr[7];
}


VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
{
    AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
    /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
    if (iReg == 4 || iReg == 5)
        iReg += 2;
    *pValue = pVCpu->cpum.s.Guest.dr[iReg];
    return VINF_SUCCESS;
}


VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
{
    return pVCpu->cpum.s.Guest.msrEFER;
}


/**
 * Gets a CPUID leaf.
 *
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   iLeaf   The CPUID leaf to get.
 * @param   pEax    Where to store the EAX value.
 * @param   pEbx    Where to store the EBX value.
 * @param   pEcx    Where to store the ECX value.
 * @param   pEdx    Where to store the EDX value.
 */
VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t iLeaf, uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    PCCPUMCPUID pCpuId;
    if (iLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
        pCpuId = &pVM->cpum.s.aGuestCpuIdStd[iLeaf];
    else if (iLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
        pCpuId = &pVM->cpum.s.aGuestCpuIdExt[iLeaf - UINT32_C(0x80000000)];
    else if (   iLeaf - UINT32_C(0x40000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdHyper)
             && (pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_HVP))
        pCpuId = &pVM->cpum.s.aGuestCpuIdHyper[iLeaf - UINT32_C(0x40000000)]; /* Only report if HVP bit set. */
    else if (iLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
        pCpuId = &pVM->cpum.s.aGuestCpuIdCentaur[iLeaf - UINT32_C(0xc0000000)];
    else
        pCpuId = &pVM->cpum.s.GuestCpuIdDef;

    uint32_t cCurrentCacheIndex = *pEcx;

    *pEax = pCpuId->eax;
    *pEbx = pCpuId->ebx;
    *pEcx = pCpuId->ecx;
    *pEdx = pCpuId->edx;

    if (iLeaf == 1)
    {
        /* Bits 31-24: Initial APIC ID */
        Assert(pVCpu->idCpu <= 255);
        *pEbx |= (pVCpu->idCpu << 24);
    }

    if (    iLeaf == 4
        &&  cCurrentCacheIndex < 3
        &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
    {
        uint32_t type, level, sharing, linesize,
                 partitions, associativity, sets, cores;

        /* For type: 1 - data cache, 2 - i-cache, 3 - unified */
        partitions = 1;
        /* These initializers only quiet the compiler; the values are always
           overwritten below. */
        sets = associativity = sharing = level = 1;
        cores = pVM->cCpus > 32 ? 32 : pVM->cCpus;
        switch (cCurrentCacheIndex)
        {
            case 0:
                type = 1;
                level = 1;
                sharing = 1;
                linesize = 64;
                associativity = 8;
                sets = 64;
                break;
            case 1:
                level = 1;
                type = 2;
                sharing = 1;
                linesize = 64;
                associativity = 8;
                sets = 64;
                break;
            default:            /* shut up gcc. */
                AssertFailed();
            case 2:
                level = 2;
                type = 3;
                sharing = cores; /* our L2 cache is modelled as shared between all cores */
                linesize = 64;
                associativity = 24;
                sets = 4096;
                break;
        }

        *pEax |= ((cores - 1) << 26) |
                 ((sharing - 1) << 14) |
                 (level << 5) |
                 1;
        *pEbx = (linesize - 1) |
                ((partitions - 1) << 12) |
                ((associativity - 1) << 22); /* -1 encoding */
        *pEcx = sets - 1;
    }

    Log2(("CPUMGetGuestCpuId: iLeaf=%#010x %RX32 %RX32 %RX32 %RX32\n", iLeaf, *pEax, *pEbx, *pEcx, *pEdx));
}
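

/*
 * Decoding the "-1 encoded" leaf 4 cache descriptor built above (sketch only;
 * exampleCpuidLeaf4CacheSize is a made-up helper following the Intel SDM
 * formula): size = ways * partitions * line size * sets, each field decoded
 * as value + 1. For the L2 modelled above: 24 * 1 * 64 * 4096 = 6 MiB.
 */
#if 0 /* illustrative sketch, not built */
static uint32_t exampleCpuidLeaf4CacheSize(uint32_t uEbx, uint32_t uEcx)
{
    uint32_t const cWays       = ((uEbx >> 22) & 0x3ff) + 1; /* EBX[31:22] */
    uint32_t const cPartitions = ((uEbx >> 12) & 0x3ff) + 1; /* EBX[21:12] */
    uint32_t const cbLine      = ( uEbx        & 0xfff) + 1; /* EBX[11:0]  */
    uint32_t const cSets       =   uEcx + 1;                 /* ECX        */
    return cWays * cPartitions * cbLine * cSets;
}
#endif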

/**
 * Gets the number of standard CPUID leaves.
 *
 * @returns Number of leaves.
 * @param   pVM     Pointer to the VM.
 * @remark  Intended for PATM.
 */
VMMDECL(uint32_t) CPUMGetGuestCpuIdStdMax(PVM pVM)
{
    return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd);
}


/**
 * Gets the number of extended CPUID leaves.
 *
 * @returns Number of leaves.
 * @param   pVM     Pointer to the VM.
 * @remark  Intended for PATM.
 */
VMMDECL(uint32_t) CPUMGetGuestCpuIdExtMax(PVM pVM)
{
    return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt);
}


/**
 * Gets the number of Centaur CPUID leaves.
 *
 * @returns Number of leaves.
 * @param   pVM     Pointer to the VM.
 * @remark  Intended for PATM.
 */
VMMDECL(uint32_t) CPUMGetGuestCpuIdCentaurMax(PVM pVM)
{
    return RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur);
}


/**
 * Sets a CPUID feature bit.
 *
 * @param   pVM             Pointer to the VM.
 * @param   enmFeature      The feature to set.
 */
VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
{
    switch (enmFeature)
    {
        /*
         * Set the APIC bit in both feature masks.
         */
        case CPUMCPUIDFEATURE_APIC:
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
                &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
                pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled APIC\n"));
            break;

        /*
         * Set the x2APIC bit in the standard feature mask.
         */
        case CPUMCPUIDFEATURE_X2APIC:
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_X2APIC;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled x2APIC\n"));
            break;

        /*
         * Set the sysenter/sysexit bit in the standard feature mask.
         * Assumes the caller knows what it's doing! (host must support these)
         */
        case CPUMCPUIDFEATURE_SEP:
        {
            if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
            {
                AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
                return;
            }

            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled sysenter/exit\n"));
            break;
        }

        /*
         * Set the syscall/sysret bit in the extended feature mask.
         * Assumes the caller knows what it's doing! (host must support these)
         */
        case CPUMCPUIDFEATURE_SYSCALL:
        {
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL))
            {
#if HC_ARCH_BITS == 32
                /* X86_CPUID_EXT_FEATURE_EDX_SYSCALL seems not to be set in
                   32-bit mode, even when the CPU is capable of doing so in
                   64-bit mode. */
                if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
                    ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
                    ||  !(ASMCpuId_EDX(1) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL))
#endif
                {
                    LogRel(("WARNING: Can't turn on SYSCALL/SYSRET when the host doesn't support it!!\n"));
                    return;
                }
            }
            /* Valid for both Intel and AMD CPUs, although only in 64-bit mode for Intel. */
            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled syscall/ret\n"));
            break;
        }

        /*
         * Set the PAE bit in both feature masks.
         * Assumes the caller knows what it's doing! (host must support these)
         */
        case CPUMCPUIDFEATURE_PAE:
        {
            if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE))
            {
                LogRel(("WARNING: Can't turn on PAE when the host doesn't support it!!\n"));
                return;
            }

            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE;
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
                &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
                pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAE\n"));
            break;
        }

        /*
         * Set the LONG MODE bit in the extended feature mask.
         * Assumes the caller knows what it's doing! (host must support these)
         */
        case CPUMCPUIDFEATURE_LONG_MODE:
        {
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
            {
                LogRel(("WARNING: Can't turn on LONG MODE when the host doesn't support it!!\n"));
                return;
            }

            /* Valid for both Intel and AMD. */
            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled LONG MODE\n"));
            break;
        }

        /*
         * Set the NX/XD bit in the extended feature mask.
         * Assumes the caller knows what it's doing! (host must support these)
         */
        case CPUMCPUIDFEATURE_NX:
        {
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_NX))
            {
                LogRel(("WARNING: Can't turn on NX/XD when the host doesn't support it!!\n"));
                return;
            }

            /* Valid for both Intel and AMD. */
            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_NX;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled NX\n"));
            break;
        }

        /*
         * Set the LAHF/SAHF support in 64-bit mode.
         * Assumes the caller knows what it's doing! (host must support this)
         */
        case CPUMCPUIDFEATURE_LAHF:
        {
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
                ||  !(ASMCpuId_ECX(0x80000001) & X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF))
            {
                LogRel(("WARNING: Can't turn on LAHF/SAHF when the host doesn't support it!!\n"));
                return;
            }

            /* Valid for both Intel and AMD. */
            pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
            break;
        }

        case CPUMCPUIDFEATURE_PAT:
        {
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAT;
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
                &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
                pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAT\n"));
            break;
        }

        /*
         * Set the RDTSCP support bit.
         * Assumes the caller knows what it's doing! (host must support this)
         */
        case CPUMCPUIDFEATURE_RDTSCP:
        {
            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
                ||  pVM->cpum.s.u8PortableCpuIdLevel > 0)
            {
                if (!pVM->cpum.s.u8PortableCpuIdLevel)
                    LogRel(("WARNING: Can't turn on RDTSCP when the host doesn't support it!!\n"));
                return;
            }

            /* Valid for both Intel and AMD. */
            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled RDTSCP.\n"));
            break;
        }

        /*
         * Set the Hypervisor Present bit in the standard feature mask.
         */
        case CPUMCPUIDFEATURE_HVP:
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_HVP;
            LogRel(("CPUMSetGuestCpuIdFeature: Enabled Hypervisor Present bit\n"));
            break;

        default:
            AssertMsgFailed(("enmFeature=%d\n", enmFeature));
            break;
    }
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
    }
}
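

/*
 * Hypothetical usage sketch (illustrative only; exampleExposeNx is a made-up
 * name): a ring-3 configuration path exposing NX to the guest; the setter
 * flags CPUM_CHANGED_CPUID on every VCPU as shown above.
 */
#if 0 /* illustrative sketch, not built */
static void exampleExposeNx(PVM pVM)
{
    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
    Assert(CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX));
}
#endif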


/**
 * Queries a CPUID feature bit.
 *
 * @returns boolean for feature presence
 * @param   pVM             Pointer to the VM.
 * @param   enmFeature      The feature to query.
 */
VMMDECL(bool) CPUMGetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
{
    switch (enmFeature)
    {
        case CPUMCPUIDFEATURE_PAE:
        {
            if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                return !!(pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PAE);
            break;
        }

        case CPUMCPUIDFEATURE_NX:
        {
            if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
                return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_NX);
            break;
        }

1964 case CPUMCPUIDFEATURE_RDTSCP:
1965 {
1966 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1967 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
1968 break;
1969 }
1970
1971 case CPUMCPUIDFEATURE_LONG_MODE:
1972 {
1973 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
1974 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
1975 break;
1976 }
1977
1978 default:
1979 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
1980 break;
1981 }
1982 return false;
1983}
1984
1985
1986/**
1987 * Clears a CPUID feature bit.
1988 *
1989 * @param pVM Pointer to the VM.
1990 * @param enmFeature The feature to clear.
1991 */
1992VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
1993{
1994 switch (enmFeature)
1995 {
1996 /*
1997 * Set the APIC bit in both feature masks.
1998 */
1999 case CPUMCPUIDFEATURE_APIC:
2000 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2001 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
2002 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
2003 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
2004 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
2005 Log(("CPUMClearGuestCpuIdFeature: Disabled APIC\n"));
2006 break;
2007
2008 /*
2009 * Clear the x2APIC bit in the standard feature mask.
2010 */
2011 case CPUMCPUIDFEATURE_X2APIC:
2012 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2013 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
2014 LogRel(("CPUMClearGuestCpuIdFeature: Disabled x2APIC\n"));
2015 break;
2016
2017 case CPUMCPUIDFEATURE_PAE:
2018 {
2019 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2020 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE;
2021 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
2022 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
2023 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
2024 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAE!\n"));
2025 break;
2026 }
2027
2028 case CPUMCPUIDFEATURE_PAT:
2029 {
2030 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2031 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAT;
2032 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
2033 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
2034 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
2035 LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAT!\n"));
2036 break;
2037 }
2038
2039 case CPUMCPUIDFEATURE_LONG_MODE:
2040 {
2041 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
2042 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
2043 break;
2044 }
2045
2046 case CPUMCPUIDFEATURE_LAHF:
2047 {
2048 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
2049 pVM->cpum.s.aGuestCpuIdExt[1].ecx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
2050 break;
2051 }
2052
2053 case CPUMCPUIDFEATURE_RDTSCP:
2054 {
2055 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
2056 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
2057 LogRel(("CPUMClearGuestCpuIdFeature: Disabled RDTSCP!\n"));
2058 break;
2059 }
2060
2061 case CPUMCPUIDFEATURE_HVP:
2062 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
2063 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_HVP;
2064 break;
2065
2066 default:
2067 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
2068 break;
2069 }
2070 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2071 {
2072 PVMCPU pVCpu = &pVM->aCpus[i];
2073 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
2074 }
2075}
2076
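/* Sketch of the set/clear pairing (hypothetical configuration code): both
 * functions end by walking all VCPUs and setting CPUM_CHANGED_CPUID so any
 * cached CPUID-derived state is refreshed, e.g.:
 *
 *     CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP); // hide RDTSCP
 *     CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_APIC);     // expose APIC
 */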
2077
2078/**
2079 * Gets the host CPU vendor.
2080 *
2081 * @returns CPU vendor.
2082 * @param pVM Pointer to the VM.
2083 */
2084VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
2085{
2086 return pVM->cpum.s.enmHostCpuVendor;
2087}
2088
2089
2090/**
2091 * Gets the CPU vendor.
2092 *
2093 * @returns CPU vendor.
2094 * @param pVM Pointer to the VM.
2095 */
2096VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
2097{
2098 return pVM->cpum.s.enmGuestCpuVendor;
2099}
2100
2101
2102VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
2103{
2104 pVCpu->cpum.s.Guest.dr[0] = uDr0;
2105 return CPUMRecalcHyperDRx(pVCpu);
2106}
2107
2108
2109VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
2110{
2111 pVCpu->cpum.s.Guest.dr[1] = uDr1;
2112 return CPUMRecalcHyperDRx(pVCpu);
2113}
2114
2115
2116VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
2117{
2118 pVCpu->cpum.s.Guest.dr[2] = uDr2;
2119 return CPUMRecalcHyperDRx(pVCpu);
2120}
2121
2122
2123VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
2124{
2125 pVCpu->cpum.s.Guest.dr[3] = uDr3;
2126 return CPUMRecalcHyperDRx(pVCpu);
2127}
2128
2129
2130VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
2131{
2132 pVCpu->cpum.s.Guest.dr[6] = uDr6;
2133 return CPUMRecalcHyperDRx(pVCpu);
2134}
2135
2136
2137VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
2138{
2139 pVCpu->cpum.s.Guest.dr[7] = uDr7;
2140 return CPUMRecalcHyperDRx(pVCpu);
2141}
2142
2143
2144VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
2145{
2146 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
2147 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
2148 if (iReg == 4 || iReg == 5)
2149 iReg += 2;
2150 pVCpu->cpum.s.Guest.dr[iReg] = Value;
2151 return CPUMRecalcHyperDRx(pVCpu);
2152}
2153
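/* DR4/DR5 aliasing in practice (illustrative): on real CPUs with CR4.DE clear
 * DR4/DR5 alias DR6/DR7, and the setter above mirrors that unconditionally:
 *
 *     CPUMSetGuestDRx(pVCpu, 5, uNewValue); // stores to cpum.s.Guest.dr[7]
 */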
2154
2155/**
2156 * Recalculates the hypervisor DRx register values based on
2157 * current guest registers and DBGF breakpoints.
2158 *
2159 * This is called whenever a guest DRx register is modified and when DBGF
2160 * sets a hardware breakpoint. In guest context this function will reload
2161 * any (hyper) DRx registers which come out with a different value.
2162 *
2163 * @returns VINF_SUCCESS.
2164 * @param pVCpu Pointer to the VMCPU.
2165 */
2166VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu)
2167{
2168 PVM pVM = pVCpu->CTX_SUFF(pVM);
2169
2170 /*
2171 * Compare the DR7s first.
2172 *
2173 * We only care about the enabled flags. The GE and LE flags are always
2174 * set and we don't care if the guest doesn't set them. GD is virtualized
2175 * when we dispatch #DB, so we never enable it.
2176 */
2177 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
2178#ifdef CPUM_VIRTUALIZE_DRX
2179 const RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
2180#else
2181 const RTGCUINTREG uGstDr7 = 0;
2182#endif
2183 if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
2184 {
2185 /*
2186 * Ok, something is enabled. Recalc each of the breakpoints.
2187 * Straightforward code, not optimized/minimized in any way.
2188 */
2189 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_MB1_MASK;
2190
2191 /* bp 0 */
2192 RTGCUINTREG uNewDr0;
2193 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
2194 {
2195 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
2196 uNewDr0 = DBGFBpGetDR0(pVM);
2197 }
2198 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
2199 {
2200 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
2201 uNewDr0 = CPUMGetGuestDR0(pVCpu);
2202 }
2203 else
2204 uNewDr0 = pVCpu->cpum.s.Hyper.dr[0];
2205
2206 /* bp 1 */
2207 RTGCUINTREG uNewDr1;
2208 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
2209 {
2210 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
2211 uNewDr1 = DBGFBpGetDR1(pVM);
2212 }
2213 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
2214 {
2215 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
2216 uNewDr1 = CPUMGetGuestDR1(pVCpu);
2217 }
2218 else
2219 uNewDr1 = pVCpu->cpum.s.Hyper.dr[1];
2220
2221 /* bp 2 */
2222 RTGCUINTREG uNewDr2;
2223 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
2224 {
2225 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
2226 uNewDr2 = DBGFBpGetDR2(pVM);
2227 }
2228 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
2229 {
2230 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
2231 uNewDr2 = CPUMGetGuestDR2(pVCpu);
2232 }
2233 else
2234 uNewDr2 = pVCpu->cpum.s.Hyper.dr[2];
2235
2236 /* bp 3 */
2237 RTGCUINTREG uNewDr3;
2238 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
2239 {
2240 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
2241 uNewDr3 = DBGFBpGetDR3(pVM);
2242 }
2243 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
2244 {
2245 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
2246 uNewDr3 = CPUMGetGuestDR3(pVCpu);
2247 }
2248 else
2249 uNewDr3 = pVCpu->cpum.s.Hyper.dr[3];
2250
2251 /*
2252 * Apply the updates.
2253 */
2254#ifdef IN_RC
2255 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS))
2256 {
2257 /** @todo save host DBx registers. */
2258 }
2259#endif
2260 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
2261 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
2262 CPUMSetHyperDR3(pVCpu, uNewDr3);
2263 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
2264 CPUMSetHyperDR2(pVCpu, uNewDr2);
2265 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
2266 CPUMSetHyperDR1(pVCpu, uNewDr1);
2267 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
2268 CPUMSetHyperDR0(pVCpu, uNewDr0);
2269 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
2270 CPUMSetHyperDR7(pVCpu, uNewDr7);
2271 }
2272 else
2273 {
2274#ifdef IN_RC
2275 if (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS)
2276 {
2277 /** @todo restore host DBx registers. */
2278 }
2279#endif
2280 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
2281 }
2282 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
2283 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
2284 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
2285 pVCpu->cpum.s.Hyper.dr[7]));
2286
2287 return VINF_SUCCESS;
2288}
2289
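/* Per-breakpoint merge precedence implemented above:
 *   1. DBGF hardware breakpoint enabled -> take DBGF's address and DR7 bits.
 *   2. else guest breakpoint enabled (only with CPUM_VIRTUALIZE_DRX) -> take
 *      the guest's address and DR7 bits.
 *   3. else -> keep the current hyper DRx value.
 * The new DR7 is rebuilt from scratch with GE, LE and the must-be-one bits
 * always set.
 */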
2290
2291/**
2292 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
2293 *
2294 * @returns true if NXE is enabled, otherwise false.
2295 * @param pVCpu Pointer to the VMCPU.
2296 */
2297VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
2298{
2299 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
2300}
2301
2302
2303/**
2304 * Tests if the guest has the Page Size Extension enabled (PSE).
2305 *
2306 * @returns true if PSE is enabled (or implied by PAE), otherwise false.
2307 * @param pVCpu Pointer to the VMCPU.
2308 */
2309VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
2310{
2311 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
2312 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
2313}
2314
2315
2316/**
2317 * Tests if the guest has paging enabled (PG).
2318 *
2319 * @returns true if paging is enabled, otherwise false.
2320 * @param pVCpu Pointer to the VMCPU.
2321 */
2322VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
2323{
2324 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
2325}
2326
2327
2328/**
2329 * Tests if the guest has ring-0 write protection enabled (CR0.WP).
2330 *
2331 * @returns true if CR0.WP is set, otherwise false.
2332 * @param pVCpu Pointer to the VMCPU.
2333 */
2334VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
2335{
2336 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
2337}
2338
2339
2340/**
2341 * Tests if the guest is running in real mode or not.
2342 *
2343 * @returns true if in real mode, otherwise false.
2344 * @param pVCpu Pointer to the VMCPU.
2345 */
2346VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
2347{
2348 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2349}
2350
2351
2352/**
2353 * Tests if the guest is running in real or virtual 8086 mode.
2354 *
2355 * @returns @c true if it is, @c false if not.
2356 * @param pVCpu Pointer to the VMCPU.
2357 */
2358VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PVMCPU pVCpu)
2359{
2360 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2361 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
2362}
2363
2364
2365/**
2366 * Tests if the guest is running in protected mode or not.
2367 *
2368 * @returns true if in protected mode, otherwise false.
2369 * @param pVCpu Pointer to the VMCPU.
2370 */
2371VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
2372{
2373 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
2374}
2375
2376
2377/**
2378 * Tests if the guest is running in paged protected mode or not.
2379 *
2380 * @returns true if in paged protected mode, otherwise false.
2381 * @param pVCpu Pointer to the VMCPU.
2382 */
2383VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
2384{
2385 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
2386}
2387
2388
2389/**
2390 * Tests if the guest is running in long mode or not.
2391 *
2392 * @returns true if in long mode, otherwise false.
2393 * @param pVCpu Pointer to the VMCPU.
2394 */
2395VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
2396{
2397 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
2398}
2399
2400
2401/**
2402 * Tests if the guest is running in PAE mode or not.
2403 *
2404 * @returns true if in PAE mode, otherwise false.
2405 * @param pVCpu Pointer to the VMCPU.
2406 */
2407VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
2408{
2409 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
2410 && (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
2411 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
2412}
2413
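/* Quick reference for the mode predicates above, in terms of CR0.PE, CR0.PG,
 * CR4.PAE and EFER.LMA:
 *   real mode:            PE=0
 *   protected mode:       PE=1
 *   paged protected mode: PE=1 && PG=1
 *   PAE mode:             PE=1 && PG=1 && PAE=1 && LMA=0
 *   long mode:            LMA=1
 */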
2414
2415/**
2416 * Tests if the guest is running in 64-bit mode or not.
2417 *
2418 * @returns true if in 64-bit mode, otherwise false.
2419 * @param pVCpu The current virtual CPU.
2420 */
2421VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
2422{
2423 if (!CPUMIsGuestInLongMode(pVCpu))
2424 return false;
2425 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2426 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
2427}
2428
2429
2430/**
2431 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
2432 * registers.
2433 *
2434 * @returns true if in 64-bit mode, otherwise false.
2435 * @param pCtx Pointer to the current guest CPU context.
2436 */
2437VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
2438{
2439 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
2440}
2441
2442#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2443/**
2444 * Tests whether we've entered raw-mode.
2445 * @returns @c true if we've entered raw-mode and selectors with RPL=1 are
2446 * really RPL=0, @c false if we've not (RPL=1 really is RPL=1).
2447 * @param pVCpu The current virtual CPU.
2448 */
2449VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu)
2450{
2451 return pVCpu->cpum.s.fRawEntered;
2452}
2453#endif
2454
2455
2456/**
2457 * Updates the EFLAGS while we're in raw-mode.
2458 *
2459 * @param pVCpu Pointer to the VMCPU.
2460 * @param fEfl The new EFLAGS value.
2461 */
2462VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl)
2463{
2464#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2465 if (pVCpu->cpum.s.fRawEntered)
2466 PATMRawSetEFlags(pVCpu->CTX_SUFF(pVM), CPUMCTX2CORE(&pVCpu->cpum.s.Guest), fEfl);
2467 else
2468#endif
2469 pVCpu->cpum.s.Guest.eflags.u32 = fEfl;
2470}
2471
2472
2473/**
2474 * Gets the EFLAGS while we're in raw-mode.
2475 *
2476 * @returns The eflags.
2477 * @param pVCpu Pointer to the current virtual CPU.
2478 */
2479VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu)
2480{
2481#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2482 if (pVCpu->cpum.s.fRawEntered)
2483 return PATMRawGetEFlags(pVCpu->CTX_SUFF(pVM), CPUMCTX2CORE(&pVCpu->cpum.s.Guest));
2484#endif
2485 return pVCpu->cpum.s.Guest.eflags.u32;
2486}
2487
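/* Note: while fRawEntered is set the guest's EFLAGS.IF is virtualized by PATM
 * (the real IF must stay under hypervisor control in raw-mode), which is why
 * both accessors above detour through PATMRawSetEFlags/PATMRawGetEFlags.
 */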
2488
2489/**
2490 * Sets the specified changed flags (CPUM_CHANGED_*).
2491 *
2492 * @param pVCpu Pointer to the current virtual CPU.
2493 * @param fChangedFlags The flags to set (CPUM_CHANGED_*).
2493 */
2494VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedFlags)
2495{
2496 pVCpu->cpum.s.fChanged |= fChangedFlags;
2497}
2498
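/* Example (illustrative): code that edits the guest CPUID leaves directly
 * would flag the change on the affected VCPU like this:
 *
 *     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CPUID);
 */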
2499
2500/**
2501 * Checks if the CPU supports the FXSAVE and FXRSTOR instruction.
2502 * @returns true if supported.
2503 * @returns false if not supported.
2504 * @param pVM Pointer to the VM.
2505 */
2506VMMDECL(bool) CPUMSupportsFXSR(PVM pVM)
2507{
2508 return pVM->cpum.s.CPUFeatures.edx.u1FXSR != 0;
2509}
2510
2511
2512/**
2513 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2514 * @returns true if used.
2515 * @returns false if not used.
2516 * @param pVM Pointer to the VM.
2517 */
2518VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2519{
2520 return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER) != 0;
2521}
2522
2523
2524/**
2525 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2526 * @returns true if used.
2527 * @returns false if not used.
2528 * @param pVM Pointer to the VM.
2529 */
2530VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2531{
2532 return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL) != 0;
2533}
2534
2535#ifndef IN_RING3
2536
2537/**
2538 * Lazily sync in the FPU/XMM state.
2539 *
2540 * @returns VBox status code.
2541 * @param pVCpu Pointer to the VMCPU.
2542 */
2543VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
2544{
2545 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
2546}
2547
2548#endif /* !IN_RING3 */
2549
2550/**
2551 * Checks if we activated the FPU/XMM state of the guest OS.
2552 * @returns true if we did.
2553 * @returns false if not.
2554 * @param pVCpu Pointer to the VMCPU.
2555 */
2556VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2557{
2558 return (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU) != 0;
2559}
2560
2561
2562/**
2563 * Deactivate the FPU/XMM state of the guest OS.
2564 * @param pVCpu Pointer to the VMCPU.
2565 */
2566VMMDECL(void) CPUMDeactivateGuestFPUState(PVMCPU pVCpu)
2567{
2568 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
2569}
2570
2571
2572/**
2573 * Checks if the guest debug state is active.
2574 *
2575 * @returns true if active, false if not.
2576 * @param pVCpu Pointer to the VMCPU.
2577 */
2578VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
2579{
2580 return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS) != 0;
2581}
2582
2583/**
2584 * Checks if the hyper debug state is active.
2585 *
2586 * @returns true if active, false if not.
2587 * @param pVCpu Pointer to the VMCPU.
2588 */
2589VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
2590{
2591 return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HYPER) != 0;
2592}
2593
2594
2595/**
2596 * Mark the guest's debug state as inactive.
2597 *
2598 * @param pVCpu Pointer to the VMCPU.
2600 */
2601VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
2602{
2603 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
2604}
2605
2606
2607/**
2608 * Mark the hypervisor's debug state as inactive.
2609 *
2610 * @param pVCpu Pointer to the VMCPU.
2612 */
2613VMMDECL(void) CPUMDeactivateHyperDebugState(PVMCPU pVCpu)
2614{
2615 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
2616}
2617
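/* Typical pairing (illustrative, simplified): an execution loop checks the
 * active flag before clearing it when tearing down a debug-register context:
 *
 *     if (CPUMIsHyperDebugStateActive(pVCpu))
 *         CPUMDeactivateHyperDebugState(pVCpu);
 */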
2618
2619/**
2620 * Get the current privilege level of the guest.
2621 *
2622 * @returns CPL
2623 * @param pVCpu Pointer to the current virtual CPU.
2624 */
2625VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
2626{
2627 /*
2628 * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
2629 *
2630 * Note! We used to check CS.DPL here, assuming it was always equal to
2631 * CPL even if a conforming segment was loaded. But this turned out to
2632 * only apply to older AMD-V. With VT-x we had an ACP2 regression
2633 * during install after a far call to ring 2 with VT-x. Then on newer
2634 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
2635 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
2636 *
2637 * So, forget CS.DPL, always use SS.DPL.
2638 *
2639 * Note! The SS RPL is always equal to the CPL, while the CS RPL
2640 * isn't necessarily equal if the segment is conforming.
2641 * See section 4.11.1 in the AMD manual.
2642 */
2643 uint32_t uCpl;
2644 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2645 {
2646 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2647 {
2648 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
2649 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
2650 else
2651 {
2652 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
2653#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2654 if (uCpl == 1)
2655 uCpl = 0;
2656#endif
2657 }
2658 }
2659 else
2660 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
2661 }
2662 else
2663 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
2664 return uCpl;
2665}
2666
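/* Examples of the rules above (illustrative): a guest in a V86 task reports
 * CPL=3 regardless of SS, a real-mode guest reports CPL=0, and in raw-mode
 * with invalid hidden SS parts a ring-compressed SS.RPL of 1 is reported as
 * the guest's ring 0.
 */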
2667
2668/**
2669 * Gets the current guest CPU mode.
2670 *
2671 * If paging mode is what you need, check out PGMGetGuestMode().
2672 *
2673 * @returns The CPU mode.
2674 * @param pVCpu Pointer to the VMCPU.
2675 */
2676VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2677{
2678 CPUMMODE enmMode;
2679 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2680 enmMode = CPUMMODE_REAL;
2681 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2682 enmMode = CPUMMODE_PROTECTED;
2683 else
2684 enmMode = CPUMMODE_LONG;
2685
2686 return enmMode;
2687}
2688
2689
2690/**
2691 * Figures out whether the guest CPU is currently executing 16, 32 or 64-bit code.
2692 *
2693 * @returns 16, 32 or 64.
2694 * @param pVCpu The current virtual CPU.
2695 */
2696VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
2697{
2698 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2699 return 16;
2700
2701 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2702 {
2703 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2704 return 16;
2705 }
2706
2707 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2708 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2709 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2710 return 64;
2711
2712 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2713 return 32;
2714
2715 return 16;
2716}
2717
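/* CS decode shared by CPUMGetGuestCodeBits above and CPUMGetGuestDisMode
 * below:
 *   CR0.PE=0 or EFLAGS.VM=1  -> 16-bit
 *   CS.L=1 and EFER.LMA=1    -> 64-bit
 *   CS.D=1                   -> 32-bit
 *   otherwise                -> 16-bit
 */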
2718
2719VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
2720{
2721 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2722 return DISCPUMODE_16BIT;
2723
2724 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2725 {
2726 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2727 return DISCPUMODE_16BIT;
2728 }
2729
2730 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2731 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2732 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2733 return DISCPUMODE_64BIT;
2734
2735 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2736 return DISCPUMODE_32BIT;
2737
2738 return DISCPUMODE_16BIT;
2739}
2740