VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp

Last change on this file was 109020, checked in by vboxsync, 2 weeks ago

VMM,SUP,VBoxCpuReport: Port of VBoxCpuReport to ARM. jiraref:VBP-1598

1/* $Id: CPUMR3CpuId.cpp 109020 2025-04-17 23:37:08Z vboxsync $ */
2/** @file
3 * CPUM - CPU ID part.
4 */
5
6/*
7 * Copyright (C) 2013-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_CPUM
33#include <VBox/vmm/cpum.h>
34#include <VBox/vmm/dbgf.h>
35#include <VBox/vmm/hm.h>
36#include <VBox/vmm/nem.h>
37#include <VBox/vmm/ssm.h>
38#include "CPUMInternal.h"
39#include <VBox/vmm/vmcc.h>
40#include <VBox/sup.h>
41
42#include <VBox/err.h>
43#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
44# include <iprt/asm-amd64-x86.h>
45#endif
46#include <iprt/ctype.h>
47#include <iprt/mem.h>
48#include <iprt/string.h>
49#include <iprt/x86-helpers.h>
50
51
52/*********************************************************************************************************************************
53* Defined Constants And Macros *
54*********************************************************************************************************************************/
55/** For sanity and to avoid wasting hyper heap on buggy config / saved state. */
56#define CPUM_CPUID_MAX_LEAVES 2048
57
58
59#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
60/**
61 * Determines the host CPU MXCSR mask.
62 *
63 * @returns MXCSR mask.
64 */
65VMMR3DECL(uint32_t) CPUMR3DeterminHostMxCsrMask(void)
66{
67 if ( ASMHasCpuId()
68 && RTX86IsValidStdRange(ASMCpuId_EAX(0))
69 && ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_FXSR)
70 {
71 uint8_t volatile abBuf[sizeof(X86FXSTATE) + 64];
72 PX86FXSTATE pState = (PX86FXSTATE)&abBuf[64 - ((uintptr_t)&abBuf[0] & 63)];
73 RT_ZERO(*pState);
74 ASMFxSave(pState);
75 if (pState->MXCSR_MASK == 0)
76 return 0xffbf;
77 return pState->MXCSR_MASK;
78 }
79 return 0;
80}
81#endif
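/* Illustrative usage sketch (not part of the build): combining the returned
   mask with a guest MXCSR value to reject writes that set reserved bits. The
   names uGuestMxCsr and fValid are made up for this example.
   @code
        uint32_t const fMxCsrMask  = CPUMR3DeterminHostMxCsrMask();
        uint32_t const uGuestMxCsr = UINT32_C(0x1f80);               // power-on default
        bool     const fValid      = !(uGuestMxCsr & ~fMxCsrMask);   // reserved bits must be zero
   @endcode
   A zero return means CPUID/FXSR probing failed, while a zero MXCSR_MASK field
   in the FXSAVE image is read as the architectural default 0xffbf. */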
82
83
84
85#ifndef IN_VBOX_CPU_REPORT
86/**
87 * Gets a matching leaf in the CPUID leaf array, converted to a CPUMCPUID.
88 *
89 * @returns true if found, false if not.
90 * @param paLeaves The CPUID leaves to search. This is sorted.
91 * @param cLeaves The number of leaves in the array.
92 * @param uLeaf The leaf to locate.
93 * @param uSubLeaf The subleaf to locate. Pass 0 if no sub-leaves.
94 * @param pLegacy The legacy output leaf.
95 */
96static bool cpumR3CpuIdGetLeafLegacy(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf, uint32_t uSubLeaf,
97 PCPUMCPUID pLegacy)
98{
99 PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, uLeaf, uSubLeaf);
100 if (pLeaf)
101 {
102 pLegacy->uEax = pLeaf->uEax;
103 pLegacy->uEbx = pLeaf->uEbx;
104 pLegacy->uEcx = pLeaf->uEcx;
105 pLegacy->uEdx = pLeaf->uEdx;
106 return true;
107 }
108 return false;
109}
110#endif /* !IN_VBOX_CPU_REPORT */
111
112
113/**
114 * Inserts a CPU ID leaf, replacing any existing ones.
115 *
116 * When inserting a simple leaf where we already got a series of sub-leaves with
117 * the same leaf number (eax), the simple leaf will replace the whole series.
118 *
119 * When pVM is NULL, this ASSUMES that the leaves array is still on the normal
120 * host-context heap and has only been allocated/reallocated by the
121 * cpumCpuIdEnsureSpace function.
122 *
123 * @returns VBox status code.
124 * @param pVM The cross context VM structure. If NULL, use
125 * the process heap, otherwise the VM's hyper heap.
126 * @param ppaLeaves Pointer to the pointer to the array of sorted
127 * CPUID leaves and sub-leaves. Must be NULL if using
128 * the hyper heap.
129 * @param pcLeaves Where we keep the leaf count for *ppaLeaves. Must
130 * be NULL if using the hyper heap.
131 * @param pNewLeaf Pointer to the data of the new leaf we're about to
132 * insert.
133 */
134static int cpumR3CpuIdInsert(PVM pVM, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves, PCCPUMCPUIDLEAF pNewLeaf)
135{
136 /*
137 * Validate input parameters if we are using the hyper heap and use the VM's CPUID arrays.
138 */
139 if (pVM)
140 {
141 AssertReturn(!ppaLeaves, VERR_INVALID_PARAMETER);
142 AssertReturn(!pcLeaves, VERR_INVALID_PARAMETER);
143 AssertReturn(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3 == pVM->cpum.s.GuestInfo.aCpuIdLeaves, VERR_INVALID_PARAMETER);
144
145 ppaLeaves = &pVM->cpum.s.GuestInfo.paCpuIdLeavesR3;
146 pcLeaves = &pVM->cpum.s.GuestInfo.cCpuIdLeaves;
147 }
148
149 PCPUMCPUIDLEAF paLeaves = *ppaLeaves;
150 uint32_t cLeaves = *pcLeaves;
151
152 /*
153 * Validate the new leaf a little.
154 */
155 AssertLogRelMsgReturn(!(pNewLeaf->fFlags & ~CPUMCPUIDLEAF_F_VALID_MASK),
156 ("%#x/%#x: %#x", pNewLeaf->uLeaf, pNewLeaf->uSubLeaf, pNewLeaf->fFlags),
157 VERR_INVALID_FLAGS);
158 AssertLogRelMsgReturn(pNewLeaf->fSubLeafMask != 0 || pNewLeaf->uSubLeaf == 0,
159 ("%#x/%#x: %#x", pNewLeaf->uLeaf, pNewLeaf->uSubLeaf, pNewLeaf->fSubLeafMask),
160 VERR_INVALID_PARAMETER);
161 AssertLogRelMsgReturn(RT_IS_POWER_OF_TWO(pNewLeaf->fSubLeafMask + 1),
162 ("%#x/%#x: %#x", pNewLeaf->uLeaf, pNewLeaf->uSubLeaf, pNewLeaf->fSubLeafMask),
163 VERR_INVALID_PARAMETER);
164 AssertLogRelMsgReturn((pNewLeaf->fSubLeafMask & pNewLeaf->uSubLeaf) == pNewLeaf->uSubLeaf,
165 ("%#x/%#x: %#x", pNewLeaf->uLeaf, pNewLeaf->uSubLeaf, pNewLeaf->fSubLeafMask),
166 VERR_INVALID_PARAMETER);
167
168 /*
169 * Find insertion point. The lazy bird uses the same excuse as in
170 * cpumCpuIdGetLeaf(), but optimizes for linear insertion (saved state).
171 */
172 uint32_t i;
173 if ( cLeaves > 0
174 && paLeaves[cLeaves - 1].uLeaf < pNewLeaf->uLeaf)
175 {
176 /* Add at end. */
177 i = cLeaves;
178 }
179 else if ( cLeaves > 0
180 && paLeaves[cLeaves - 1].uLeaf == pNewLeaf->uLeaf)
181 {
182 /* Either replacing the last leaf or dealing with sub-leaves. Spool
183 back to the first sub-leaf to pretend we did the linear search. */
184 i = cLeaves - 1;
185 while ( i > 0
186 && paLeaves[i - 1].uLeaf == pNewLeaf->uLeaf)
187 i--;
188 }
189 else
190 {
191 /* Linear search from the start. */
192 i = 0;
193 while ( i < cLeaves
194 && paLeaves[i].uLeaf < pNewLeaf->uLeaf)
195 i++;
196 }
197 if ( i < cLeaves
198 && paLeaves[i].uLeaf == pNewLeaf->uLeaf)
199 {
200 if (paLeaves[i].fSubLeafMask != pNewLeaf->fSubLeafMask)
201 {
202 /*
203 * The sub-leaf mask differs, replace all existing leaves with the
204 * same leaf number.
205 */
206 uint32_t c = 1;
207 while ( i + c < cLeaves
208 && paLeaves[i + c].uLeaf == pNewLeaf->uLeaf)
209 c++;
210 if (c > 1 && i + c < cLeaves)
211 {
212 memmove(&paLeaves[i + c], &paLeaves[i + 1], (cLeaves - i - c) * sizeof(paLeaves[0]));
213 *pcLeaves = cLeaves -= c - 1;
214 }
215
216 paLeaves[i] = *pNewLeaf;
217#ifdef VBOX_STRICT
218 cpumCpuIdAssertOrder(*ppaLeaves, *pcLeaves);
219#endif
220 return VINF_SUCCESS;
221 }
222
223 /* Find sub-leaf insertion point. */
224 while ( i < cLeaves
225 && paLeaves[i].uSubLeaf < pNewLeaf->uSubLeaf
226 && paLeaves[i].uLeaf == pNewLeaf->uLeaf)
227 i++;
228
229 /*
230 * If we've got an exactly matching leaf, replace it.
231 */
232 if ( i < cLeaves
233 && paLeaves[i].uLeaf == pNewLeaf->uLeaf
234 && paLeaves[i].uSubLeaf == pNewLeaf->uSubLeaf)
235 {
236 paLeaves[i] = *pNewLeaf;
237#ifdef VBOX_STRICT
238 cpumCpuIdAssertOrder(*ppaLeaves, *pcLeaves);
239#endif
240 return VINF_SUCCESS;
241 }
242 }
243
244 /*
245 * Adding a new leaf at 'i'.
246 */
247 AssertLogRelReturn(cLeaves < CPUM_CPUID_MAX_LEAVES, VERR_TOO_MANY_CPUID_LEAVES);
248 paLeaves = cpumCpuIdEnsureSpace(pVM, ppaLeaves, cLeaves);
249 if (!paLeaves)
250 return VERR_NO_MEMORY;
251
252 if (i < cLeaves)
253 memmove(&paLeaves[i + 1], &paLeaves[i], (cLeaves - i) * sizeof(paLeaves[0]));
254 *pcLeaves += 1;
255 paLeaves[i] = *pNewLeaf;
256
257#ifdef VBOX_STRICT
258 cpumCpuIdAssertOrder(*ppaLeaves, *pcLeaves);
259#endif
260 return VINF_SUCCESS;
261}
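/* Illustrative sketch of the replacement semantics above (values invented for
   the example): if the array already holds leaf 0x7 sub-leaves 0..1 and a
   simple leaf 0x7 (fSubLeafMask = 0) is inserted, the whole series collapses
   into the single new entry.
   @code
        CPUMCPUIDLEAF NewLeaf;
        RT_ZERO(NewLeaf);
        NewLeaf.uLeaf        = UINT32_C(0x00000007);
        NewLeaf.uSubLeaf     = 0;
        NewLeaf.fSubLeafMask = 0;   // simple leaf -> replaces all existing 0x7 sub-leaves
        int rc = cpumR3CpuIdInsert(pVM, NULL /*ppaLeaves*/, NULL /*pcLeaves*/, &NewLeaf);
   @endcode */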
262
263
264#ifndef IN_VBOX_CPU_REPORT
265/**
266 * Removes a range of CPUID leaves.
267 *
268 * This will not reallocate the array.
269 *
270 * @param paLeaves The array of sorted CPUID leaves and sub-leaves.
271 * @param pcLeaves Where we keep the leaf count for @a paLeaves.
272 * @param uFirst The first leaf.
273 * @param uLast The last leaf.
274 */
275static void cpumR3CpuIdRemoveRange(PCPUMCPUIDLEAF paLeaves, uint32_t *pcLeaves, uint32_t uFirst, uint32_t uLast)
276{
277 uint32_t cLeaves = *pcLeaves;
278
279 Assert(uFirst <= uLast);
280
281 /*
282 * Find the first one.
283 */
284 uint32_t iFirst = 0;
285 while ( iFirst < cLeaves
286 && paLeaves[iFirst].uLeaf < uFirst)
287 iFirst++;
288
289 /*
290 * Find the end (last + 1).
291 */
292 uint32_t iEnd = iFirst;
293 while ( iEnd < cLeaves
294 && paLeaves[iEnd].uLeaf <= uLast)
295 iEnd++;
296
297 /*
298 * Adjust the array if anything needs removing.
299 */
300 if (iFirst < iEnd)
301 {
302 if (iEnd < cLeaves)
303 memmove(&paLeaves[iFirst], &paLeaves[iEnd], (cLeaves - iEnd) * sizeof(paLeaves[0]));
304 *pcLeaves = cLeaves -= (iEnd - iFirst);
305 }
306
307# ifdef VBOX_STRICT
308 cpumCpuIdAssertOrder(paLeaves, *pcLeaves);
309# endif
310}
311#endif /* !IN_VBOX_CPU_REPORT */
312
313
314/**
315 * Gets a CPU ID leaf.
316 *
317 * @returns VBox status code.
318 * @param pVM The cross context VM structure.
319 * @param pLeaf Where to store the found leaf.
320 * @param uLeaf The leaf to locate.
321 * @param uSubLeaf The subleaf to locate. Pass 0 if no sub-leaves.
322 */
323VMMR3DECL(int) CPUMR3CpuIdGetLeaf(PVM pVM, PCPUMCPUIDLEAF pLeaf, uint32_t uLeaf, uint32_t uSubLeaf)
324{
325 PCPUMCPUIDLEAF pcLeaf = cpumCpuIdGetLeafInt(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, pVM->cpum.s.GuestInfo.cCpuIdLeaves,
326 uLeaf, uSubLeaf);
327 if (pcLeaf)
328 {
329 memcpy(pLeaf, pcLeaf, sizeof(*pLeaf));
330 return VINF_SUCCESS;
331 }
332
333 return VERR_NOT_FOUND;
334}
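/* Illustrative usage sketch: fetch the standard feature leaf and test a bit.
   @code
        CPUMCPUIDLEAF Leaf;
        int rc = CPUMR3CpuIdGetLeaf(pVM, &Leaf, 1 /*uLeaf*/, 0 /*uSubLeaf*/);
        if (RT_SUCCESS(rc) && (Leaf.uEdx & X86_CPUID_FEATURE_EDX_SSE2))
            LogRel(("CPUM: Guest CPUID reports SSE2\n"));
   @endcode */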
335
336
337/**
338 * Gets all the leaves.
339 *
340 * This only works after the CPUID leaves have been initialized. The interface
341 * is intended for NEM and configuring CPUID leaves for the native hypervisor.
342 *
343 * @returns Pointer to the array of leaves. NULL on failure.
344 * @param pVM The cross context VM structure.
345 * @param pcLeaves Where to return the number of leaves.
346 */
347VMMR3_INT_DECL(PCCPUMCPUIDLEAF) CPUMR3CpuIdGetPtr(PVM pVM, uint32_t *pcLeaves)
348{
349 *pcLeaves = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
350 return pVM->cpum.s.GuestInfo.paCpuIdLeavesR3;
351}
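/* Illustrative usage sketch (e.g. from NEM backend code):
   @code
        uint32_t        cLeaves  = 0;
        PCCPUMCPUIDLEAF paLeaves = CPUMR3CpuIdGetPtr(pVM, &cLeaves);
        for (uint32_t i = 0; i < cLeaves; i++)
            LogRel(("CPUM: leaf %#010x/%#010x\n", paLeaves[i].uLeaf, paLeaves[i].uSubLeaf));
   @endcode */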
352
353
354/**
355 * Inserts a CPU ID leaf, replacing any existing ones.
356 *
357 * @returns VBox status code.
358 * @param pVM The cross context VM structure.
359 * @param pNewLeaf Pointer to the leaf being inserted.
360 */
361VMMR3DECL(int) CPUMR3CpuIdInsert(PVM pVM, PCPUMCPUIDLEAF pNewLeaf)
362{
363 /*
364 * Validate parameters.
365 */
366 AssertReturn(pVM, VERR_INVALID_PARAMETER);
367 AssertReturn(pNewLeaf, VERR_INVALID_PARAMETER);
368
369 /*
370 * Disallow replacing CPU ID leaves that this API currently cannot manage.
371 * These leaves have dependencies on saved-states, see PATMCpuidReplacement().
372 * If you want to modify these leaves, use CPUMSetGuestCpuIdFeature().
373 */
374 if ( pNewLeaf->uLeaf == UINT32_C(0x00000000) /* Standard */
375 || pNewLeaf->uLeaf == UINT32_C(0x00000001)
376 || pNewLeaf->uLeaf == UINT32_C(0x80000000) /* Extended */
377 || pNewLeaf->uLeaf == UINT32_C(0x80000001)
378 || pNewLeaf->uLeaf == UINT32_C(0xc0000000) /* Centaur */
379 || pNewLeaf->uLeaf == UINT32_C(0xc0000001) )
380 {
381 return VERR_NOT_SUPPORTED;
382 }
383
384 return cpumR3CpuIdInsert(pVM, NULL /* ppaLeaves */, NULL /* pcLeaves */, pNewLeaf);
385}
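/* Illustrative sketch: inserting a leaf outside the blocked ranges above. The
   leaf number 0x40000010 (a conventional hypervisor timing-info leaf) and the
   frequency value are example data only, not something this API mandates.
   @code
        CPUMCPUIDLEAF NewLeaf;
        RT_ZERO(NewLeaf);
        NewLeaf.uLeaf = UINT32_C(0x40000010);
        NewLeaf.uEax  = UINT32_C(2500000);  // e.g. TSC frequency in kHz
        int rc = CPUMR3CpuIdInsert(pVM, &NewLeaf);
   @endcode */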
386
387
388#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
389/**
390 * Determines the method the CPU uses to handle unknown CPUID leaves.
391 *
392 * @returns VBox status code.
393 * @param penmUnknownMethod Where to return the method.
394 * @param pDefUnknown Where to return default unknown values. This
395 * will be set, even if the resulting method
396 * doesn't actually needs it.
397 */
398VMMR3DECL(int) CPUMR3CpuIdDetectUnknownLeafMethod(PCPUMUNKNOWNCPUID penmUnknownMethod, PCPUMCPUID pDefUnknown)
399{
400 uint32_t uLastStd = ASMCpuId_EAX(0);
401 uint32_t uLastExt = ASMCpuId_EAX(0x80000000);
402 if (!RTX86IsValidExtRange(uLastExt))
403 uLastExt = 0x80000000;
404
405 uint32_t auChecks[] =
406 {
407 uLastStd + 1,
408 uLastStd + 5,
409 uLastStd + 8,
410 uLastStd + 32,
411 uLastStd + 251,
412 uLastExt + 1,
413 uLastExt + 8,
414 uLastExt + 15,
415 uLastExt + 63,
416 uLastExt + 255,
417 0x7fbbffcc,
418 0x833f7872,
419 0xefff2353,
420 0x35779456,
421 0x1ef6d33e,
422 };
423
424 static const uint32_t s_auValues[] =
425 {
426 0xa95d2156,
427 0x00000001,
428 0x00000002,
429 0x00000008,
430 0x00000000,
431 0x55773399,
432 0x93401769,
433 0x12039587,
434 };
435
436 /*
437 * Simple method, all zeros.
438 */
439 *penmUnknownMethod = CPUMUNKNOWNCPUID_DEFAULTS;
440 pDefUnknown->uEax = 0;
441 pDefUnknown->uEbx = 0;
442 pDefUnknown->uEcx = 0;
443 pDefUnknown->uEdx = 0;
444
445 /*
446 * Intel has been observed returning the last standard leaf.
447 */
448 uint32_t auLast[4];
449 ASMCpuIdExSlow(uLastStd, 0, 0, 0, &auLast[0], &auLast[1], &auLast[2], &auLast[3]);
450
451 uint32_t cChecks = RT_ELEMENTS(auChecks);
452 while (cChecks > 0)
453 {
454 uint32_t auCur[4];
455 ASMCpuIdExSlow(auChecks[cChecks - 1], 0, 0, 0, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
456 if (memcmp(auCur, auLast, sizeof(auCur)))
457 break;
458 cChecks--;
459 }
460 if (cChecks == 0)
461 {
462 /* Now, what happens when the input changes? Esp. ECX. */
463 uint32_t cTotal = 0;
464 uint32_t cSame = 0;
465 uint32_t cLastWithEcx = 0;
466 uint32_t cNeither = 0;
467 uint32_t cValues = RT_ELEMENTS(s_auValues);
468 while (cValues > 0)
469 {
470 uint32_t uValue = s_auValues[cValues - 1];
471 uint32_t auLastWithEcx[4];
472 ASMCpuIdExSlow(uLastStd, uValue, uValue, uValue,
473 &auLastWithEcx[0], &auLastWithEcx[1], &auLastWithEcx[2], &auLastWithEcx[3]);
474
475 cChecks = RT_ELEMENTS(auChecks);
476 while (cChecks > 0)
477 {
478 uint32_t auCur[4];
479 ASMCpuIdExSlow(auChecks[cChecks - 1], uValue, uValue, uValue, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
480 if (!memcmp(auCur, auLast, sizeof(auCur)))
481 {
482 cSame++;
483 if (!memcmp(auCur, auLastWithEcx, sizeof(auCur)))
484 cLastWithEcx++;
485 }
486 else if (!memcmp(auCur, auLastWithEcx, sizeof(auCur)))
487 cLastWithEcx++;
488 else
489 cNeither++;
490 cTotal++;
491 cChecks--;
492 }
493 cValues--;
494 }
495
496 Log(("CPUM: cNeither=%d cSame=%d cLastWithEcx=%d cTotal=%d\n", cNeither, cSame, cLastWithEcx, cTotal));
497 if (cSame == cTotal)
498 *penmUnknownMethod = CPUMUNKNOWNCPUID_LAST_STD_LEAF;
499 else if (cLastWithEcx == cTotal)
500 *penmUnknownMethod = CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX;
501 else
502 *penmUnknownMethod = CPUMUNKNOWNCPUID_LAST_STD_LEAF;
503 pDefUnknown->uEax = auLast[0];
504 pDefUnknown->uEbx = auLast[1];
505 pDefUnknown->uEcx = auLast[2];
506 pDefUnknown->uEdx = auLast[3];
507 return VINF_SUCCESS;
508 }
509
510 /*
511 * Unchanged register values?
512 */
513 cChecks = RT_ELEMENTS(auChecks);
514 while (cChecks > 0)
515 {
516 uint32_t const uLeaf = auChecks[cChecks - 1];
517 uint32_t cValues = RT_ELEMENTS(s_auValues);
518 while (cValues > 0)
519 {
520 uint32_t uValue = s_auValues[cValues - 1];
521 uint32_t auCur[4];
522 ASMCpuIdExSlow(uLeaf, uValue, uValue, uValue, &auCur[0], &auCur[1], &auCur[2], &auCur[3]);
523 if ( auCur[0] != uLeaf
524 || auCur[1] != uValue
525 || auCur[2] != uValue
526 || auCur[3] != uValue)
527 break;
528 cValues--;
529 }
530 if (cValues != 0)
531 break;
532 cChecks--;
533 }
534 if (cChecks == 0)
535 {
536 *penmUnknownMethod = CPUMUNKNOWNCPUID_PASSTHRU;
537 return VINF_SUCCESS;
538 }
539
540 /*
541 * Just go with the simple method.
542 */
543 return VINF_SUCCESS;
544}
545#endif /* RT_ARCH_X86 || RT_ARCH_AMD64 */
546
547
548/**
549 * Translates an unknown CPUID leaf method into the constant name (sans prefix).
550 *
551 * @returns Read only name string.
552 * @param enmUnknownMethod The method to translate.
553 */
554VMMR3DECL(const char *) CPUMR3CpuIdUnknownLeafMethodName(CPUMUNKNOWNCPUID enmUnknownMethod)
555{
556 switch (enmUnknownMethod)
557 {
558 case CPUMUNKNOWNCPUID_DEFAULTS: return "DEFAULTS";
559 case CPUMUNKNOWNCPUID_LAST_STD_LEAF: return "LAST_STD_LEAF";
560 case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: return "LAST_STD_LEAF_WITH_ECX";
561 case CPUMUNKNOWNCPUID_PASSTHRU: return "PASSTHRU";
562
563 case CPUMUNKNOWNCPUID_INVALID:
564 case CPUMUNKNOWNCPUID_END:
565 case CPUMUNKNOWNCPUID_32BIT_HACK:
566 break;
567 }
568 return "Invalid-unknown-CPUID-method";
569}
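/* Illustrative sketch combining the detection and naming APIs above (host
   probing, so x86/amd64 hosts only):
   @code
        CPUMUNKNOWNCPUID enmMethod = CPUMUNKNOWNCPUID_INVALID;
        CPUMCPUID        DefUnknown;
        int rc = CPUMR3CpuIdDetectUnknownLeafMethod(&enmMethod, &DefUnknown);
        if (RT_SUCCESS(rc))
            LogRel(("CPUM: Unknown CPUID leaf method: %s\n", CPUMR3CpuIdUnknownLeafMethodName(enmMethod)));
   @endcode */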
570
571
572/*
573 *
574 * Init related code.
575 * Init related code.
576 * Init related code.
577 *
578 *
579 */
580#ifndef IN_VBOX_CPU_REPORT
581
582
583/**
584 * Gets an exactly matching leaf + sub-leaf in the CPUID leaf array.
585 *
586 * This ignores the fSubLeafMask.
587 *
588 * @returns Pointer to the matching leaf, or NULL if not found.
589 * @param pCpum The CPUM instance data.
590 * @param uLeaf The leaf to locate.
591 * @param uSubLeaf The subleaf to locate.
592 */
593static PCPUMCPUIDLEAF cpumR3CpuIdGetExactLeaf(PCPUM pCpum, uint32_t uLeaf, uint32_t uSubLeaf)
594{
595 uint64_t uNeedle = RT_MAKE_U64(uSubLeaf, uLeaf);
596 PCPUMCPUIDLEAF paLeaves = pCpum->GuestInfo.paCpuIdLeavesR3;
597 uint32_t iEnd = pCpum->GuestInfo.cCpuIdLeaves;
598 if (iEnd)
599 {
600 uint32_t iBegin = 0;
601 for (;;)
602 {
603 uint32_t const i = (iEnd - iBegin) / 2 + iBegin;
604 uint64_t const uCur = RT_MAKE_U64(paLeaves[i].uSubLeaf, paLeaves[i].uLeaf);
605 if (uNeedle < uCur)
606 {
607 if (i > iBegin)
608 iEnd = i;
609 else
610 break;
611 }
612 else if (uNeedle > uCur)
613 {
614 if (i + 1 < iEnd)
615 iBegin = i + 1;
616 else
617 break;
618 }
619 else
620 return &paLeaves[i];
621 }
622 }
623 return NULL;
624}
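/* Note on the 64-bit needle above: RT_MAKE_U64(uSubLeaf, uLeaf) places the
   leaf number in the high dword, so one unsigned compare orders entries by
   leaf first and sub-leaf second. Illustrative check:
   @code
        Assert(RT_MAKE_U64(5 /*lo*/, 1 /*hi*/) < RT_MAKE_U64(0, 2)); // leaf 1/5 sorts before leaf 2/0
   @endcode */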
625
626
627/**
628 * Loads MSR range overrides.
629 *
630 * This must be called before the MSR ranges are moved from the normal heap to
631 * the hyper heap!
632 *
633 * @returns VBox status code (VMSetError called).
634 * @param pVM The cross context VM structure.
635 * @param pMsrNode The CFGM node with the MSR overrides.
636 */
637static int cpumR3LoadMsrOverrides(PVM pVM, PCFGMNODE pMsrNode)
638{
639 for (PCFGMNODE pNode = CFGMR3GetFirstChild(pMsrNode); pNode; pNode = CFGMR3GetNextChild(pNode))
640 {
641 /*
642 * Assemble a valid MSR range.
643 */
644 CPUMMSRRANGE MsrRange;
645 MsrRange.offCpumCpu = 0;
646 MsrRange.fReserved = 0;
647
648 int rc = CFGMR3GetName(pNode, MsrRange.szName, sizeof(MsrRange.szName));
649 if (RT_FAILURE(rc))
650 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry (name is probably too long): %Rrc\n", rc);
651
652 rc = CFGMR3QueryU32(pNode, "First", &MsrRange.uFirst);
653 if (RT_FAILURE(rc))
654 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying mandatory 'First' value: %Rrc\n",
655 MsrRange.szName, rc);
656
657 rc = CFGMR3QueryU32Def(pNode, "Last", &MsrRange.uLast, MsrRange.uFirst);
658 if (RT_FAILURE(rc))
659 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Last' value: %Rrc\n",
660 MsrRange.szName, rc);
661
662 char szType[32];
663 rc = CFGMR3QueryStringDef(pNode, "Type", szType, sizeof(szType), "FixedValue");
664 if (RT_FAILURE(rc))
665 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Type' value: %Rrc\n",
666 MsrRange.szName, rc);
667 if (!RTStrICmp(szType, "FixedValue"))
668 {
669 MsrRange.enmRdFn = kCpumMsrRdFn_FixedValue;
670 MsrRange.enmWrFn = kCpumMsrWrFn_IgnoreWrite;
671
672 rc = CFGMR3QueryU64Def(pNode, "Value", &MsrRange.uValue, 0);
673 if (RT_FAILURE(rc))
674 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Value' value: %Rrc\n",
675 MsrRange.szName, rc);
676
677 rc = CFGMR3QueryU64Def(pNode, "WrGpMask", &MsrRange.fWrGpMask, 0);
678 if (RT_FAILURE(rc))
679 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'WrGpMask' value: %Rrc\n",
680 MsrRange.szName, rc);
681
682 rc = CFGMR3QueryU64Def(pNode, "WrIgnMask", &MsrRange.fWrIgnMask, 0);
683 if (RT_FAILURE(rc))
684 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'WrIgnMask' value: %Rrc\n",
685 MsrRange.szName, rc);
686 }
687 else
688 return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS,
689 "Invalid MSR entry '%s': Unknown type '%s'\n", MsrRange.szName, szType);
690
691 /*
692 * Insert the range into the table (replaces/splits/shrinks existing
693 * MSR ranges).
694 */
695 rc = cpumR3MsrRangesInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paMsrRangesR3, &pVM->cpum.s.GuestInfo.cMsrRanges,
696 &MsrRange);
697 if (RT_FAILURE(rc))
698 return VMSetError(pVM, rc, RT_SRC_POS, "Error adding MSR entry '%s': %Rrc\n", MsrRange.szName, rc);
699 }
700
701 return VINF_SUCCESS;
702}
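/* Illustrative CFGM input for the loop above. The extradata prefix and the
   node path under /CPUM/ are assumptions for this example; the value names
   (First, Last, Type, Value, WrGpMask, WrIgnMask) are the ones the code
   queries.
   @code
        VBoxManage setextradata "MyVM" "VBoxInternal/CPUM/MSRs/ExampleMsr/First" "0x00000123"
        VBoxManage setextradata "MyVM" "VBoxInternal/CPUM/MSRs/ExampleMsr/Type"  "FixedValue"
        VBoxManage setextradata "MyVM" "VBoxInternal/CPUM/MSRs/ExampleMsr/Value" "0x1"
   @endcode */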
703
704
705/**
706 * Loads CPUID leaf overrides.
707 *
708 * This must be called before the CPUID leaves are moved from the normal
709 * heap to the hyper heap!
710 *
711 * @returns VBox status code (VMSetError called).
712 * @param pVM The cross context VM structure.
713 * @param pParentNode The CFGM node with the CPUID leaves.
714 * @param pszLabel How to label the overrides we're loading.
715 */
716static int cpumR3LoadCpuIdOverrides(PVM pVM, PCFGMNODE pParentNode, const char *pszLabel)
717{
718 for (PCFGMNODE pNode = CFGMR3GetFirstChild(pParentNode); pNode; pNode = CFGMR3GetNextChild(pNode))
719 {
720 /*
721 * Get the leaf and subleaf numbers.
722 */
723 char szName[128];
724 int rc = CFGMR3GetName(pNode, szName, sizeof(szName));
725 if (RT_FAILURE(rc))
726 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry (name is probably too long): %Rrc\n", pszLabel, rc);
727
728 /* The leaf number is either specified directly or through the node name. */
729 uint32_t uLeaf;
730 rc = CFGMR3QueryU32(pNode, "Leaf", &uLeaf);
731 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
732 {
733 rc = RTStrToUInt32Full(szName, 16, &uLeaf);
734 if (rc != VINF_SUCCESS)
735 return VMSetError(pVM, VERR_INVALID_NAME, RT_SRC_POS,
736 "Invalid %s entry: Invalid leaf number: '%s' \n", pszLabel, szName);
737 }
738 else if (RT_FAILURE(rc))
739 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'Leaf' value: %Rrc\n",
740 pszLabel, szName, rc);
741
742 uint32_t uSubLeaf;
743 rc = CFGMR3QueryU32Def(pNode, "SubLeaf", &uSubLeaf, 0);
744 if (RT_FAILURE(rc))
745 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'SubLeaf' value: %Rrc\n",
746 pszLabel, szName, rc);
747
748 uint32_t fSubLeafMask;
749 rc = CFGMR3QueryU32Def(pNode, "SubLeafMask", &fSubLeafMask, 0);
750 if (RT_FAILURE(rc))
751 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'SubLeafMask' value: %Rrc\n",
752 pszLabel, szName, rc);
753
754 /*
755 * Look up the specified leaf, since the output register values
756 * default to any existing values. This allows overriding a single
757 * register without needing to know the other values.
758 */
759 PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetExactLeaf(&pVM->cpum.s, uLeaf, uSubLeaf);
760 CPUMCPUIDLEAF Leaf;
761 if (pLeaf)
762 Leaf = *pLeaf;
763 else
764 RT_ZERO(Leaf);
765 Leaf.uLeaf = uLeaf;
766 Leaf.uSubLeaf = uSubLeaf;
767 Leaf.fSubLeafMask = fSubLeafMask;
768
769 rc = CFGMR3QueryU32Def(pNode, "eax", &Leaf.uEax, Leaf.uEax);
770 if (RT_FAILURE(rc))
771 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'eax' value: %Rrc\n",
772 pszLabel, szName, rc);
773 rc = CFGMR3QueryU32Def(pNode, "ebx", &Leaf.uEbx, Leaf.uEbx);
774 if (RT_FAILURE(rc))
775 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'ebx' value: %Rrc\n",
776 pszLabel, szName, rc);
777 rc = CFGMR3QueryU32Def(pNode, "ecx", &Leaf.uEcx, Leaf.uEcx);
778 if (RT_FAILURE(rc))
779 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'ecx' value: %Rrc\n",
780 pszLabel, szName, rc);
781 rc = CFGMR3QueryU32Def(pNode, "edx", &Leaf.uEdx, Leaf.uEdx);
782 if (RT_FAILURE(rc))
783 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'edx' value: %Rrc\n",
784 pszLabel, szName, rc);
785
786 /*
787 * Insert the leaf into the table (replaces existing ones).
788 */
789 rc = cpumR3CpuIdInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, &pVM->cpum.s.GuestInfo.cCpuIdLeaves,
790 &Leaf);
791 if (RT_FAILURE(rc))
792 return VMSetError(pVM, rc, RT_SRC_POS, "Error adding CPUID leaf entry '%s': %Rrc\n", szName, rc);
793 }
794
795 return VINF_SUCCESS;
796}
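/* Illustrative CFGM input matching the code above: the node name gives the
   leaf number in hex (no 'Leaf' value needed) and unspecified registers keep
   their existing values. The VM name and the /CPUM/CPUID/ path are
   placeholders for this example.
   @code
        VBoxManage setextradata "MyVM" "VBoxInternal/CPUM/CPUID/1/ecx" "0x80202001"
   @endcode */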
797
798
799
800/**
801 * Fetches overrides for a CPUID leaf.
802 *
803 * @returns VBox status code.
804 * @param pLeaf The leaf to load the overrides into.
805 * @param pCfgNode The CFGM node containing the overrides
806 * (/CPUM/HostCPUID/ or /CPUM/CPUID/).
807 * @param iLeaf The CPUID leaf number.
808 */
809static int cpumR3CpuIdFetchLeafOverride(PCPUMCPUID pLeaf, PCFGMNODE pCfgNode, uint32_t iLeaf)
810{
811 PCFGMNODE pLeafNode = CFGMR3GetChildF(pCfgNode, "%RX32", iLeaf);
812 if (pLeafNode)
813 {
814 uint32_t u32;
815 int rc = CFGMR3QueryU32(pLeafNode, "eax", &u32);
816 if (RT_SUCCESS(rc))
817 pLeaf->uEax = u32;
818 else
819 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
820
821 rc = CFGMR3QueryU32(pLeafNode, "ebx", &u32);
822 if (RT_SUCCESS(rc))
823 pLeaf->uEbx = u32;
824 else
825 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
826
827 rc = CFGMR3QueryU32(pLeafNode, "ecx", &u32);
828 if (RT_SUCCESS(rc))
829 pLeaf->uEcx = u32;
830 else
831 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
832
833 rc = CFGMR3QueryU32(pLeafNode, "edx", &u32);
834 if (RT_SUCCESS(rc))
835 pLeaf->uEdx = u32;
836 else
837 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
838
839 }
840 return VINF_SUCCESS;
841}
842
843
844/**
845 * Load the overrides for a set of CPUID leaves.
846 *
847 * @returns VBox status code.
848 * @param paLeaves The leaf array.
849 * @param cLeaves The number of leaves.
850 * @param uStart The start leaf number.
851 * @param pCfgNode The CFGM node containing the overrides
852 * (/CPUM/HostCPUID/ or /CPUM/CPUID/).
853 */
854static int cpumR3CpuIdInitLoadOverrideSet(uint32_t uStart, PCPUMCPUID paLeaves, uint32_t cLeaves, PCFGMNODE pCfgNode)
855{
856 for (uint32_t i = 0; i < cLeaves; i++)
857 {
858 int rc = cpumR3CpuIdFetchLeafOverride(&paLeaves[i], pCfgNode, uStart + i);
859 if (RT_FAILURE(rc))
860 return rc;
861 }
862
863 return VINF_SUCCESS;
864}
865
866
867/**
868 * Installs the CPUID leaves and explodes the data into structures like
869 * GuestFeatures and CPUMCTX::aoffXState.
870 *
871 * @returns VBox status code.
872 * @param pVM The cross context VM structure.
873 * @param pCpum The CPUM part of @a pVM.
874 * @param paLeaves The leaves. These will be copied (but not freed).
875 * @param cLeaves The number of leaves.
876 * @param pMsrs The MSRs.
877 */
878static int cpumR3CpuIdInstallAndExplodeLeaves(PVM pVM, PCPUM pCpum, PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs)
879{
880# ifdef VBOX_STRICT
881 cpumCpuIdAssertOrder(paLeaves, cLeaves);
882# endif
883
884 /*
885 * Install the CPUID information.
886 */
887 AssertLogRelMsgReturn(cLeaves <= RT_ELEMENTS(pVM->cpum.s.GuestInfo.aCpuIdLeaves),
888 ("cLeaves=%u - max %u\n", cLeaves, RT_ELEMENTS(pVM->cpum.s.GuestInfo.aCpuIdLeaves)),
889 VERR_CPUM_IPE_1); /** @todo better status! */
890 if (paLeaves != pCpum->GuestInfo.aCpuIdLeaves)
891 memcpy(pCpum->GuestInfo.aCpuIdLeaves, paLeaves, cLeaves * sizeof(paLeaves[0]));
892 pCpum->GuestInfo.paCpuIdLeavesR3 = pCpum->GuestInfo.aCpuIdLeaves;
893 pCpum->GuestInfo.cCpuIdLeaves = cLeaves;
894
895 /*
896 * Update the default CPUID leaf if necessary.
897 */
898 switch (pCpum->GuestInfo.enmUnknownCpuIdMethod)
899 {
900 case CPUMUNKNOWNCPUID_LAST_STD_LEAF:
901 case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX:
902 {
903 /* We don't use CPUID(0).eax here because of the NT hack that only
904 changes that value without actually removing any leaves. */
905 uint32_t i = 0;
906 if ( pCpum->GuestInfo.cCpuIdLeaves > 0
907 && pCpum->GuestInfo.paCpuIdLeavesR3[0].uLeaf <= UINT32_C(0xff))
908 {
909 while ( i + 1 < pCpum->GuestInfo.cCpuIdLeaves
910 && pCpum->GuestInfo.paCpuIdLeavesR3[i + 1].uLeaf <= UINT32_C(0xff))
911 i++;
912 pCpum->GuestInfo.DefCpuId.uEax = pCpum->GuestInfo.paCpuIdLeavesR3[i].uEax;
913 pCpum->GuestInfo.DefCpuId.uEbx = pCpum->GuestInfo.paCpuIdLeavesR3[i].uEbx;
914 pCpum->GuestInfo.DefCpuId.uEcx = pCpum->GuestInfo.paCpuIdLeavesR3[i].uEcx;
915 pCpum->GuestInfo.DefCpuId.uEdx = pCpum->GuestInfo.paCpuIdLeavesR3[i].uEdx;
916 }
917 break;
918 }
919 default:
920 break;
921 }
922
923 /*
924 * Explode the guest CPU features.
925 */
926 int rc = cpumCpuIdExplodeFeaturesX86(pCpum->GuestInfo.paCpuIdLeavesR3, pCpum->GuestInfo.cCpuIdLeaves, pMsrs,
927 &pCpum->GuestFeatures);
928 AssertLogRelRCReturn(rc, rc);
929
930 /*
931 * Adjust the scalable bus frequency according to the CPUID information
932 * we're now using.
933 */
934 if (CPUMMICROARCH_IS_INTEL_CORE7(pVM->cpum.s.GuestFeatures.enmMicroarch))
935 pCpum->GuestInfo.uScalableBusFreq = pCpum->GuestFeatures.enmMicroarch >= kCpumMicroarch_Intel_Core7_SandyBridge
936 ? UINT64_C(100000000) /* 100MHz */
937 : UINT64_C(133333333); /* 133MHz */
938
939 /*
940 * Populate the legacy arrays. Currently used for everything, later only
941 * for patch manager.
942 */
943 struct { PCPUMCPUID paCpuIds; uint32_t cCpuIds, uBase; } aOldRanges[] =
944 {
945 { pCpum->aGuestCpuIdPatmStd, RT_ELEMENTS(pCpum->aGuestCpuIdPatmStd), 0x00000000 },
946 { pCpum->aGuestCpuIdPatmExt, RT_ELEMENTS(pCpum->aGuestCpuIdPatmExt), 0x80000000 },
947 { pCpum->aGuestCpuIdPatmCentaur, RT_ELEMENTS(pCpum->aGuestCpuIdPatmCentaur), 0xc0000000 },
948 };
949 for (uint32_t i = 0; i < RT_ELEMENTS(aOldRanges); i++)
950 {
951 uint32_t cLeft = aOldRanges[i].cCpuIds;
952 uint32_t uLeaf = aOldRanges[i].uBase + cLeft;
953 PCPUMCPUID pLegacyLeaf = &aOldRanges[i].paCpuIds[cLeft];
954 while (cLeft-- > 0)
955 {
956 uLeaf--;
957 pLegacyLeaf--;
958
959 PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetExactLeaf(pCpum, uLeaf, 0 /* uSubLeaf */);
960 if (pLeaf)
961 {
962 pLegacyLeaf->uEax = pLeaf->uEax;
963 pLegacyLeaf->uEbx = pLeaf->uEbx;
964 pLegacyLeaf->uEcx = pLeaf->uEcx;
965 pLegacyLeaf->uEdx = pLeaf->uEdx;
966 }
967 else
968 *pLegacyLeaf = pCpum->GuestInfo.DefCpuId;
969 }
970 }
971
972 /*
973 * Configure XSAVE offsets according to the CPUID info and set the feature flags.
974 */
975 PVMCPU pVCpu0 = pVM->apCpusR3[0];
976 AssertCompile(sizeof(pVCpu0->cpum.s.Guest.abXState) == CPUM_MAX_XSAVE_AREA_SIZE);
977 memset(&pVCpu0->cpum.s.Guest.aoffXState[0], 0xff, sizeof(pVCpu0->cpum.s.Guest.aoffXState));
978 pVCpu0->cpum.s.Guest.aoffXState[XSAVE_C_X87_BIT] = 0;
979 pVCpu0->cpum.s.Guest.aoffXState[XSAVE_C_SSE_BIT] = 0;
980 for (uint32_t iComponent = XSAVE_C_SSE_BIT + 1; iComponent < 63; iComponent++)
981 if (pCpum->fXStateGuestMask & RT_BIT_64(iComponent))
982 {
983 PCPUMCPUIDLEAF pSubLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 0xd, iComponent);
984 AssertLogRelMsgReturn(pSubLeaf, ("iComponent=%#x\n", iComponent), VERR_CPUM_IPE_1);
985 AssertLogRelMsgReturn(pSubLeaf->fSubLeafMask >= iComponent, ("iComponent=%#x\n", iComponent), VERR_CPUM_IPE_1);
986 AssertLogRelMsgReturn( pSubLeaf->uEax > 0
987 && pSubLeaf->uEbx >= CPUM_MIN_XSAVE_AREA_SIZE
988 && pSubLeaf->uEax <= pCpum->GuestFeatures.cbMaxExtendedState
989 && pSubLeaf->uEbx <= pCpum->GuestFeatures.cbMaxExtendedState
990 && pSubLeaf->uEbx + pSubLeaf->uEax <= pCpum->GuestFeatures.cbMaxExtendedState,
991 ("iComponent=%#x eax=%#x ebx=%#x cbMax=%#x\n", iComponent, pSubLeaf->uEax, pSubLeaf->uEbx,
992 pCpum->GuestFeatures.cbMaxExtendedState),
993 VERR_CPUM_IPE_1);
994 pVCpu0->cpum.s.Guest.aoffXState[iComponent] = pSubLeaf->uEbx;
995 }
996
997 /* Copy the CPU #0 data to the other CPUs. */
998 for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
999 {
1000 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
1001 memcpy(&pVCpu->cpum.s.Guest.aoffXState[0], &pVCpu0->cpum.s.Guest.aoffXState[0], sizeof(pVCpu0->cpum.s.Guest.aoffXState));
1002 }
1003
1004 return VINF_SUCCESS;
1005}
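/* Worked example of the XSAVE offset wiring above (typical AVX numbers, for
   illustration only): with CPUID(0xd,2).EAX = 0x100 (size) and
   CPUID(0xd,2).EBX = 0x240 (offset), the loop sets
   @code
        aoffXState[XSAVE_C_YMM_BIT] = 0x240;   // YMM state at abXState[0x240..0x33f]
   @endcode
   x87 and SSE state always live at offset 0 inside the legacy FXSAVE area. */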
1006
1007
1008/** @name Instruction Set Extension Options
1009 * @{ */
1010/** Configuration option type (extended boolean, really). */
1011typedef uint8_t CPUMISAEXTCFG;
1012/** Always disable the extension. */
1013#define CPUMISAEXTCFG_DISABLED false
1014/** Enable the extension if it's supported by the host CPU. */
1015#define CPUMISAEXTCFG_ENABLED_SUPPORTED true
1016/** Enable the extension if it's supported by the host CPU, or always when not on x86/AMD64. */
1017#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
1018# define CPUMISAEXTCFG_ENABLED_SUPPORTED_OR_NOT_AMD64 CPUMISAEXTCFG_ENABLED_SUPPORTED
1019#else
1020# define CPUMISAEXTCFG_ENABLED_SUPPORTED_OR_NOT_AMD64 CPUMISAEXTCFG_ENABLED_ALWAYS
1021#endif
1022/** Enable the extension if it's supported by the host CPU, but don't let
1023 * the portable CPUID feature disable it. */
1024#define CPUMISAEXTCFG_ENABLED_PORTABLE UINT8_C(127)
1025/** Always enable the extension. */
1026#define CPUMISAEXTCFG_ENABLED_ALWAYS UINT8_C(255)
1027/** @} */
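/* Quick reference for the extended boolean above (illustrative):
   @code
        CPUMISAEXTCFG enmCfg;
        enmCfg = CPUMISAEXTCFG_DISABLED;           // never expose the feature
        enmCfg = CPUMISAEXTCFG_ENABLED_SUPPORTED;  // expose only if the host has it
        enmCfg = CPUMISAEXTCFG_ENABLED_PORTABLE;   // like SUPPORTED, but survives portable-CPUID stripping
        enmCfg = CPUMISAEXTCFG_ENABLED_ALWAYS;     // expose even without host support
   @endcode */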
1028
1029/**
1030 * CPUID Configuration (from CFGM).
1031 *
1032 * @remarks The members aren't documented since we would only be duplicating the
1033 * \@cfgm entries in cpumR3CpuIdReadConfig.
1034 */
1035typedef struct CPUMCPUIDCONFIG
1036{
1037 bool fNt4LeafLimit;
1038 bool fInvariantTsc;
1039 bool fInvariantApic;
1040 bool fForceVme;
1041 bool fNestedHWVirt;
1042 bool fSpecCtrl;
1043
1044 CPUMISAEXTCFG enmCmpXchg16b;
1045 CPUMISAEXTCFG enmMonitor;
1046 CPUMISAEXTCFG enmMWaitExtensions;
1047 CPUMISAEXTCFG enmSse41;
1048 CPUMISAEXTCFG enmSse42;
1049 CPUMISAEXTCFG enmAvx;
1050 CPUMISAEXTCFG enmAvx2;
1051 CPUMISAEXTCFG enmXSave;
1052 CPUMISAEXTCFG enmAesNi;
1053 CPUMISAEXTCFG enmPClMul;
1054 CPUMISAEXTCFG enmPopCnt;
1055 CPUMISAEXTCFG enmMovBe;
1056 CPUMISAEXTCFG enmRdRand;
1057 CPUMISAEXTCFG enmRdSeed;
1058 CPUMISAEXTCFG enmSha;
1059 CPUMISAEXTCFG enmAdx;
1060 CPUMISAEXTCFG enmCLFlushOpt;
1061 CPUMISAEXTCFG enmFsGsBase;
1062 CPUMISAEXTCFG enmPcid;
1063 CPUMISAEXTCFG enmInvpcid;
1064 CPUMISAEXTCFG enmFlushCmdMsr;
1065 CPUMISAEXTCFG enmMdsClear;
1066 CPUMISAEXTCFG enmArchCapMsr;
1067 CPUMISAEXTCFG enmFma;
1068 CPUMISAEXTCFG enmF16c;
1069 CPUMISAEXTCFG enmMcdtNo;
1070 CPUMISAEXTCFG enmMonitorMitgNo;
1071
1072 CPUMISAEXTCFG enmAbm;
1073 CPUMISAEXTCFG enmSse4A;
1074 CPUMISAEXTCFG enmMisAlnSse;
1075 CPUMISAEXTCFG enm3dNowPrf;
1076 CPUMISAEXTCFG enmAmdExtMmx;
1077
1078 uint32_t uMaxStdLeaf;
1079 uint32_t uMaxExtLeaf;
1080 uint32_t uMaxCentaurLeaf;
1081 uint32_t uMaxIntelFamilyModelStep;
1082 char szCpuName[128];
1083} CPUMCPUIDCONFIG;
1084/** Pointer to CPUID config (from CFGM). */
1085typedef CPUMCPUIDCONFIG *PCPUMCPUIDCONFIG;
1086
1087
1088/**
1089 * Mini CPU selection support for making Mac OS X happy.
1090 *
1091 * Executes the /CPUM/MaxIntelFamilyModelStep config.
1092 *
1093 * @param pCpum The CPUM instance data.
1094 * @param pConfig The CPUID configuration we've read from CFGM.
1095 */
1096static void cpumR3CpuIdLimitIntelFamModStep(PCPUM pCpum, PCPUMCPUIDCONFIG pConfig)
1097{
1098 if (pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
1099 {
1100 PCPUMCPUIDLEAF pStdFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 1, 0);
1101 uint32_t uCurIntelFamilyModelStep = RT_MAKE_U32_FROM_U8(RTX86GetCpuStepping(pStdFeatureLeaf->uEax),
1102 RTX86GetCpuModelIntel(pStdFeatureLeaf->uEax),
1103 RTX86GetCpuFamily(pStdFeatureLeaf->uEax),
1104 0);
1105 uint32_t uMaxIntelFamilyModelStep = pConfig->uMaxIntelFamilyModelStep;
1106 if (pConfig->uMaxIntelFamilyModelStep < uCurIntelFamilyModelStep)
1107 {
1108 uint32_t uNew = pStdFeatureLeaf->uEax & UINT32_C(0xf0003000);
1109 uNew |= RT_BYTE1(uMaxIntelFamilyModelStep) & 0xf; /* stepping */
1110 uNew |= (RT_BYTE2(uMaxIntelFamilyModelStep) & 0xf) << 4; /* 4 low model bits */
1111 uNew |= (RT_BYTE2(uMaxIntelFamilyModelStep) >> 4) << 16; /* 4 high model bits */
1112 uNew |= (RT_BYTE3(uMaxIntelFamilyModelStep) & 0xf) << 8; /* 4 low family bits */
1113 if (RT_BYTE3(uMaxIntelFamilyModelStep) > 0xf) /* 8 high family bits, using intel's suggested calculation. */
1114 uNew |= ( (RT_BYTE3(uMaxIntelFamilyModelStep) - (RT_BYTE3(uMaxIntelFamilyModelStep) & 0xf)) & 0xff ) << 20;
1115 LogRel(("CPU: CPUID(1).EAX %#x -> %#x (uMaxIntelFamilyModelStep=%#x, uCurIntelFamilyModelStep=%#x)\n",
1116 pStdFeatureLeaf->uEax, uNew, uMaxIntelFamilyModelStep, uCurIntelFamilyModelStep));
1117 pStdFeatureLeaf->uEax = uNew;
1118 }
1119 }
1120}
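/* Worked example (illustrative): /CPUM/MaxIntelFamilyModelStep = 0x00061707
   encodes stepping 7, model 0x17 and family 0x06 (RT_MAKE_U32_FROM_U8 byte
   order: stepping, model, family, 0). Run through the bit twiddling above,
   this yields CPUID(1).EAX = 0x00010677, i.e. a Penryn-era signature
   (ext. model 1, model 7, family 6, stepping 7). */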
1121
1122
1123
1124/**
1125 * Limits the number of entries, zapping the remainder.
1126 *
1127 * The limits mask off stuff about power saving and similar features. This
1128 * is perhaps a bit crudely done, as those leaves probably also contain some
1129 * relatively harmless info (like words about having a constant TSC).
1130 *
1131 * @param pCpum The CPUM instance data.
1132 * @param pConfig The CPUID configuration we've read from CFGM.
1133 */
1134static void cpumR3CpuIdLimitLeaves(PCPUM pCpum, PCPUMCPUIDCONFIG pConfig)
1135{
1136 /*
1137 * Standard leaves.
1138 */
1139 uint32_t uSubLeaf = 0;
1140 PCPUMCPUIDLEAF pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 0, uSubLeaf);
1141 if (pCurLeaf)
1142 {
1143 uint32_t uLimit = pCurLeaf->uEax;
1144 if (uLimit <= UINT32_C(0x000fffff))
1145 {
1146 if (uLimit > pConfig->uMaxStdLeaf)
1147 {
1148 pCurLeaf->uEax = uLimit = pConfig->uMaxStdLeaf;
1149 cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
1150 uLimit + 1, UINT32_C(0x000fffff));
1151 }
1152
1153 /* NT4 hack, no zapping of extra leaves here. */
1154 if (pConfig->fNt4LeafLimit && uLimit > 3)
1155 pCurLeaf->uEax = uLimit = 3;
1156
1157 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x00000000), ++uSubLeaf)) != NULL)
1158 pCurLeaf->uEax = uLimit;
1159 }
1160 else
1161 {
1162 LogRel(("CPUID: Invalid standard range: %#x\n", uLimit));
1163 cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
1164 UINT32_C(0x00000000), UINT32_C(0x0fffffff));
1165 }
1166 }
1167
1168 /*
1169 * Extended leaves.
1170 */
1171 uSubLeaf = 0;
1172 pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000000), uSubLeaf);
1173 if (pCurLeaf)
1174 {
1175 uint32_t uLimit = pCurLeaf->uEax;
1176 if ( uLimit >= UINT32_C(0x80000000)
1177 && uLimit <= UINT32_C(0x800fffff))
1178 {
1179 if (uLimit > pConfig->uMaxExtLeaf)
1180 {
1181 pCurLeaf->uEax = uLimit = pConfig->uMaxExtLeaf;
1182 cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
1183 uLimit + 1, UINT32_C(0x800fffff));
1184 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000000), ++uSubLeaf)) != NULL)
1185 pCurLeaf->uEax = uLimit;
1186 }
1187 }
1188 else
1189 {
1190 LogRel(("CPUID: Invalid extended range: %#x\n", uLimit));
1191 cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
1192 UINT32_C(0x80000000), UINT32_C(0x8ffffffd));
1193 }
1194 }
1195
1196 /*
1197 * Centaur leaves (VIA).
1198 */
1199 uSubLeaf = 0;
1200 pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0xc0000000), uSubLeaf);
1201 if (pCurLeaf)
1202 {
1203 uint32_t uLimit = pCurLeaf->uEax;
1204 if ( uLimit >= UINT32_C(0xc0000000)
1205 && uLimit <= UINT32_C(0xc00fffff))
1206 {
1207 if (uLimit > pConfig->uMaxCentaurLeaf)
1208 {
1209 pCurLeaf->uEax = uLimit = pConfig->uMaxCentaurLeaf;
1210 cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
1211 uLimit + 1, UINT32_C(0xcfffffff));
1212 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0xc0000000), ++uSubLeaf)) != NULL)
1213 pCurLeaf->uEax = uLimit;
1214 }
1215 }
1216 else
1217 {
1218 LogRel(("CPUID: Invalid centaur range: %#x\n", uLimit));
1219 cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
1220 UINT32_C(0xc0000000), UINT32_C(0xcfffffff));
1221 }
1222 }
1223}
1224
1225
1226/**
1227 * Clears a CPUID leaf and all sub-leaves (to zero).
1228 *
1229 * @param pCpum The CPUM instance data.
1230 * @param uLeaf The leaf to clear.
1231 */
1232static void cpumR3CpuIdZeroLeaf(PCPUM pCpum, uint32_t uLeaf)
1233{
1234 uint32_t uSubLeaf = 0;
1235 PCPUMCPUIDLEAF pCurLeaf;
1236 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, uLeaf, uSubLeaf)) != NULL)
1237 {
1238 pCurLeaf->uEax = 0;
1239 pCurLeaf->uEbx = 0;
1240 pCurLeaf->uEcx = 0;
1241 pCurLeaf->uEdx = 0;
1242 uSubLeaf++;
1243 }
1244}
1245
1246
1247/**
1248 * Used by cpumR3CpuIdSanitize to ensure that we don't have any sub-leaves for
1249 * the given leaf.
1250 *
1251 * @returns pLeaf.
1252 * @param pCpum The CPUM instance data.
1253 * @param pLeaf The leaf to ensure is alone with its EAX input value.
1254 */
1255static PCPUMCPUIDLEAF cpumR3CpuIdMakeSingleLeaf(PCPUM pCpum, PCPUMCPUIDLEAF pLeaf)
1256{
1257 Assert((uintptr_t)(pLeaf - pCpum->GuestInfo.paCpuIdLeavesR3) < pCpum->GuestInfo.cCpuIdLeaves);
1258 if (pLeaf->fSubLeafMask != 0)
1259 {
1260 /*
1261 * Figure out how many sub-leaves in need of removal (we'll keep the first).
1262 * Log everything while we're at it.
1263 */
1264 LogRel(("CPUM:\n"
1265 "CPUM: Unexpected CPUID sub-leaves for leaf %#x; fSubLeafMask=%#x\n", pLeaf->uLeaf, pLeaf->fSubLeafMask));
1266 PCPUMCPUIDLEAF pLast = &pCpum->GuestInfo.paCpuIdLeavesR3[pCpum->GuestInfo.cCpuIdLeaves - 1];
1267 PCPUMCPUIDLEAF pSubLeaf = pLeaf;
1268 for (;;)
1269 {
1270 LogRel(("CPUM: %08x/%08x: %08x %08x %08x %08x; flags=%#x mask=%#x\n",
1271 pSubLeaf->uLeaf, pSubLeaf->uSubLeaf,
1272 pSubLeaf->uEax, pSubLeaf->uEbx, pSubLeaf->uEcx, pSubLeaf->uEdx,
1273 pSubLeaf->fFlags, pSubLeaf->fSubLeafMask));
1274 if (pSubLeaf == pLast || pSubLeaf[1].uLeaf != pLeaf->uLeaf)
1275 break;
1276 pSubLeaf++;
1277 }
1278 LogRel(("CPUM:\n"));
1279
1280 /*
1281 * Remove the offending sub-leaves.
1282 */
1283 if (pSubLeaf != pLeaf)
1284 {
1285 if (pSubLeaf != pLast)
1286 memmove(pLeaf + 1, pSubLeaf + 1, (uintptr_t)pLast - (uintptr_t)pSubLeaf);
1287 pCpum->GuestInfo.cCpuIdLeaves -= (uint32_t)(pSubLeaf - pLeaf);
1288 }
1289
1290 /*
1291 * Convert the first sub-leaf into a single leaf.
1292 */
1293 pLeaf->uSubLeaf = 0;
1294 pLeaf->fSubLeafMask = 0;
1295 }
1296 return pLeaf;
1297}
1298
1299
1300/**
1301 * Sanitizes and adjust the CPUID leaves.
1302 *
1303 * Drop features that aren't virtualized (or virtualizable). Adjust information
1304 * and capabilities to fit the virtualized hardware. Remove information the
1305 * guest shouldn't have (because it's wrong in the virtual world or because it
1306 * gives away host details) or that we have no documentation for and no idea
1307 * what it means.
1308 *
1309 * @returns VBox status code.
1310 * @param pVM The cross context VM structure (for cCpus).
1311 * @param pCpum The CPUM instance data.
1312 * @param pConfig The CPUID configuration we've read from CFGM.
1313 */
1314static int cpumR3CpuIdSanitize(PVM pVM, PCPUM pCpum, PCPUMCPUIDCONFIG pConfig)
1315{
1316#define PORTABLE_CLEAR_BITS_WHEN(Lvl, a_pLeafReg, FeatNm, fMask, uValue) \
1317 if ( pCpum->u8PortableCpuIdLevel >= (Lvl) && ((a_pLeafReg) & (fMask)) == (uValue) ) \
1318 { \
1319 LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: %#x -> 0\n", (a_pLeafReg) & (fMask))); \
1320 (a_pLeafReg) &= ~(uint32_t)(fMask); \
1321 }
1322#define PORTABLE_DISABLE_FEATURE_BIT(Lvl, a_pLeafReg, FeatNm, fBitMask) \
1323 if ( pCpum->u8PortableCpuIdLevel >= (Lvl) && ((a_pLeafReg) & (fBitMask)) ) \
1324 { \
1325 LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: 1 -> 0\n")); \
1326 (a_pLeafReg) &= ~(uint32_t)(fBitMask); \
1327 }
1328#define PORTABLE_DISABLE_FEATURE_BIT_CFG(Lvl, a_pLeafReg, FeatNm, fBitMask, enmConfig) \
1329 if ( pCpum->u8PortableCpuIdLevel >= (Lvl) \
1330 && ((a_pLeafReg) & (fBitMask)) \
1331 && (enmConfig) != CPUMISAEXTCFG_ENABLED_PORTABLE ) \
1332 { \
1333 LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: 1 -> 0\n")); \
1334 (a_pLeafReg) &= ~(uint32_t)(fBitMask); \
1335 }
1336 Assert(pCpum->GuestFeatures.enmCpuVendor != CPUMCPUVENDOR_INVALID);
1337
1338 /* The CPUID entries we start with here aren't necessarily those of the host, so we
1339 must consult HostFeatures when processing CPUMISAEXTCFG variables. */
1340#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
1341 PCCPUMFEATURES const pHstFeat = &pCpum->HostFeatures.s;
1342#else
1343 PCCPUMFEATURES const pHstFeat = &pCpum->GuestFeatures;
1344#endif
1345#define PASSTHRU_FEATURE(enmConfig, fHostFeature, fConst) \
1346 ((enmConfig) && ((enmConfig) == CPUMISAEXTCFG_ENABLED_ALWAYS || (fHostFeature)) ? (fConst) : 0)
1347#define PASSTHRU_FEATURE_EX(enmConfig, fHostFeature, fAndExpr, fConst) \
1348 ((enmConfig) && ((enmConfig) == CPUMISAEXTCFG_ENABLED_ALWAYS || (fHostFeature)) && (fAndExpr) ? (fConst) : 0)
1349#define PASSTHRU_FEATURE_NOT_IEM(enmConfig, fHostFeature, fConst) \
1350 PASSTHRU_FEATURE_EX(enmConfig, fHostFeature, !VM_IS_EXEC_ENGINE_IEM(pVM), fConst)
1351#define PASSTHRU_FEATURE_TODO(enmConfig, fConst) ((enmConfig) ? (fConst) : 0)
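/* Illustrative expansion of PASSTHRU_FEATURE above, assuming the host has
   SSE4.1:
   @code
        uint32_t fBit = PASSTHRU_FEATURE(CPUMISAEXTCFG_ENABLED_SUPPORTED, true /*fHostFeature*/,
                                         X86_CPUID_FEATURE_ECX_SSE4_1);
        // fBit == X86_CPUID_FEATURE_ECX_SSE4_1. With fHostFeature false the result is 0,
        // unless the config is CPUMISAEXTCFG_ENABLED_ALWAYS, which yields the constant anyway.
   @endcode */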
1352
1353 /* Cpuid 1:
1354 * EAX: CPU model, family and stepping.
1355 *
1356 * ECX + EDX: Supported features. Only report features we can support.
1357 * Note! When enabling new features the Synthetic CPU and Portable CPUID
1358 * options may require adjusting (i.e. stripping what was enabled).
1359 *
1360 * EBX: Branding, CLFLUSH line size, logical processors per package and
1361 * initial APIC ID.
1362 */
1363 PCPUMCPUIDLEAF pStdFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 1, 0); /* Note! Must refetch when used later. */
1364 AssertLogRelReturn(pStdFeatureLeaf, VERR_CPUM_IPE_2);
1365 pStdFeatureLeaf = cpumR3CpuIdMakeSingleLeaf(pCpum, pStdFeatureLeaf);
1366
1367 pStdFeatureLeaf->uEdx &= X86_CPUID_FEATURE_EDX_FPU
1368 | X86_CPUID_FEATURE_EDX_VME
1369 | X86_CPUID_FEATURE_EDX_DE
1370 | X86_CPUID_FEATURE_EDX_PSE
1371 | X86_CPUID_FEATURE_EDX_TSC
1372 | X86_CPUID_FEATURE_EDX_MSR
1373 //| X86_CPUID_FEATURE_EDX_PAE - set later if configured.
1374 | X86_CPUID_FEATURE_EDX_MCE
1375 | X86_CPUID_FEATURE_EDX_CX8
1376 //| X86_CPUID_FEATURE_EDX_APIC - set by the APIC device if present.
1377 //| RT_BIT_32(10) - not defined
1378 | X86_CPUID_FEATURE_EDX_SEP
1379 | X86_CPUID_FEATURE_EDX_MTRR
1380 | X86_CPUID_FEATURE_EDX_PGE
1381 | X86_CPUID_FEATURE_EDX_MCA
1382 | X86_CPUID_FEATURE_EDX_CMOV
1383 | X86_CPUID_FEATURE_EDX_PAT /* 16 */
1384 | X86_CPUID_FEATURE_EDX_PSE36
1385 //| X86_CPUID_FEATURE_EDX_PSN - no serial number.
1386 | X86_CPUID_FEATURE_EDX_CLFSH
1387 //| RT_BIT_32(20) - not defined
1388 //| X86_CPUID_FEATURE_EDX_DS - no debug store.
1389 //| X86_CPUID_FEATURE_EDX_ACPI - not supported (not DevAcpi, right?).
1390 | X86_CPUID_FEATURE_EDX_MMX
1391 | X86_CPUID_FEATURE_EDX_FXSR
1392 | X86_CPUID_FEATURE_EDX_SSE
1393 | X86_CPUID_FEATURE_EDX_SSE2
1394 //| X86_CPUID_FEATURE_EDX_SS - no self snoop.
1395 | X86_CPUID_FEATURE_EDX_HTT
1396 //| X86_CPUID_FEATURE_EDX_TM - no thermal monitor.
1397 //| RT_BIT_32(30) - not defined
1398 //| X86_CPUID_FEATURE_EDX_PBE - no pending break enabled.
1399 ;
1400 pStdFeatureLeaf->uEcx &= X86_CPUID_FEATURE_ECX_SSE3
1401 | PASSTHRU_FEATURE_TODO(pConfig->enmPClMul, X86_CPUID_FEATURE_ECX_PCLMUL)
1402 //| X86_CPUID_FEATURE_ECX_DTES64 - not implemented yet.
1403 /* Can't properly emulate monitor & mwait with guest SMP; force the guest to use hlt for idling VCPUs. */
1404 | PASSTHRU_FEATURE_EX(pConfig->enmMonitor, pHstFeat->fMonitorMWait, pVM->cCpus == 1, X86_CPUID_FEATURE_ECX_MONITOR)
1405 //| X86_CPUID_FEATURE_ECX_CPLDS - no CPL qualified debug store.
1406 | (pConfig->fNestedHWVirt ? X86_CPUID_FEATURE_ECX_VMX : 0)
1407 //| X86_CPUID_FEATURE_ECX_SMX - not virtualized yet.
1408 //| X86_CPUID_FEATURE_ECX_EST - no extended speed step.
1409 //| X86_CPUID_FEATURE_ECX_TM2 - no thermal monitor 2.
1410 | X86_CPUID_FEATURE_ECX_SSSE3
1411 //| X86_CPUID_FEATURE_ECX_CNTXID - no L1 context id (MSR++).
1412 | PASSTHRU_FEATURE(pConfig->enmFma, pHstFeat->fFma, X86_CPUID_FEATURE_ECX_FMA)
1413 | PASSTHRU_FEATURE(pConfig->enmCmpXchg16b, pHstFeat->fCmpXchg16b, X86_CPUID_FEATURE_ECX_CX16)
1414 /* ECX Bit 14 - xTPR Update Control. Processor supports changing IA32_MISC_ENABLES[bit 23]. */
1415 //| X86_CPUID_FEATURE_ECX_TPRUPDATE
1416 //| X86_CPUID_FEATURE_ECX_PDCM - not implemented yet.
1417 | PASSTHRU_FEATURE_NOT_IEM(pConfig->enmPcid, pHstFeat->fPcid, X86_CPUID_FEATURE_ECX_PCID)
1418 //| X86_CPUID_FEATURE_ECX_DCA - not implemented yet.
1419 | PASSTHRU_FEATURE(pConfig->enmSse41, pHstFeat->fSse41, X86_CPUID_FEATURE_ECX_SSE4_1)
1420 | PASSTHRU_FEATURE(pConfig->enmSse42, pHstFeat->fSse42, X86_CPUID_FEATURE_ECX_SSE4_2)
1421 //| X86_CPUID_FEATURE_ECX_X2APIC - turned on later by the device if enabled.
1422 | PASSTHRU_FEATURE(pConfig->enmMovBe, pHstFeat->fMovBe, X86_CPUID_FEATURE_ECX_MOVBE)
1423 | PASSTHRU_FEATURE(pConfig->enmPopCnt, pHstFeat->fPopCnt, X86_CPUID_FEATURE_ECX_POPCNT)
1424 //| X86_CPUID_FEATURE_ECX_TSCDEADL - not implemented yet.
1425 | PASSTHRU_FEATURE_TODO(pConfig->enmAesNi, X86_CPUID_FEATURE_ECX_AES)
1426 | PASSTHRU_FEATURE(pConfig->enmXSave, pHstFeat->fXSaveRstor, X86_CPUID_FEATURE_ECX_XSAVE)
1427 //| X86_CPUID_FEATURE_ECX_OSXSAVE - mirrors CR4.OSXSAVE state, set dynamically.
1428 | PASSTHRU_FEATURE(pConfig->enmAvx, pHstFeat->fAvx, X86_CPUID_FEATURE_ECX_AVX)
1429 | PASSTHRU_FEATURE(pConfig->enmF16c, pHstFeat->fF16c, X86_CPUID_FEATURE_ECX_F16C)
1430 | PASSTHRU_FEATURE_TODO(pConfig->enmRdRand, X86_CPUID_FEATURE_ECX_RDRAND)
1431 //| X86_CPUID_FEATURE_ECX_HVP - Set explicitly later.
1432 ;
1433
1434 /* Mask out PCID unless FSGSBASE is exposed due to a bug in Windows 10 SMP guests, see @bugref{9089#c15}. */
1435 if ( !pVM->cpum.s.GuestFeatures.fFsGsBase
1436 && (pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_PCID))
1437 {
1438 pStdFeatureLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_PCID;
1439 LogRel(("CPUM: Disabled PCID without FSGSBASE to workaround buggy guests\n"));
1440 }
1441
1442 if (pCpum->u8PortableCpuIdLevel > 0)
1443 {
1444 PORTABLE_CLEAR_BITS_WHEN(1, pStdFeatureLeaf->uEax, ProcessorType, (UINT32_C(3) << 12), (UINT32_C(2) << 12));
1445 PORTABLE_DISABLE_FEATURE_BIT( 1, pStdFeatureLeaf->uEcx, SSSE3, X86_CPUID_FEATURE_ECX_SSSE3);
1446 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, PCID, X86_CPUID_FEATURE_ECX_PCID, pConfig->enmPcid);
1447 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, SSE4_1, X86_CPUID_FEATURE_ECX_SSE4_1, pConfig->enmSse41);
1448 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, SSE4_2, X86_CPUID_FEATURE_ECX_SSE4_2, pConfig->enmSse42);
1449 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, MOVBE, X86_CPUID_FEATURE_ECX_MOVBE, pConfig->enmMovBe);
1450 PORTABLE_DISABLE_FEATURE_BIT( 1, pStdFeatureLeaf->uEcx, AES, X86_CPUID_FEATURE_ECX_AES);
1451 PORTABLE_DISABLE_FEATURE_BIT( 1, pStdFeatureLeaf->uEcx, VMX, X86_CPUID_FEATURE_ECX_VMX);
1452 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, PCLMUL, X86_CPUID_FEATURE_ECX_PCLMUL, pConfig->enmPClMul);
1453 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, POPCNT, X86_CPUID_FEATURE_ECX_POPCNT, pConfig->enmPopCnt);
1454 PORTABLE_DISABLE_FEATURE_BIT( 1, pStdFeatureLeaf->uEcx, F16C, X86_CPUID_FEATURE_ECX_F16C);
1455 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, XSAVE, X86_CPUID_FEATURE_ECX_XSAVE, pConfig->enmXSave);
1456 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, AVX, X86_CPUID_FEATURE_ECX_AVX, pConfig->enmAvx);
1457 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, RDRAND, X86_CPUID_FEATURE_ECX_RDRAND, pConfig->enmRdRand);
1458 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pStdFeatureLeaf->uEcx, CX16, X86_CPUID_FEATURE_ECX_CX16, pConfig->enmCmpXchg16b);
1459 PORTABLE_DISABLE_FEATURE_BIT( 2, pStdFeatureLeaf->uEcx, SSE3, X86_CPUID_FEATURE_ECX_SSE3);
1460 PORTABLE_DISABLE_FEATURE_BIT( 3, pStdFeatureLeaf->uEdx, SSE2, X86_CPUID_FEATURE_EDX_SSE2);
1461 PORTABLE_DISABLE_FEATURE_BIT( 3, pStdFeatureLeaf->uEdx, SSE, X86_CPUID_FEATURE_EDX_SSE);
1462 PORTABLE_DISABLE_FEATURE_BIT( 3, pStdFeatureLeaf->uEdx, CLFSH, X86_CPUID_FEATURE_EDX_CLFSH);
1463 PORTABLE_DISABLE_FEATURE_BIT( 3, pStdFeatureLeaf->uEdx, CMOV, X86_CPUID_FEATURE_EDX_CMOV);
1464
1465 Assert(!(pStdFeatureLeaf->uEdx & ( X86_CPUID_FEATURE_EDX_SEP ///??
1466 | X86_CPUID_FEATURE_EDX_PSN
1467 | X86_CPUID_FEATURE_EDX_DS
1468 | X86_CPUID_FEATURE_EDX_ACPI
1469 | X86_CPUID_FEATURE_EDX_SS
1470 | X86_CPUID_FEATURE_EDX_TM
1471 | X86_CPUID_FEATURE_EDX_PBE
1472 )));
1473 Assert(!(pStdFeatureLeaf->uEcx & ( X86_CPUID_FEATURE_ECX_DTES64
1474 | X86_CPUID_FEATURE_ECX_CPLDS
1475 | X86_CPUID_FEATURE_ECX_AES
1476 | X86_CPUID_FEATURE_ECX_VMX
1477 | X86_CPUID_FEATURE_ECX_SMX
1478 | X86_CPUID_FEATURE_ECX_EST
1479 | X86_CPUID_FEATURE_ECX_TM2
1480 | X86_CPUID_FEATURE_ECX_CNTXID
1481 | X86_CPUID_FEATURE_ECX_FMA
1482 | X86_CPUID_FEATURE_ECX_TPRUPDATE
1483 | X86_CPUID_FEATURE_ECX_PDCM
1484 | X86_CPUID_FEATURE_ECX_DCA
1485 | X86_CPUID_FEATURE_ECX_OSXSAVE
1486 )));
1487 }
1488
1489 /* Set up APIC ID for CPU 0, configure multi core/threaded smp. */
1490 pStdFeatureLeaf->uEbx &= UINT32_C(0x0000ffff); /* (APIC-ID := 0 and #LogCpus := 0) */
1491
1492 /* The HTT bit is architectural and does not directly indicate hyper-threading or multiple cores;
1493 * it was set even on single-core/non-HT Northwood P4s for example. The HTT bit only means that the
1494 * information in EBX[23:16] (max number of addressable logical processor IDs) is valid.
1495 */
1496#ifdef VBOX_WITH_MULTI_CORE
1497 if (pVM->cCpus > 1)
1498 pStdFeatureLeaf->uEdx |= X86_CPUID_FEATURE_EDX_HTT; /* Force if emulating a multi-core CPU. */
1499#endif
1500 if (pStdFeatureLeaf->uEdx & X86_CPUID_FEATURE_EDX_HTT)
1501 {
1502 /* If CPUID Fn0000_0001_EDX[HTT] = 1 then LogicalProcessorCount is the number of threads per CPU
1503 core times the number of CPU cores per processor */
1504#ifdef VBOX_WITH_MULTI_CORE
1505 pStdFeatureLeaf->uEbx |= pVM->cCpus <= 0xff ? (pVM->cCpus << 16) : UINT32_C(0x00ff0000);
1506#else
1507 /* Single logical processor in a package. */
1508 pStdFeatureLeaf->uEbx |= (1 << 16);
1509#endif
1510 }
1511
1512 uint32_t uMicrocodeRev;
1513 int rc = SUPR3QueryMicrocodeRev(&uMicrocodeRev);
1514 if (RT_SUCCESS(rc))
1515 {
1516 LogRel(("CPUM: Microcode revision 0x%08X\n", uMicrocodeRev));
1517 }
1518 else
1519 {
1520 uMicrocodeRev = 0;
1521 LogRel(("CPUM: Failed to query microcode revision. rc=%Rrc\n", rc));
1522 }
1523
1524 /* Mask out the VME capability on certain CPUs, unless overridden by fForceVme.
1525 * VME bug was fixed in AGESA 1.0.0.6, microcode patch level 8001126.
1526 */
1527 if ( ( pVM->cpum.s.GuestFeatures.enmMicroarch == kCpumMicroarch_AMD_Zen_Ryzen
1528 /** @todo The following ASSUMES that Hygon uses the same version numbering
1529 * as AMD and that they shipped buggy firmware. */
1530 || pVM->cpum.s.GuestFeatures.enmMicroarch == kCpumMicroarch_Hygon_Dhyana)
1531 && uMicrocodeRev < 0x8001126
1532 && !pConfig->fForceVme)
1533 {
1534 /** @todo The above is a very coarse test but at the moment we don't know any better (see @bugref{8852}). */
1535 LogRel(("CPUM: Zen VME workaround engaged\n"));
1536 pStdFeatureLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_VME;
1537 }
1538
1539 /* Force standard feature bits. */
1540 if (pConfig->enmPClMul == CPUMISAEXTCFG_ENABLED_ALWAYS)
1541 pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_PCLMUL;
1542 if (pConfig->enmMonitor == CPUMISAEXTCFG_ENABLED_ALWAYS)
1543 pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_MONITOR;
1544 if (pConfig->enmCmpXchg16b == CPUMISAEXTCFG_ENABLED_ALWAYS)
1545 pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_CX16;
1546 if (pConfig->enmSse41 == CPUMISAEXTCFG_ENABLED_ALWAYS)
1547 pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_SSE4_1;
1548 if (pConfig->enmSse42 == CPUMISAEXTCFG_ENABLED_ALWAYS)
1549 pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_SSE4_2;
1550 if (pConfig->enmMovBe == CPUMISAEXTCFG_ENABLED_ALWAYS)
1551 pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_MOVBE;
1552 if (pConfig->enmPopCnt == CPUMISAEXTCFG_ENABLED_ALWAYS)
1553 pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_POPCNT;
1554 if (pConfig->enmAesNi == CPUMISAEXTCFG_ENABLED_ALWAYS)
1555 pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_AES;
1556 if (pConfig->enmXSave == CPUMISAEXTCFG_ENABLED_ALWAYS)
1557 pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_XSAVE;
1558 if (pConfig->enmAvx == CPUMISAEXTCFG_ENABLED_ALWAYS)
1559 pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_AVX;
1560 if (pConfig->enmRdRand == CPUMISAEXTCFG_ENABLED_ALWAYS)
1561 pStdFeatureLeaf->uEcx |= X86_CPUID_FEATURE_ECX_RDRAND;
1562
1563 pStdFeatureLeaf = NULL; /* Must refetch! */
1564
1565 /* Cpuid 0x80000001: (Similar, but in no way identical to 0x00000001.)
1566 * AMD:
1567 * EAX: CPU model, family and stepping.
1568 *
1569 * ECX + EDX: Supported features. Only report features we can support.
1570 * Note! When enabling new features the Synthetic CPU and Portable CPUID
1571 * options may require adjusting (i.e. stripping what was enabled).
1572 * ASSUMES that this is ALWAYS the AMD defined feature set if present.
1573 *
1574 * EBX: Branding ID and package type (or reserved).
1575 *
1576 * Intel and probably most others:
1577 * EAX: 0
1578 * EBX: 0
1579 * ECX + EDX: Subset of AMD features, mainly for AMD64 support.
1580 */
1581 PCPUMCPUIDLEAF pExtFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000001), 0);
1582 if (pExtFeatureLeaf)
1583 {
1584 pExtFeatureLeaf = cpumR3CpuIdMakeSingleLeaf(pCpum, pExtFeatureLeaf);
1585
1586 pExtFeatureLeaf->uEdx &= X86_CPUID_AMD_FEATURE_EDX_FPU
1587 | X86_CPUID_AMD_FEATURE_EDX_VME
1588 | X86_CPUID_AMD_FEATURE_EDX_DE
1589 | X86_CPUID_AMD_FEATURE_EDX_PSE
1590 | X86_CPUID_AMD_FEATURE_EDX_TSC
1591 | X86_CPUID_AMD_FEATURE_EDX_MSR //?? this means AMD MSRs..
1592 //| X86_CPUID_AMD_FEATURE_EDX_PAE - turned on when necessary
1593 //| X86_CPUID_AMD_FEATURE_EDX_MCE - not virtualized yet.
1594 | X86_CPUID_AMD_FEATURE_EDX_CX8
1595 //| X86_CPUID_AMD_FEATURE_EDX_APIC - set by the APIC device if present.
1596 //| RT_BIT_32(10) - reserved
1597 | X86_CPUID_EXT_FEATURE_EDX_SYSCALL
1598 | X86_CPUID_AMD_FEATURE_EDX_MTRR
1599 | X86_CPUID_AMD_FEATURE_EDX_PGE
1600 | X86_CPUID_AMD_FEATURE_EDX_MCA
1601 | X86_CPUID_AMD_FEATURE_EDX_CMOV
1602 | X86_CPUID_AMD_FEATURE_EDX_PAT
1603 | X86_CPUID_AMD_FEATURE_EDX_PSE36
1604 //| RT_BIT_32(18) - reserved
1605 //| RT_BIT_32(19) - reserved
1606 | X86_CPUID_EXT_FEATURE_EDX_NX
1607 //| RT_BIT_32(21) - reserved
1608 | PASSTHRU_FEATURE(pConfig->enmAmdExtMmx, pHstFeat->fAmdMmxExts, X86_CPUID_AMD_FEATURE_EDX_AXMMX)
1609 | X86_CPUID_AMD_FEATURE_EDX_MMX
1610 | X86_CPUID_AMD_FEATURE_EDX_FXSR
1611 | X86_CPUID_AMD_FEATURE_EDX_FFXSR
1612 //| X86_CPUID_EXT_FEATURE_EDX_PAGE1GB
1613 | X86_CPUID_EXT_FEATURE_EDX_RDTSCP
1614 //| RT_BIT_32(28) - reserved
1615 //| X86_CPUID_EXT_FEATURE_EDX_LONG_MODE - turned on when necessary
1616 | X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX
1617 | X86_CPUID_AMD_FEATURE_EDX_3DNOW
1618 ;
1619 pExtFeatureLeaf->uEcx &= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF
1620 //| X86_CPUID_AMD_FEATURE_ECX_CMPL - set below if applicable.
1621 | (pConfig->fNestedHWVirt ? X86_CPUID_AMD_FEATURE_ECX_SVM : 0)
1622 //| X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
1623 /* Note: This could prevent teleporting from AMD to Intel CPUs! */
1624 | X86_CPUID_AMD_FEATURE_ECX_CR8L /* expose lock mov cr0 = mov cr8 hack for guests that can use this feature to access the TPR. */
1625 | PASSTHRU_FEATURE(pConfig->enmAbm, pHstFeat->fAbm, X86_CPUID_AMD_FEATURE_ECX_ABM)
1626 | PASSTHRU_FEATURE_TODO(pConfig->enmSse4A, X86_CPUID_AMD_FEATURE_ECX_SSE4A)
1627 | PASSTHRU_FEATURE_TODO(pConfig->enmMisAlnSse, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE)
1628 | PASSTHRU_FEATURE(pConfig->enm3dNowPrf, pHstFeat->f3DNowPrefetch, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF)
1629 //| X86_CPUID_AMD_FEATURE_ECX_OSVW
1630 //| X86_CPUID_AMD_FEATURE_ECX_IBS
1631 //| X86_CPUID_AMD_FEATURE_ECX_XOP
1632 //| X86_CPUID_AMD_FEATURE_ECX_SKINIT
1633 //| X86_CPUID_AMD_FEATURE_ECX_WDT
1634 //| RT_BIT_32(14) - reserved
1635 //| X86_CPUID_AMD_FEATURE_ECX_LWP - not supported
1636 //| X86_CPUID_AMD_FEATURE_ECX_FMA4 - not yet virtualized.
1637 //| RT_BIT_32(17) - reserved
1638 //| RT_BIT_32(18) - reserved
1639 //| X86_CPUID_AMD_FEATURE_ECX_NODEID - not yet virtualized.
1640 //| RT_BIT_32(20) - reserved
1641 //| X86_CPUID_AMD_FEATURE_ECX_TBM - not yet virtualized.
1642 //| X86_CPUID_AMD_FEATURE_ECX_TOPOEXT - not yet virtualized.
1643 //| RT_BIT_32(23) - reserved
1644 //| RT_BIT_32(24) - reserved
1645 //| RT_BIT_32(25) - reserved
1646 //| RT_BIT_32(26) - reserved
1647 //| RT_BIT_32(27) - reserved
1648 //| RT_BIT_32(28) - reserved
1649 //| RT_BIT_32(29) - reserved
1650 //| RT_BIT_32(30) - reserved
1651 //| RT_BIT_32(31) - reserved
1652 ;
1653#ifdef VBOX_WITH_MULTI_CORE
1654 if ( pVM->cCpus > 1
1655 && ( pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
1656 || pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON))
1657 pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_CMPL; /* CmpLegacy */
1658#endif
1659
1660 if (pCpum->u8PortableCpuIdLevel > 0)
1661 {
1662 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEcx, CR8L, X86_CPUID_AMD_FEATURE_ECX_CR8L);
1663 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEcx, SVM, X86_CPUID_AMD_FEATURE_ECX_SVM);
1664 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEcx, ABM, X86_CPUID_AMD_FEATURE_ECX_ABM, pConfig->enmAbm);
1665 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEcx, SSE4A, X86_CPUID_AMD_FEATURE_ECX_SSE4A, pConfig->enmSse4A);
1666 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEcx, MISALNSSE, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE, pConfig->enmMisAlnSse);
1667 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEcx, 3DNOWPRF, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF, pConfig->enm3dNowPrf);
1668 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEcx, XOP, X86_CPUID_AMD_FEATURE_ECX_XOP);
1669 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEcx, TBM, X86_CPUID_AMD_FEATURE_ECX_TBM);
1670 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEcx, FMA4, X86_CPUID_AMD_FEATURE_ECX_FMA4);
1671 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEdx, AXMMX, X86_CPUID_AMD_FEATURE_EDX_AXMMX, pConfig->enmAmdExtMmx);
1672 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEdx, 3DNOW, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
1673 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEdx, 3DNOW_EX, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
1674 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEdx, FFXSR, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
1675 PORTABLE_DISABLE_FEATURE_BIT( 1, pExtFeatureLeaf->uEdx, RDTSCP, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
1676 PORTABLE_DISABLE_FEATURE_BIT( 2, pExtFeatureLeaf->uEcx, LAHF_SAHF, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
1677 PORTABLE_DISABLE_FEATURE_BIT( 3, pExtFeatureLeaf->uEcx, CMOV, X86_CPUID_AMD_FEATURE_EDX_CMOV);
1678
1679 Assert(!(pExtFeatureLeaf->uEcx & ( X86_CPUID_AMD_FEATURE_ECX_SVM
1680 | X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
1681 | X86_CPUID_AMD_FEATURE_ECX_OSVW
1682 | X86_CPUID_AMD_FEATURE_ECX_IBS
1683 | X86_CPUID_AMD_FEATURE_ECX_SKINIT
1684 | X86_CPUID_AMD_FEATURE_ECX_WDT
1685 | X86_CPUID_AMD_FEATURE_ECX_LWP
1686 | X86_CPUID_AMD_FEATURE_ECX_NODEID
1687 | X86_CPUID_AMD_FEATURE_ECX_TOPOEXT
1688 | UINT32_C(0xff964000)
1689 )));
1690 Assert(!(pExtFeatureLeaf->uEdx & ( RT_BIT(10)
1691 | X86_CPUID_EXT_FEATURE_EDX_SYSCALL
1692 | RT_BIT(18)
1693 | RT_BIT(19)
1694 | RT_BIT(21)
1695 | X86_CPUID_AMD_FEATURE_EDX_AXMMX
1696 | X86_CPUID_EXT_FEATURE_EDX_PAGE1GB
1697 | RT_BIT(28)
1698 )));
1699 }
1700
1701 /* Force extended feature bits. */
1702 if (pConfig->enmAbm == CPUMISAEXTCFG_ENABLED_ALWAYS)
1703 pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_ABM;
1704 if (pConfig->enmSse4A == CPUMISAEXTCFG_ENABLED_ALWAYS)
1705 pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_SSE4A;
1706 if (pConfig->enmMisAlnSse == CPUMISAEXTCFG_ENABLED_ALWAYS)
1707 pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_MISALNSSE;
1708 if (pConfig->enm3dNowPrf == CPUMISAEXTCFG_ENABLED_ALWAYS)
1709 pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF;
1710 if (pConfig->enmAmdExtMmx == CPUMISAEXTCFG_ENABLED_ALWAYS)
1711 pExtFeatureLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_AXMMX;
1712 }
1713 pExtFeatureLeaf = NULL; /* Must refetch! */
1714
1715
1716 /* Cpuid 2:
1717 * Intel: (Nondeterministic) Cache and TLB information
1718 * AMD: Reserved
1719 * VIA: Reserved
1720 * Safe to expose.
1721 */
1722 uint32_t uSubLeaf = 0;
1723 PCPUMCPUIDLEAF pCurLeaf;
1724 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 2, uSubLeaf)) != NULL)
1725 {
1726 if ((pCurLeaf->uEax & 0xff) > 1)
1727 {
1728 LogRel(("CpuId: Std[2].al: %d -> 1\n", pCurLeaf->uEax & 0xff));
1729 pCurLeaf->uEax &= UINT32_C(0xffffff01);
1730 }
1731 uSubLeaf++;
1732 }
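    /* The low byte of leaf 2 EAX (AL) is the number of times CPUID(2) must be
     * executed to collect all cache/TLB descriptors; forcing it to 1 spares
     * guests the iteration. */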
1733
1734 /* Cpuid 3:
1735      * Intel: EAX, EBX - reserved (Transmeta uses these)
1736 * ECX, EDX - Processor Serial Number if available, otherwise reserved
1737 * AMD: Reserved
1738 * VIA: Reserved
1739 * Safe to expose
1740 */
1741 pStdFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 1, 0);
1742 if (!(pStdFeatureLeaf->uEdx & X86_CPUID_FEATURE_EDX_PSN))
1743 {
1744 uSubLeaf = 0;
1745 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 3, uSubLeaf)) != NULL)
1746 {
1747 pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
1748 if (pCpum->u8PortableCpuIdLevel > 0)
1749 pCurLeaf->uEax = pCurLeaf->uEbx = 0;
1750 uSubLeaf++;
1751 }
1752 }
1753
1754 /* Cpuid 4 + ECX:
1755 * Intel: Deterministic Cache Parameters Leaf.
1756 * AMD: Reserved
1757 * VIA: Reserved
1758 * Safe to expose, except for EAX:
1759 * Bits 25-14: Maximum number of addressable IDs for logical processors sharing this cache (see note)**
1760 * Bits 31-26: Maximum number of processor cores in this physical package**
1761 * Note: These SMP values are constant regardless of ECX
1762 */
1763 uSubLeaf = 0;
1764 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 4, uSubLeaf)) != NULL)
1765 {
1766 pCurLeaf->uEax &= UINT32_C(0x00003fff); /* Clear the #maxcores, #threads-sharing-cache (both are #-1).*/
1767#ifdef VBOX_WITH_MULTI_CORE
1768 if ( pVM->cCpus > 1
1769 && pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
1770 {
1771 AssertReturn(pVM->cCpus <= 64, VERR_TOO_MANY_CPUS);
1772 /* One logical processor with possibly multiple cores. */
1773 /* See http://www.intel.com/Assets/PDF/appnote/241618.pdf p. 29 */
1774 pCurLeaf->uEax |= pVM->cCpus <= 0x40 ? ((pVM->cCpus - 1) << 26) : UINT32_C(0xfc000000); /* 6 bits only -> 64 cores! */
1775 }
1776#endif
1777 uSubLeaf++;
1778 }
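    /* For example, a 4 vCPU Intel guest ends up with EAX[31:26] = 3 in every
     * sub-leaf here (maximum cores per physical package, minus one). */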
1779
1780 /* Cpuid 5: Monitor/mwait Leaf
1781 * Intel: ECX, EDX - reserved
1782 * EAX, EBX - Smallest and largest monitor line size
1783 * AMD: EDX - reserved
1784 * EAX, EBX - Smallest and largest monitor line size
1785 * ECX - extensions (ignored for now)
1786 * VIA: Reserved
1787 * Safe to expose
1788 */
1789 uSubLeaf = 0;
1790 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 5, uSubLeaf)) != NULL)
1791 {
1792 pStdFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 1, 0);
1793 if (!(pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_MONITOR))
1794 pCurLeaf->uEax = pCurLeaf->uEbx = 0;
1795
1796 pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
1797 if (pConfig->enmMWaitExtensions)
1798 {
1799 pCurLeaf->uEcx = X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
1800 /** @todo for now we just expose host's MWAIT C-states, although conceptually
1801 it shall be part of our power management virtualization model */
1802#if 0
1803 /* MWAIT sub C-states */
1804 pCurLeaf->uEdx =
1805 (0 << 0) /* 0 in C0 */ |
1806 (2 << 4) /* 2 in C1 */ |
1807 (2 << 8) /* 2 in C2 */ |
1808 (2 << 12) /* 2 in C3 */ |
1809 (0 << 16) /* 0 in C4 */
1810 ;
1811#endif
1812 }
1813 else
1814 pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
1815 uSubLeaf++;
1816 }
1817
1818     /* Cpuid 6: Digital Thermal Sensor and Power Management Parameters.
1819 * Intel: Various thermal and power management related stuff.
1820 * AMD: EBX, EDX - reserved.
1821 * EAX - Bit two is ARAT, indicating that APIC timers run at a constant
1822 * rate regardless of processor P-states. Same as Intel.
1823 * ECX - Bit zero is EffFreq, indicating MSR_0000_00e7 and MSR_0000_00e8
1824 * present. Same as Intel.
1825 * VIA: ??
1826 *
1827 * We clear everything except for the ARAT bit which is important for Windows 11.
1828 */
1829 uSubLeaf = 0;
1830 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 6, uSubLeaf)) != NULL)
1831 {
1832 pCurLeaf->uEbx = pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
1833 pCurLeaf->uEax &= 0
1834 | X86_CPUID_POWER_EAX_ARAT
1835 ;
1836
1837 /* Since we emulate the APIC timers, we can normally set the ARAT bit
1838          * regardless of whether the host CPU sets it or not. Intel has set the
1839          * ARAT bit since roughly the Westmere generation, AMD probably only since Zen.
1840 * See @bugref{10567}.
1841 */
1842 if (pConfig->fInvariantApic)
1843 pCurLeaf->uEax |= X86_CPUID_POWER_EAX_ARAT;
1844
1845 uSubLeaf++;
1846 }
1847
1848 /* Cpuid 7 + ECX: Structured Extended Feature Flags Enumeration
1849 * EAX: Number of sub leaves.
1850 * EBX+ECX+EDX: Feature flags
1851 *
1852      * We only have documentation for one sub-leaf, so clear all others (no need
1853 * to remove them as such, just set them to zero).
1854 *
1855 * Note! When enabling new features the Synthetic CPU and Portable CPUID
1856 * options may require adjusting (i.e. stripping what was enabled).
1857 */
1858 uSubLeaf = 0;
1859 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 7, uSubLeaf)) != NULL)
1860 {
1861 switch (uSubLeaf)
1862 {
1863 case 0:
1864 {
1865 pCurLeaf->uEax = RT_MIN(pCurLeaf->uEax, 2); /* Max ECX input is 2. */
1866 pCurLeaf->uEbx &= 0
1867 | PASSTHRU_FEATURE(pConfig->enmFsGsBase, pHstFeat->fFsGsBase, X86_CPUID_STEXT_FEATURE_EBX_FSGSBASE)
1868 //| X86_CPUID_STEXT_FEATURE_EBX_TSC_ADJUST RT_BIT(1)
1869 //| X86_CPUID_STEXT_FEATURE_EBX_SGX RT_BIT(2)
1870 | X86_CPUID_STEXT_FEATURE_EBX_BMI1
1871 //| X86_CPUID_STEXT_FEATURE_EBX_HLE RT_BIT(4)
1872 | PASSTHRU_FEATURE(pConfig->enmAvx2, pHstFeat->fAvx2, X86_CPUID_STEXT_FEATURE_EBX_AVX2)
1873 | X86_CPUID_STEXT_FEATURE_EBX_FDP_EXCPTN_ONLY
1874 //| X86_CPUID_STEXT_FEATURE_EBX_SMEP RT_BIT(7)
1875 | X86_CPUID_STEXT_FEATURE_EBX_BMI2
1876 //| X86_CPUID_STEXT_FEATURE_EBX_ERMS RT_BIT(9)
1877 | PASSTHRU_FEATURE_NOT_IEM(pConfig->enmInvpcid, pHstFeat->fInvpcid, X86_CPUID_STEXT_FEATURE_EBX_INVPCID)
1878 //| X86_CPUID_STEXT_FEATURE_EBX_RTM RT_BIT(11)
1879 //| X86_CPUID_STEXT_FEATURE_EBX_PQM RT_BIT(12)
1880 | X86_CPUID_STEXT_FEATURE_EBX_DEPR_FPU_CS_DS
1881 //| X86_CPUID_STEXT_FEATURE_EBX_MPE RT_BIT(14)
1882 //| X86_CPUID_STEXT_FEATURE_EBX_PQE RT_BIT(15)
1883 //| X86_CPUID_STEXT_FEATURE_EBX_AVX512F RT_BIT(16)
1884 //| RT_BIT(17) - reserved
1885 | PASSTHRU_FEATURE_TODO(pConfig->enmRdSeed, X86_CPUID_STEXT_FEATURE_EBX_RDSEED)
1886 | PASSTHRU_FEATURE(pConfig->enmAdx, pHstFeat->fAdx, X86_CPUID_STEXT_FEATURE_EBX_ADX)
1887 //| X86_CPUID_STEXT_FEATURE_EBX_SMAP RT_BIT(20)
1888 //| RT_BIT(21) - reserved
1889 //| RT_BIT(22) - reserved
1890 | PASSTHRU_FEATURE(pConfig->enmCLFlushOpt, pHstFeat->fClFlushOpt, X86_CPUID_STEXT_FEATURE_EBX_CLFLUSHOPT)
1891 //| RT_BIT(24) - reserved
1892 //| X86_CPUID_STEXT_FEATURE_EBX_INTEL_PT RT_BIT(25)
1893 //| X86_CPUID_STEXT_FEATURE_EBX_AVX512PF RT_BIT(26)
1894 //| X86_CPUID_STEXT_FEATURE_EBX_AVX512ER RT_BIT(27)
1895 //| X86_CPUID_STEXT_FEATURE_EBX_AVX512CD RT_BIT(28)
1896 | PASSTHRU_FEATURE(pConfig->enmSha, pHstFeat->fSha, X86_CPUID_STEXT_FEATURE_EBX_SHA)
1897 //| RT_BIT(30) - reserved
1898 //| RT_BIT(31) - reserved
1899 ;
1900 pCurLeaf->uEcx &= 0
1901 //| X86_CPUID_STEXT_FEATURE_ECX_PREFETCHWT1 - we do not do vector functions yet.
1902 ;
1903 pCurLeaf->uEdx &= 0
1904 //| X86_CPUID_STEXT_FEATURE_EDX_SRBDS_CTRL RT_BIT(9)
1905 | PASSTHRU_FEATURE(pConfig->enmMdsClear, pHstFeat->fMdsClear, X86_CPUID_STEXT_FEATURE_EDX_MD_CLEAR)
1906 //| X86_CPUID_STEXT_FEATURE_EDX_TSX_FORCE_ABORT RT_BIT_32(11)
1907 //| X86_CPUID_STEXT_FEATURE_EDX_CET_IBT RT_BIT(20)
1908 //| X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB RT_BIT(26)
1909 //| X86_CPUID_STEXT_FEATURE_EDX_STIBP RT_BIT(27)
1910 | PASSTHRU_FEATURE(pConfig->enmFlushCmdMsr, pHstFeat->fFlushCmd, X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD)
1911 | PASSTHRU_FEATURE(pConfig->enmArchCapMsr, pHstFeat->fArchCap, X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP)
1912 //| X86_CPUID_STEXT_FEATURE_EDX_CORECAP RT_BIT_32(30)
1913 //| X86_CPUID_STEXT_FEATURE_EDX_SSBD RT_BIT_32(31)
1914 ;
1915
1916 /* Mask out INVPCID unless FSGSBASE is exposed due to a bug in Windows 10 SMP guests, see @bugref{9089#c15}. */
1917 if ( !pVM->cpum.s.GuestFeatures.fFsGsBase
1918 && (pCurLeaf->uEbx & X86_CPUID_STEXT_FEATURE_EBX_INVPCID))
1919 {
1920 pCurLeaf->uEbx &= ~X86_CPUID_STEXT_FEATURE_EBX_INVPCID;
1921 LogRel(("CPUM: Disabled INVPCID without FSGSBASE to work around buggy guests\n"));
1922 }
1923
1924 if (pCpum->u8PortableCpuIdLevel > 0)
1925 {
1926 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, FSGSBASE, X86_CPUID_STEXT_FEATURE_EBX_FSGSBASE, pConfig->enmFsGsBase);
1927 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, SGX, X86_CPUID_STEXT_FEATURE_EBX_SGX);
1928 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, AVX2, X86_CPUID_STEXT_FEATURE_EBX_AVX2, pConfig->enmAvx2);
1929 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, SMEP, X86_CPUID_STEXT_FEATURE_EBX_SMEP);
1930 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, BMI2, X86_CPUID_STEXT_FEATURE_EBX_BMI2);
1931 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, INVPCID, X86_CPUID_STEXT_FEATURE_EBX_INVPCID, pConfig->enmInvpcid);
1932 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, AVX512F, X86_CPUID_STEXT_FEATURE_EBX_AVX512F);
1933 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, RDSEED, X86_CPUID_STEXT_FEATURE_EBX_RDSEED, pConfig->enmRdSeed);
1934 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, ADX, X86_CPUID_STEXT_FEATURE_EBX_ADX, pConfig->enmAdx);
1935                     PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, CLFLUSHOPT, X86_CPUID_STEXT_FEATURE_EBX_CLFLUSHOPT, pConfig->enmCLFlushOpt);
1936 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, AVX512PF, X86_CPUID_STEXT_FEATURE_EBX_AVX512PF);
1937 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, AVX512ER, X86_CPUID_STEXT_FEATURE_EBX_AVX512ER);
1938 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, AVX512CD, X86_CPUID_STEXT_FEATURE_EBX_AVX512CD);
1939 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEbx, SMAP, X86_CPUID_STEXT_FEATURE_EBX_SMAP);
1940 PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, SHA, X86_CPUID_STEXT_FEATURE_EBX_SHA, pConfig->enmSha);
1941 PORTABLE_DISABLE_FEATURE_BIT( 1, pCurLeaf->uEcx, PREFETCHWT1, X86_CPUID_STEXT_FEATURE_ECX_PREFETCHWT1);
1942 PORTABLE_DISABLE_FEATURE_BIT_CFG(3, pCurLeaf->uEdx, FLUSH_CMD, X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD, pConfig->enmFlushCmdMsr);
1943 PORTABLE_DISABLE_FEATURE_BIT_CFG(3, pCurLeaf->uEdx, MD_CLEAR, X86_CPUID_STEXT_FEATURE_EDX_MD_CLEAR, pConfig->enmMdsClear);
1944 PORTABLE_DISABLE_FEATURE_BIT_CFG(3, pCurLeaf->uEdx, ARCHCAP, X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP, pConfig->enmArchCapMsr);
1945 }
1946
1947 /* Dependencies. */
1948 if (!(pCurLeaf->uEdx & X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD))
1949 pCurLeaf->uEdx &= ~X86_CPUID_STEXT_FEATURE_EDX_MD_CLEAR;
1950
1951 /* Force standard feature bits. */
1952 if (pConfig->enmFsGsBase == CPUMISAEXTCFG_ENABLED_ALWAYS)
1953 pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_FSGSBASE;
1954 if (pConfig->enmAvx2 == CPUMISAEXTCFG_ENABLED_ALWAYS)
1955 pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_AVX2;
1956 if (pConfig->enmRdSeed == CPUMISAEXTCFG_ENABLED_ALWAYS)
1957 pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_RDSEED;
1958 if (pConfig->enmAdx == CPUMISAEXTCFG_ENABLED_ALWAYS)
1959 pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_ADX;
1960 if (pConfig->enmCLFlushOpt == CPUMISAEXTCFG_ENABLED_ALWAYS)
1961 pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_CLFLUSHOPT;
1962 if (pConfig->enmSha == CPUMISAEXTCFG_ENABLED_ALWAYS)
1963 pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_SHA;
1964 if (pConfig->enmInvpcid == CPUMISAEXTCFG_ENABLED_ALWAYS)
1965 pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_INVPCID;
1966 if (pConfig->enmFlushCmdMsr == CPUMISAEXTCFG_ENABLED_ALWAYS)
1967 pCurLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD;
1968 if (pConfig->enmMdsClear == CPUMISAEXTCFG_ENABLED_ALWAYS)
1969 pCurLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_MD_CLEAR;
1970 if (pConfig->enmArchCapMsr == CPUMISAEXTCFG_ENABLED_ALWAYS)
1971 pCurLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP;
1972 break;
1973 }
1974
1975 case 2:
1976 {
1977 pCurLeaf->uEax = 0;
1978 pCurLeaf->uEbx = 0;
1979 pCurLeaf->uEcx = 0;
1980 pCurLeaf->uEdx &= 0
1981 //| X86_CPUID_STEXT_FEATURE_2_EDX_PSFD RT_BIT_32(0)
1982 //| X86_CPUID_STEXT_FEATURE_2_EDX_IPRED_CTRL RT_BIT_32(1)
1983 //| X86_CPUID_STEXT_FEATURE_2_EDX_RRSBA_CTRL RT_BIT_32(2)
1984 //| X86_CPUID_STEXT_FEATURE_2_EDX_DDPD_U RT_BIT_32(3)
1985 //| X86_CPUID_STEXT_FEATURE_2_EDX_BHI_CTRL RT_BIT_32(4)
1986 | PASSTHRU_FEATURE(pConfig->enmMcdtNo, pHstFeat->fMcdtNo, X86_CPUID_STEXT_FEATURE_2_EDX_MCDT_NO)
1987 //| X86_CPUID_STEXT_FEATURE_2_EDX_UC_LOCK_DIS RT_BIT_32(6)
1988                               //| Bit 7 - MONITOR_MITG_NO - No need for MONITOR/UMONITOR power mitigations.
1989 | PASSTHRU_FEATURE(pConfig->enmMonitorMitgNo, pHstFeat->fMonitorMitgNo, X86_CPUID_STEXT_FEATURE_2_EDX_MONITOR_MITG_NO)
1990 ;
1991
1992 /* Force standard feature bits. */
1993 if (pConfig->enmMcdtNo == CPUMISAEXTCFG_ENABLED_ALWAYS)
1994 pCurLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_2_EDX_MCDT_NO;
1995 if (pConfig->enmMonitorMitgNo == CPUMISAEXTCFG_ENABLED_ALWAYS)
1996 pCurLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_2_EDX_MONITOR_MITG_NO;
1997 break;
1998 }
1999
2000 default:
2001 /* Invalid index, all values are zero. */
2002 pCurLeaf->uEax = 0;
2003 pCurLeaf->uEbx = 0;
2004 pCurLeaf->uEcx = 0;
2005 pCurLeaf->uEdx = 0;
2006 break;
2007 }
2008 uSubLeaf++;
2009 }
2010
2011 /* Cpuid 8: Marked as reserved by Intel and AMD.
2012 * We zero this since we don't know what it may have been used for.
2013 */
2014 cpumR3CpuIdZeroLeaf(pCpum, 8);
2015
2016 /* Cpuid 9: Direct Cache Access (DCA) Parameters
2017 * Intel: EAX - Value of PLATFORM_DCA_CAP bits.
2018 * EBX, ECX, EDX - reserved.
2019 * AMD: Reserved
2020 * VIA: ??
2021 *
2022 * We zero this.
2023 */
2024 cpumR3CpuIdZeroLeaf(pCpum, 9);
2025
2026 /* Cpuid 0xa: Architectural Performance Monitor Features
2027      * Intel: EAX - Version ID plus general-purpose counter count and width.
2028      *        EBX - Event availability bitmap; ECX, EDX - fixed counter info.
2029 * AMD: Reserved
2030 * VIA: ??
2031 *
2032 * We zero this, for now at least.
2033 */
2034 cpumR3CpuIdZeroLeaf(pCpum, 10);
2035
2036 /* Cpuid 0xb+ECX: x2APIC Features / Processor Topology.
2037      * Intel: EAX - APIC ID shift right for next level.
2038 * EBX - Factory configured cores/threads at this level.
2039 * ECX - Level number (same as input) and level type (1,2,0).
2040 * EDX - Extended initial APIC ID.
2041 * AMD: Reserved
2042 * VIA: ??
2043 */
2044 uSubLeaf = 0;
2045 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 11, uSubLeaf)) != NULL)
2046 {
2047 if (pCurLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC_ID)
2048 {
2049 uint8_t bLevelType = RT_BYTE2(pCurLeaf->uEcx);
2050 if (bLevelType == 1)
2051 {
2052 /* Thread level - we don't do threads at the moment. */
2053 pCurLeaf->uEax = 0; /** @todo is this correct? Real CPUs never do 0 here, I think... */
2054 pCurLeaf->uEbx = 1;
2055 }
2056 else if (bLevelType == 2)
2057 {
2058 /* Core level. */
2059 pCurLeaf->uEax = 1; /** @todo real CPUs are supposed to be in the 4-6 range, not 1. Our APIC ID assignments are a little special... */
2060#ifdef VBOX_WITH_MULTI_CORE
2061 while (RT_BIT_32(pCurLeaf->uEax) < pVM->cCpus)
2062 pCurLeaf->uEax++;
2063#endif
2064 pCurLeaf->uEbx = pVM->cCpus;
2065 }
2066 else
2067 {
2068 AssertLogRelMsg(bLevelType == 0, ("bLevelType=%#x uSubLeaf=%#x\n", bLevelType, uSubLeaf));
2069 pCurLeaf->uEax = 0;
2070 pCurLeaf->uEbx = 0;
2071 pCurLeaf->uEcx = 0;
2072 }
2073 pCurLeaf->uEcx = (pCurLeaf->uEcx & UINT32_C(0xffffff00)) | (uSubLeaf & 0xff);
2074 pCurLeaf->uEdx = 0; /* APIC ID is filled in by CPUMGetGuestCpuId() at runtime. Init for EMT(0) as usual. */
2075 }
2076 else
2077 {
2078 pCurLeaf->uEax = 0;
2079 pCurLeaf->uEbx = 0;
2080 pCurLeaf->uEcx = 0;
2081 pCurLeaf->uEdx = 0;
2082 }
2083 uSubLeaf++;
2084 }
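    /* For example, with 6 vCPUs the core-level sub-leaf ends up with EAX = 3
     * (2^3 = 8 >= 6 APIC IDs to shift away per package) and EBX = 6. */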
2085
2086 /* Cpuid 0xc: Marked as reserved by Intel and AMD.
2087 * We zero this since we don't know what it may have been used for.
2088 */
2089 cpumR3CpuIdZeroLeaf(pCpum, 12);
2090
2091 /* Cpuid 0xd + ECX: Processor Extended State Enumeration
2092 * ECX=0: EAX - Valid bits in XCR0[31:0].
2093 * EBX - Maximum state size as per current XCR0 value.
2094 * ECX - Maximum state size for all supported features.
2095 * EDX - Valid bits in XCR0[63:32].
2096 * ECX=1: EAX - Various X-features.
2097 * EBX - Maximum state size as per current XCR0|IA32_XSS value.
2098 * ECX - Valid bits in IA32_XSS[31:0].
2099 * EDX - Valid bits in IA32_XSS[63:32].
2100      * ECX=N, where N is in 2..63 and indicates a bit in XCR0 and/or IA32_XSS;
2101      *        if the bit is invalid, all four registers are set to zero.
2102 * EAX - The state size for this feature.
2103 * EBX - The state byte offset of this feature.
2104 * ECX - Bit 0 indicates whether this sub-leaf maps to a valid IA32_XSS bit (=1) or a valid XCR0 bit (=0).
2105 * EDX - Reserved, but is set to zero if invalid sub-leaf index.
2106 *
2107      * Clear the sub-leaves of all state components we don't expose to the guest.
2108 */
2109 /* Figure out the supported XCR0/XSS mask component and make sure CPUID[1].ECX[27] = CR4.OSXSAVE. */
2110 uint64_t fGuestXcr0Mask = 0;
2111 pStdFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 1, 0);
2112 if (pStdFeatureLeaf && (pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_XSAVE))
2113 {
2114 fGuestXcr0Mask = XSAVE_C_X87 | XSAVE_C_SSE;
2115 if (pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_AVX)
2116 fGuestXcr0Mask |= XSAVE_C_YMM;
2117 pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 7, 0);
2118 if (pCurLeaf && (pCurLeaf->uEbx & X86_CPUID_STEXT_FEATURE_EBX_AVX512F))
2119 fGuestXcr0Mask |= XSAVE_C_ZMM_16HI | XSAVE_C_ZMM_HI256 | XSAVE_C_OPMASK;
2120 fGuestXcr0Mask &= pCpum->fXStateHostMask;
2121
2122 pStdFeatureLeaf->fFlags |= CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE;
2123 }
2124 pStdFeatureLeaf = NULL;
2125 pCpum->fXStateGuestMask = fGuestXcr0Mask;
2126
2127 /* Work the sub-leaves. */
2128 uint32_t cbXSaveMaxActual = CPUM_MIN_XSAVE_AREA_SIZE;
2129 uint32_t cbXSaveMaxReport = CPUM_MIN_XSAVE_AREA_SIZE;
2130 for (uSubLeaf = 0; uSubLeaf < 63; uSubLeaf++)
2131 {
2132 pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 13, uSubLeaf);
2133 if (pCurLeaf)
2134 {
2135 if (fGuestXcr0Mask)
2136 {
2137 switch (uSubLeaf)
2138 {
2139 case 0:
2140 pCurLeaf->uEax &= RT_LO_U32(fGuestXcr0Mask);
2141 pCurLeaf->uEdx &= RT_HI_U32(fGuestXcr0Mask);
2142 AssertLogRelMsgReturn((pCurLeaf->uEax & (XSAVE_C_X87 | XSAVE_C_SSE)) == (XSAVE_C_X87 | XSAVE_C_SSE),
2143 ("CPUID(0xd/0).EAX missing mandatory X87 or SSE bits: %#RX32", pCurLeaf->uEax),
2144 VERR_CPUM_IPE_1);
2145 cbXSaveMaxActual = pCurLeaf->uEcx;
2146 AssertLogRelMsgReturn(cbXSaveMaxActual <= CPUM_MAX_XSAVE_AREA_SIZE && cbXSaveMaxActual >= CPUM_MIN_XSAVE_AREA_SIZE,
2147 ("%#x max=%#x\n", cbXSaveMaxActual, CPUM_MAX_XSAVE_AREA_SIZE), VERR_CPUM_IPE_2);
2148 AssertLogRelMsgReturn(pCurLeaf->uEbx >= CPUM_MIN_XSAVE_AREA_SIZE && pCurLeaf->uEbx <= cbXSaveMaxActual,
2149 ("ebx=%#x cbXSaveMaxActual=%#x\n", pCurLeaf->uEbx, cbXSaveMaxActual),
2150 VERR_CPUM_IPE_2);
2151 continue;
2152 case 1:
2153 pCurLeaf->uEax &= 0;
2154 pCurLeaf->uEcx &= 0;
2155 pCurLeaf->uEdx &= 0;
2156 /** @todo what about checking ebx? */
2157 continue;
2158 default:
2159 if (fGuestXcr0Mask & RT_BIT_64(uSubLeaf))
2160 {
2161 AssertLogRelMsgReturn( pCurLeaf->uEax <= cbXSaveMaxActual
2162 && pCurLeaf->uEax > 0
2163 && pCurLeaf->uEbx < cbXSaveMaxActual
2164 && pCurLeaf->uEbx >= CPUM_MIN_XSAVE_AREA_SIZE
2165 && pCurLeaf->uEbx + pCurLeaf->uEax <= cbXSaveMaxActual,
2166 ("%#x: eax=%#x ebx=%#x cbMax=%#x\n",
2167 uSubLeaf, pCurLeaf->uEax, pCurLeaf->uEbx, cbXSaveMaxActual),
2168 VERR_CPUM_IPE_2);
2169 AssertLogRel(!(pCurLeaf->uEcx & 1));
2170                             pCurLeaf->uEcx = 0; /* Bit 0 should be zero (XCR0), the rest are reserved... */
2171 pCurLeaf->uEdx = 0; /* it's reserved... */
2172 if (pCurLeaf->uEbx + pCurLeaf->uEax > cbXSaveMaxReport)
2173 cbXSaveMaxReport = pCurLeaf->uEbx + pCurLeaf->uEax;
2174 continue;
2175 }
2176 break;
2177 }
2178 }
2179
2180 /* Clear the leaf. */
2181 pCurLeaf->uEax = 0;
2182 pCurLeaf->uEbx = 0;
2183 pCurLeaf->uEcx = 0;
2184 pCurLeaf->uEdx = 0;
2185 }
2186 }
2187
2188 /* Update the max and current feature sizes to shut up annoying Linux kernels. */
2189 if (cbXSaveMaxReport != cbXSaveMaxActual && fGuestXcr0Mask)
2190 {
2191 pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 13, 0);
2192 if (pCurLeaf)
2193 {
2194 LogRel(("CPUM: Changing leaf 13[0]: EBX=%#RX32 -> %#RX32, ECX=%#RX32 -> %#RX32\n",
2195 pCurLeaf->uEbx, cbXSaveMaxReport, pCurLeaf->uEcx, cbXSaveMaxReport));
2196 pCurLeaf->uEbx = cbXSaveMaxReport;
2197 pCurLeaf->uEcx = cbXSaveMaxReport;
2198 }
2199 }
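    /* For example, a guest XCR0 mask of X87|SSE|YMM typically ends up as
     * EAX=0x00000007 and EBX=ECX=832 in sub-leaf 0: a 512 byte legacy area,
     * the 64 byte XSAVE header, and 256 bytes of YMM state at offset 576
     * (the usual layout; the offsets themselves come from the host CPU). */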
2200
2201 /* Cpuid 0xe: Marked as reserved by Intel and AMD.
2202 * We zero this since we don't know what it may have been used for.
2203 */
2204 cpumR3CpuIdZeroLeaf(pCpum, 14);
2205
2206 /* Cpuid 0xf + ECX: Platform quality of service monitoring (PQM),
2207 * also known as Intel Resource Director Technology (RDT) Monitoring
2208 * We zero this as we don't currently virtualize PQM.
2209 */
2210 cpumR3CpuIdZeroLeaf(pCpum, 15);
2211
2212 /* Cpuid 0x10 + ECX: Platform quality of service enforcement (PQE),
2213 * also known as Intel Resource Director Technology (RDT) Allocation
2214 * We zero this as we don't currently virtualize PQE.
2215 */
2216 cpumR3CpuIdZeroLeaf(pCpum, 16);
2217
2218 /* Cpuid 0x11: Marked as reserved by Intel and AMD.
2219 * We zero this since we don't know what it may have been used for.
2220 */
2221 cpumR3CpuIdZeroLeaf(pCpum, 17);
2222
2223 /* Cpuid 0x12 + ECX: SGX resource enumeration.
2224 * We zero this as we don't currently virtualize this.
2225 */
2226 cpumR3CpuIdZeroLeaf(pCpum, 18);
2227
2228 /* Cpuid 0x13: Marked as reserved by Intel and AMD.
2229 * We zero this since we don't know what it may have been used for.
2230 */
2231 cpumR3CpuIdZeroLeaf(pCpum, 19);
2232
2233 /* Cpuid 0x14 + ECX: Processor Trace (PT) capability enumeration.
2234 * We zero this as we don't currently virtualize this.
2235 */
2236 cpumR3CpuIdZeroLeaf(pCpum, 20);
2237
2238 /* Cpuid 0x15: Timestamp Counter / Core Crystal Clock info.
2239 * Intel: uTscFrequency = uCoreCrystalClockFrequency * EBX / EAX.
2240 * EAX - denominator (unsigned).
2241 * EBX - numerator (unsigned).
2242 * ECX, EDX - reserved.
2243 * AMD: Reserved / undefined / not implemented.
2244 * VIA: Reserved / undefined / not implemented.
2245 * We zero this as we don't currently virtualize this.
2246 */
2247 cpumR3CpuIdZeroLeaf(pCpum, 21);
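    /* Purely as a sketch of the formula in the comment above, with made-up
     * numbers: a 24 MHz core crystal clock and EAX=2, EBX=250 would mean a
     * 3 GHz TSC. */
#if 0
    uint64_t const uCrystalHz   = UINT64_C(24000000);  /* hypothetical core crystal clock */
    uint32_t const uDenominator = 2;                   /* hypothetical EAX value */
    uint32_t const uNumerator   = 250;                 /* hypothetical EBX value */
    uint64_t const uTscHz       = uCrystalHz * uNumerator / uDenominator; /* = 3 GHz */
#endif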
2248
2249 /* Cpuid 0x16: Processor frequency info
2250 * Intel: EAX - Core base frequency in MHz.
2251 * EBX - Core maximum frequency in MHz.
2252 * ECX - Bus (reference) frequency in MHz.
2253 * EDX - Reserved.
2254 * AMD: Reserved / undefined / not implemented.
2255 * VIA: Reserved / undefined / not implemented.
2256 * We zero this as we don't currently virtualize this.
2257 */
2258 cpumR3CpuIdZeroLeaf(pCpum, 22);
2259
2260 /* Cpuid 0x17..0x10000000: Unknown.
2261      * We don't know what these leaves mean, so remove them. */
2262 cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
2263 UINT32_C(0x00000017), UINT32_C(0x0fffffff));
2264
2265
2266 /* CpuId 0x40000000..0x4fffffff: Reserved for hypervisor/emulator.
2267 * We remove all these as we're a hypervisor and must provide our own.
2268 */
2269 cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
2270 UINT32_C(0x40000000), UINT32_C(0x4fffffff));
2271
2272
2273 /* Cpuid 0x80000000 is harmless. */
2274
2275 /* Cpuid 0x80000001 is handled with cpuid 1 way up above. */
2276
2277 /* Cpuid 0x80000002...0x80000004 contains the processor name and is considered harmless. */
2278
2279 /* Cpuid 0x80000005 & 0x80000006 contain information about L1, L2 & L3 cache and TLB identifiers.
2280 * Safe to pass on to the guest.
2281 *
2282 * AMD: 0x80000005 L1 cache information
2283 * 0x80000006 L2/L3 cache information
2284 * Intel: 0x80000005 reserved
2285 * 0x80000006 L2 cache information
2286 * VIA: 0x80000005 TLB and L1 cache information
2287 * 0x80000006 L2 cache information
2288 */
2289
2290 uSubLeaf = 0;
2291 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000006), uSubLeaf)) != NULL)
2292 {
2293 if ( pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
2294 || pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
2295 {
2296 /*
2297 * Some AMD CPUs (e.g. Ryzen 7940HS) report zero L3 cache line size here and refer
2298 * to CPUID Fn8000_001D. This triggers division by zero in Linux if the
2299 * TopologyExtensions aka TOPOEXT bit in Fn8000_0001_ECX is not set, or if the kernel
2300 * is old enough (e.g. Linux 3.13) that it does not know about the topology extension
2301 * CPUID leaves.
2302              * We put a non-zero value in the cache line size here: if possible the actual
2303              * value gleaned from Fn8000_001D, or worst case a made-up but valid number.
2304 */
2305 PCPUMCPUIDLEAF pTopoLeaf;
2306 uint32_t uTopoSubLeaf;
2307 uint32_t uCacheLineSize;
2308
2309 if ((pCurLeaf->uEdx & 0xff) == 0)
2310 {
2311 uTopoSubLeaf = 0;
2312
2313 uCacheLineSize = 64; /* Use 64-byte line size as a fallback. */
2314
2315 /* Find L3 cache information. Have to check the cache level in EAX. */
2316 while ((pTopoLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x8000001d), uTopoSubLeaf)) != NULL)
2317 {
2318 if (((pTopoLeaf->uEax >> 5) & 0x07) == 3) {
2319 uCacheLineSize = (pTopoLeaf->uEbx & 0xfff) + 1;
2320 /* Fn8000_0006 can't report power of two line sizes greater than 128. */
2321 if (uCacheLineSize > 128)
2322 uCacheLineSize = 128;
2323
2324 break;
2325 }
2326 uTopoSubLeaf++;
2327 }
2328
2329 Assert(uCacheLineSize < 256);
2330 pCurLeaf->uEdx |= uCacheLineSize;
2331 LogRel(("CPUM: AMD L3 cache line size in CPUID leaf 0x80000006 was zero, adjusting to %u\n", uCacheLineSize));
2332 }
2333 }
2334 uSubLeaf++;
2335 }
2336
2337 /* Cpuid 0x80000007: Advanced Power Management Information.
2338 * AMD: EAX: Processor feedback capabilities.
2339      *       EBX: RAS capabilities.
2340 * ECX: Advanced power monitoring interface.
2341 * EDX: Enhanced power management capabilities.
2342 * Intel: EAX, EBX, ECX - reserved.
2343 * EDX - Invariant TSC indicator supported (bit 8), the rest is reserved.
2344 * VIA: Reserved
2345 * We let the guest see EDX_TSCINVAR (and later maybe EDX_EFRO). Actually, we should set EDX_TSCINVAR.
2346 */
2347 uSubLeaf = 0;
2348 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000007), uSubLeaf)) != NULL)
2349 {
2350 pCurLeaf->uEax = pCurLeaf->uEbx = pCurLeaf->uEcx = 0;
2351 if ( pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
2352 || pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
2353 {
2354 /*
2355 * Older 64-bit linux kernels blindly assume that the AMD performance counters work
2356 * if X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR is set, see @bugref{7243#c85}. Exposing this
2357 * bit is now configurable.
2358 */
2359 pCurLeaf->uEdx &= 0
2360 //| X86_CPUID_AMD_ADVPOWER_EDX_TS
2361 //| X86_CPUID_AMD_ADVPOWER_EDX_FID
2362 //| X86_CPUID_AMD_ADVPOWER_EDX_VID
2363 //| X86_CPUID_AMD_ADVPOWER_EDX_TTP
2364 //| X86_CPUID_AMD_ADVPOWER_EDX_TM
2365 //| X86_CPUID_AMD_ADVPOWER_EDX_STC
2366 //| X86_CPUID_AMD_ADVPOWER_EDX_MC
2367 //| X86_CPUID_AMD_ADVPOWER_EDX_HWPSTATE
2368 | X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR
2369 //| X86_CPUID_AMD_ADVPOWER_EDX_CPB RT_BIT(9)
2370 //| X86_CPUID_AMD_ADVPOWER_EDX_EFRO RT_BIT(10)
2371 //| X86_CPUID_AMD_ADVPOWER_EDX_PFI RT_BIT(11)
2372 //| X86_CPUID_AMD_ADVPOWER_EDX_PA RT_BIT(12)
2373 | 0;
2374 }
2375 else
2376 pCurLeaf->uEdx &= X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR;
2377 if (!pConfig->fInvariantTsc)
2378 pCurLeaf->uEdx &= ~X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR;
2379 uSubLeaf++;
2380 }
2381
2382 /* Cpuid 0x80000008:
2383 * AMD: EAX: Long Mode Size Identifiers
2384 * EBX: Extended Feature Identifiers
2385 * ECX: Number of cores + APICIdCoreIdSize
2386 * EDX: RDPRU Register Identifier Range
2387 * Intel: EAX: Virtual/Physical address Size
2388 * EBX, ECX, EDX - reserved
2389 * VIA: EAX: Virtual/Physical address Size
2390 * EBX, ECX, EDX - reserved
2391 *
2392      * We only expose the virtual+physical address size to the guest atm.
2393 * On AMD we set the core count, but not the apic id stuff as we're
2394 * currently not doing the apic id assignments in a compatible manner.
2395 */
2396 bool fAmdGstSupIbpb = false; /* Used below. */
2397 uSubLeaf = 0;
2398 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000008), uSubLeaf)) != NULL)
2399 {
2400 pCurLeaf->uEax &= UINT32_C(0x0000ffff); /* Virtual & physical address sizes only. */
2401 if ( pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
2402 || pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
2403 {
2404 /* Expose XSaveErPtr aka RstrFpErrPtrs to guest. */
2405 pCurLeaf->uEbx &= 0
2406 //| X86_CPUID_AMD_EFEID_EBX_CLZERO
2407 //| X86_CPUID_AMD_EFEID_EBX_IRPERF
2408 //| X86_CPUID_AMD_EFEID_EBX_XSAVE_ER_PTR
2409 //| X86_CPUID_AMD_EFEID_EBX_INVLPGB
2410 //| X86_CPUID_AMD_EFEID_EBX_RDPRU
2411 //| X86_CPUID_AMD_EFEID_EBX_BE
2412 //| X86_CPUID_AMD_EFEID_EBX_MCOMMIT
2413 | (pConfig->fSpecCtrl || PASSTHRU_FEATURE(pConfig->enmFlushCmdMsr, pHstFeat->fFlushCmd, true)
2414 ? X86_CPUID_AMD_EFEID_EBX_IBPB : 0)
2415 //| X86_CPUID_AMD_EFEID_EBX_INT_WBINVD
2416 | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_IBRS : 0)
2417 | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_STIBP : 0)
2418 | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_IBRS_ALWAYS_ON : 0)
2419 | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_STIBP_ALWAYS_ON : 0)
2420 | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_IBRS_PREFERRED : 0)
2421 | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_IBRS_SAME_MODE : 0)
2422 //| X86_CPUID_AMD_EFEID_EBX_NO_EFER_LMSLE
2423 //| X86_CPUID_AMD_EFEID_EBX_INVLPGB_NESTED_PAGES
2424 | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_SPEC_CTRL_SSBD : 0)
2425 /// @todo | X86_CPUID_AMD_EFEID_EBX_VIRT_SPEC_CTRL_SSBD
2426 | X86_CPUID_AMD_EFEID_EBX_SSBD_NOT_REQUIRED
2427 //| X86_CPUID_AMD_EFEID_EBX_CPPC
2428 | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_PSFD : 0)
2429 | X86_CPUID_AMD_EFEID_EBX_BTC_NO
2430 | (pConfig->fSpecCtrl ? X86_CPUID_AMD_EFEID_EBX_IBPB_RET : 0);
2431
2432 PORTABLE_DISABLE_FEATURE_BIT_CFG(3, pCurLeaf->uEbx, IBPB, X86_CPUID_AMD_EFEID_EBX_IBPB, pConfig->enmFlushCmdMsr);
2433
2434             /* Sharing this forced setting with Intel might confuse guests... */
2435 if (pConfig->enmFlushCmdMsr == CPUMISAEXTCFG_ENABLED_ALWAYS)
2436 pCurLeaf->uEbx |= X86_CPUID_AMD_EFEID_EBX_IBPB;
2437
2438 fAmdGstSupIbpb = RT_BOOL(pCurLeaf->uEbx & X86_CPUID_AMD_EFEID_EBX_IBPB);
2439 }
2440 else
2441 pCurLeaf->uEbx = 0; /* reserved */
2442
2443 pCurLeaf->uEdx = 0; /* reserved */
2444
2445 /* Set APICIdCoreIdSize to zero (use legacy method to determine the number of cores per cpu).
2446 * Set core count to 0, indicating 1 core. Adjust if we're in multi core mode on AMD. */
2447 pCurLeaf->uEcx = 0;
2448#ifdef VBOX_WITH_MULTI_CORE
2449 if ( pVM->cCpus > 1
2450 && ( pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
2451 || pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON))
2452 pCurLeaf->uEcx |= (pVM->cCpus - 1) & UINT32_C(0xff);
2453#endif
2454 uSubLeaf++;
2455 }
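    /* For example, a 4 vCPU AMD guest profile ends up with ECX = 0x00000003
     * here: NC = 3 (i.e. four cores) and APICIdCoreIdSize = 0 (legacy method). */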
2456
2457 /* Cpuid 0x80000009: Reserved
2458 * We zero this since we don't know what it may have been used for.
2459 */
2460 cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x80000009));
2461
2462 /* Cpuid 0x8000000a: SVM information on AMD, invalid on Intel.
2463 * AMD: EAX - SVM revision.
2464 * EBX - Number of ASIDs.
2465 * ECX - Reserved.
2466 * EDX - SVM Feature identification.
2467 */
2468 if ( pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
2469 || pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
2470 {
2471 pExtFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000001), 0);
2472 if ( pExtFeatureLeaf
2473 && (pExtFeatureLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_SVM))
2474 {
2475 PCPUMCPUIDLEAF pSvmFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 0x8000000a, 0);
2476 if (pSvmFeatureLeaf)
2477 {
2478 pSvmFeatureLeaf->uEax = 0x1;
2479 pSvmFeatureLeaf->uEbx = 0x8000; /** @todo figure out virtual NASID. */
2480 pSvmFeatureLeaf->uEcx = 0;
2481 pSvmFeatureLeaf->uEdx &= ( X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE /** @todo Support other SVM features */
2482 | X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID
2483 | X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSISTS);
2484 }
2485 else
2486 {
2487 /* Should never happen. */
2488 LogRel(("CPUM: Warning! Expected CPUID leaf 0x8000000a not present! SVM features not exposed to the guest\n"));
2489 cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x8000000a));
2490 }
2491 }
2492 else
2493 {
2494 /* If SVM is not supported, this is reserved, zero out. */
2495 cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x8000000a));
2496 }
2497 }
2498 else
2499 {
2500 /* Cpuid 0x8000000a: Reserved on Intel.
2501 * We zero this since we don't know what it may have been used for.
2502 */
2503 cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x8000000a));
2504 }
2505
2506 /* Cpuid 0x8000000b thru 0x80000018: Reserved
2507 * We clear these as we don't know what purpose they might have. */
2508 for (uint32_t uLeaf = UINT32_C(0x8000000b); uLeaf <= UINT32_C(0x80000018); uLeaf++)
2509 cpumR3CpuIdZeroLeaf(pCpum, uLeaf);
2510
2511 /* Cpuid 0x80000019: TLB configuration
2512      * Seems to be harmless, so pass it thru as is. */
2513
2514     /* Cpuid 0x8000001a: Performance optimization identifiers.
2515      * Strip anything we don't recognize or that advertises features we don't implement. */
2516 uSubLeaf = 0;
2517 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x8000001a), uSubLeaf)) != NULL)
2518 {
2519 pCurLeaf->uEax &= RT_BIT_32(0) /* FP128 - use 1x128-bit instead of 2x64-bit. */
2520 | RT_BIT_32(1) /* MOVU - Prefere unaligned MOV over MOVL + MOVH. */
2521 //| RT_BIT_32(2) /* FP256 - use 1x256-bit instead of 2x128-bit. */
2522 ;
2523 pCurLeaf->uEbx = 0; /* reserved */
2524 pCurLeaf->uEcx = 0; /* reserved */
2525 pCurLeaf->uEdx = 0; /* reserved */
2526 uSubLeaf++;
2527 }
2528
2529     /* Cpuid 0x8000001b: Instruction based sampling (IBS) information.
2530 * Clear this as we don't currently virtualize this feature. */
2531 cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x8000001b));
2532
2533 /* Cpuid 0x8000001c: Lightweight profiling (LWP) information.
2534 * Clear this as we don't currently virtualize this feature. */
2535 cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x8000001c));
2536
2537 /* Cpuid 0x8000001d+ECX: Get cache configuration descriptors.
2538 * We need to sanitize the cores per cache (EAX[25:14]).
2539 *
2540 * This is very much the same as Intel's CPUID(4) leaf, except EAX[31:26]
2541      * and EDX[2] are reserved here, and EAX[25:14] is documented as having a
2542      * slightly different meaning.
2543 */
2544 uSubLeaf = 0;
2545 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x8000001d), uSubLeaf)) != NULL)
2546 {
2547#ifdef VBOX_WITH_MULTI_CORE
2548 uint32_t cCores = ((pCurLeaf->uEax >> 14) & 0xfff) + 1;
2549 if (cCores > pVM->cCpus)
2550 cCores = pVM->cCpus;
2551 pCurLeaf->uEax &= UINT32_C(0x00003fff);
2552 pCurLeaf->uEax |= ((cCores - 1) & 0xfff) << 14;
2553#else
2554 pCurLeaf->uEax &= UINT32_C(0x00003fff);
2555#endif
2556 uSubLeaf++;
2557 }
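    /* For example, with 4 vCPUs (and a host cache shared by at least as many
     * cores) a sub-leaf here reports EAX[25:14] = 3, i.e. four cores sharing
     * the cache, the field holding the count minus one. */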
2558
2559 /* Cpuid 0x8000001e: Get APIC / unit / node information.
2560 * If AMD, we configure it for our layout (on EMT(0)). In the multi-core
2561 * setup, we have one compute unit with all the cores in it. Single node.
2562 */
2563 uSubLeaf = 0;
2564 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x8000001e), uSubLeaf)) != NULL)
2565 {
2566 pCurLeaf->uEax = 0; /* Extended APIC ID = EMT(0).idApic (== 0). */
2567 if (pCurLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC_ID)
2568 {
2569#ifdef VBOX_WITH_MULTI_CORE
2570 pCurLeaf->uEbx = pVM->cCpus < 0x100
2571 ? (pVM->cCpus - 1) << 8 : UINT32_C(0x0000ff00); /* Compute unit ID 0, core per unit. */
2572#else
2573 pCurLeaf->uEbx = 0; /* Compute unit ID 0, 1 core per unit. */
2574#endif
2575 pCurLeaf->uEcx = 0; /* Node ID 0, 1 node per CPU. */
2576 }
2577 else
2578 {
2579 Assert(pCpum->GuestFeatures.enmCpuVendor != CPUMCPUVENDOR_AMD);
2580 Assert(pCpum->GuestFeatures.enmCpuVendor != CPUMCPUVENDOR_HYGON);
2581 pCurLeaf->uEbx = 0; /* Reserved. */
2582 pCurLeaf->uEcx = 0; /* Reserved. */
2583 }
2584 pCurLeaf->uEdx = 0; /* Reserved. */
2585 uSubLeaf++;
2586 }
2587
2588 /* Cpuid 0x80000020: Platform Quality of Service (PQOS), may have subleaves.
2589 * For now we just zero it. */
2590 pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000020), 0);
2591 if (pCurLeaf)
2592 {
2593 pCurLeaf = cpumR3CpuIdMakeSingleLeaf(pCpum, pCurLeaf);
2594 cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x80000020));
2595 }
2596
2597     /* Cpuid 0x80000021: Extended Feature 2 (Zen3+?). */
2600 pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000021), 0);
2601 if (pCurLeaf)
2602 {
2603 /** @todo sanitize these bits! */
2604 pCurLeaf->uEax = 0;
2605 pCurLeaf->uEbx = 0;
2606 pCurLeaf->uEcx = 0;
2607 pCurLeaf->uEdx = 0;
2608 }
2609 /* Linux expects us as a hypervisor to insert this leaf for Zen 1 & 2 CPUs
2610 iff IBPB is available to the guest. This is also documented by AMD in
2611 "TECHNICAL UPDATE REGARDING SPECULATIVE RETURN STACK OVERFLOW" rev 2.0
2612 dated 2024-02-00. */
2613 else if ( fAmdGstSupIbpb
2614 && ( pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
2615 || pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
2616 && (pExtFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000001), 0)) != NULL
2617 && RTX86GetCpuFamily(pExtFeatureLeaf->uEax) == 0x17)
2618 {
2619 static CPUMCPUIDLEAF const s_NewLeaf =
2620 {
2621 /* .uLeaf =*/ UINT32_C(0x80000021),
2622 /* .uSubLeaf = */ 0,
2623 /* .fSubLeafMask = */ 0,
2624 /* .uEax = */ X86_CPUID_AMD_21_EAX_IBPB_BRTYPE,
2625 /* .uEbx = */ 0,
2626 /* .uEcx = */ 0,
2627 /* .uEdx = */ 0,
2628 /* .fFlags = */ 0,
2629 };
2630 int const rc2 = cpumR3CpuIdInsert(NULL, &pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves, &s_NewLeaf);
2631 AssertRC(rc2);
2632 pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000000), 0);
2633 if (pCurLeaf && pCurLeaf->uEax < UINT32_C(0x80000021))
2634 pCurLeaf->uEax = UINT32_C(0x80000021);
2635 }
2636
2637 /* Cpuid 0x80000022...0x8ffffffd: Unknown.
2638      * We don't know what these leaves mean, so remove them. */
2639 cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
2640 UINT32_C(0x80000022), UINT32_C(0x8ffffffd));
2641
2642 /* Cpuid 0x8ffffffe: Mystery AMD K6 leaf.
2643 * Just pass it thru for now. */
2644
2645 /* Cpuid 0x8fffffff: Mystery hammer time leaf!
2646 * Just pass it thru for now. */
2647
2648 /* Cpuid 0xc0000000: Centaur stuff.
2649 * Harmless, pass it thru. */
2650
2651 /* Cpuid 0xc0000001: Centaur features.
2652 * VIA: EAX - Family, model, stepping.
2653 * EDX - Centaur extended feature flags. Nothing interesting, except may
2654 * FEMMS (bit 5), but VIA marks it as 'reserved', so never mind.
2655 * EBX, ECX - reserved.
2656      * We keep EAX but strip the rest.
2657 */
2658 uSubLeaf = 0;
2659 while ((pCurLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0xc0000001), uSubLeaf)) != NULL)
2660 {
2661 pCurLeaf->uEbx = 0;
2662 pCurLeaf->uEcx = 0;
2663         pCurLeaf->uEdx = 0; /* Bits 0 thru 9 are documented on sandpile.org, but we don't want them, except maybe 5 (FEMMS). */
2664 uSubLeaf++;
2665 }
2666
2667 /* Cpuid 0xc0000002: Old Centaur Current Performance Data.
2668 * We only have fixed stale values, but should be harmless. */
2669
2670 /* Cpuid 0xc0000003: Reserved.
2671 * We zero this since we don't know what it may have been used for.
2672 */
2673 cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0xc0000003));
2674
2675 /* Cpuid 0xc0000004: Centaur Performance Info.
2676 * We only have fixed stale values, but should be harmless. */
2677
2678
2679 /* Cpuid 0xc0000005...0xcfffffff: Unknown.
2680      * We don't know what these leaves mean, so remove them. */
2681 cpumR3CpuIdRemoveRange(pCpum->GuestInfo.paCpuIdLeavesR3, &pCpum->GuestInfo.cCpuIdLeaves,
2682 UINT32_C(0xc0000005), UINT32_C(0xcfffffff));
2683
2684 return VINF_SUCCESS;
2685#undef PORTABLE_DISABLE_FEATURE_BIT
2686#undef PORTABLE_CLEAR_BITS_WHEN
2687}
2688
2689
2690/**
2691 * Reads a value in /CPUM/IsaExts/ node.
2692 *
2693 * @returns VBox status code (error message raised).
2694 * @param pVM The cross context VM structure. (For errors.)
2695 * @param pIsaExts The /CPUM/IsaExts node (can be NULL).
2696 * @param pszValueName The value / extension name.
2697 * @param penmValue Where to return the choice.
2698 * @param enmDefault The default choice.
2699 */
2700static int cpumR3CpuIdReadIsaExtCfg(PVM pVM, PCFGMNODE pIsaExts, const char *pszValueName,
2701 CPUMISAEXTCFG *penmValue, CPUMISAEXTCFG enmDefault)
2702{
2703 /*
2704 * Try integer encoding first.
2705 */
2706 uint64_t uValue;
2707 int rc = CFGMR3QueryInteger(pIsaExts, pszValueName, &uValue);
2708 if (RT_SUCCESS(rc))
2709 switch (uValue)
2710 {
2711 case 0: *penmValue = CPUMISAEXTCFG_DISABLED; break;
2712 case 1: *penmValue = CPUMISAEXTCFG_ENABLED_SUPPORTED; break;
2713 case 2: *penmValue = CPUMISAEXTCFG_ENABLED_ALWAYS; break;
2714 case 9: *penmValue = CPUMISAEXTCFG_ENABLED_PORTABLE; break;
2715 default:
2716 return VMSetError(pVM, VERR_CPUM_INVALID_CONFIG_VALUE, RT_SRC_POS,
2717                               "Invalid config value for '/CPUM/IsaExts/%s': %llu (expected 0/'disabled', 1/'enabled', 2/'forced', or 9/'portable')",
2718 pszValueName, uValue);
2719 }
2720 /*
2721 * If missing, use default.
2722 */
2723 else if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_NO_PARENT)
2724 *penmValue = enmDefault;
2725 else
2726 {
2727 if (rc == VERR_CFGM_NOT_INTEGER)
2728 {
2729 /*
2730              * Not an integer, so try reading it as a string.
2731 */
2732 char szValue[32];
2733 rc = CFGMR3QueryString(pIsaExts, pszValueName, szValue, sizeof(szValue));
2734 if (RT_SUCCESS(rc))
2735 {
2736 RTStrToLower(szValue);
2737 size_t cchValue = strlen(szValue);
2738#define EQ(a_str) (cchValue == sizeof(a_str) - 1U && !memcmp(szValue, a_str, sizeof(a_str) - 1))
2739 if ( EQ("disabled") || EQ("disable") || EQ("off") || EQ("no"))
2740 *penmValue = CPUMISAEXTCFG_DISABLED;
2741 else if (EQ("enabled") || EQ("enable") || EQ("on") || EQ("yes"))
2742 *penmValue = CPUMISAEXTCFG_ENABLED_SUPPORTED;
2743 else if (EQ("forced") || EQ("force") || EQ("always"))
2744 *penmValue = CPUMISAEXTCFG_ENABLED_ALWAYS;
2745 else if (EQ("portable"))
2746 *penmValue = CPUMISAEXTCFG_ENABLED_PORTABLE;
2747 else if (EQ("default") || EQ("def"))
2748 *penmValue = enmDefault;
2749 else
2750 return VMSetError(pVM, VERR_CPUM_INVALID_CONFIG_VALUE, RT_SRC_POS,
2751                                   "Invalid config value for '/CPUM/IsaExts/%s': '%s' (expected 0/'disabled', 1/'enabled', 2/'forced', or 9/'portable')",
2752                                   pszValueName, szValue);
2753#undef EQ
2754 }
2755 }
2756 if (RT_FAILURE(rc))
2757 return VMSetError(pVM, rc, RT_SRC_POS, "Error reading config value '/CPUM/IsaExts/%s': %Rrc", pszValueName, rc);
2758 }
2759 return VINF_SUCCESS;
2760}
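
/* Example: both the integer and string encodings above map to the same choice,
 * so a CFGM value of 1 and one of "enabled" both yield
 * CPUMISAEXTCFG_ENABLED_SUPPORTED.  From the command line such values are
 * typically injected via the VBoxInternal extradata mapping, e.g. (sketch):
 *     VBoxManage setextradata "myvm" "VBoxInternal/CPUM/IsaExts/SSE4.1" "0"
 */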
2761
2762
2763/**
2764 * Reads a value in /CPUM/IsaExts/ node, forcing it to DISABLED if wanted.
2765 *
2766 * @returns VBox status code (error message raised).
2767 * @param pVM The cross context VM structure. (For errors.)
2768 * @param pIsaExts The /CPUM/IsaExts node (can be NULL).
2769 * @param pszValueName The value / extension name.
2770 * @param penmValue Where to return the choice.
2771 * @param enmDefault The default choice.
2772 * @param fAllowed Allowed choice. Applied both to the result and to
2773 * the default value.
2774 */
2775static int cpumR3CpuIdReadIsaExtCfgEx(PVM pVM, PCFGMNODE pIsaExts, const char *pszValueName,
2776 CPUMISAEXTCFG *penmValue, CPUMISAEXTCFG enmDefault, bool fAllowed)
2777{
2778 int rc;
2779 if (fAllowed)
2780 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, pszValueName, penmValue, enmDefault);
2781 else
2782 {
2783         rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, pszValueName, penmValue, CPUMISAEXTCFG_DISABLED /*enmDefault*/);
2784 if (RT_SUCCESS(rc) && *penmValue == CPUMISAEXTCFG_ENABLED_ALWAYS)
2785 LogRel(("CPUM: Ignoring forced '%s'\n", pszValueName));
2786 *penmValue = CPUMISAEXTCFG_DISABLED;
2787 }
2788 return rc;
2789}
2790
2791
2792/**
2793 * Reads a value in /CPUM/IsaExts/ node that used to be located in /CPUM/.
2794 *
2795 * @returns VBox status code (error message raised).
2796 * @param pVM The cross context VM structure. (For errors.)
2797 * @param pIsaExts The /CPUM/IsaExts node (can be NULL).
2798 * @param pCpumCfg The /CPUM node (can be NULL).
2799 * @param pszValueName The value / extension name.
2800 * @param penmValue Where to return the choice.
2801 * @param enmDefault The default choice.
2802 */
2803static int cpumR3CpuIdReadIsaExtCfgLegacy(PVM pVM, PCFGMNODE pIsaExts, PCFGMNODE pCpumCfg, const char *pszValueName,
2804 CPUMISAEXTCFG *penmValue, CPUMISAEXTCFG enmDefault)
2805{
2806 if (CFGMR3Exists(pCpumCfg, pszValueName))
2807 {
2808 if (!CFGMR3Exists(pIsaExts, pszValueName))
2809 LogRel(("Warning: /CPUM/%s is deprecated, use /CPUM/IsaExts/%s instead.\n", pszValueName, pszValueName));
2810 else
2811 return VMSetError(pVM, VERR_DUPLICATE, RT_SRC_POS,
2812 "Duplicate config values '/CPUM/%s' and '/CPUM/IsaExts/%s' - please remove the former!",
2813 pszValueName, pszValueName);
2814
2815 bool fLegacy;
2816 int rc = CFGMR3QueryBoolDef(pCpumCfg, pszValueName, &fLegacy, enmDefault != CPUMISAEXTCFG_DISABLED);
2817 if (RT_SUCCESS(rc))
2818 {
2819             *penmValue = fLegacy ? CPUMISAEXTCFG_ENABLED_SUPPORTED : CPUMISAEXTCFG_DISABLED;
2820 return VINF_SUCCESS;
2821 }
2822         return VMSetError(pVM, rc, RT_SRC_POS, "Error querying '/CPUM/%s': %Rrc", pszValueName, rc);
2823 }
2824
2825 return cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, pszValueName, penmValue, enmDefault);
2826}
2827
2828
2829static int cpumR3CpuIdReadConfig(PVM pVM, PCPUMCPUIDCONFIG pConfig, PCFGMNODE pCpumCfg, bool fNestedPagingAndFullGuestExec)
2830{
2831 int rc;
2832
2833 /** @cfgm{/CPUM/PortableCpuIdLevel, 8-bit, 0, 3, 0}
2834 * When non-zero CPUID features that could cause portability issues will be
2835 * stripped. The higher the value the more features gets stripped. Higher
2836 * values should only be used when older CPUs are involved since it may
2837 * harm performance and maybe also cause problems with specific guests. */
2838 rc = CFGMR3QueryU8Def(pCpumCfg, "PortableCpuIdLevel", &pVM->cpum.s.u8PortableCpuIdLevel, 0);
2839 AssertLogRelRCReturn(rc, rc);
2840
2841 /** @cfgm{/CPUM/GuestCpuName, string}
2842 * The name of the CPU we're to emulate. The default is the host CPU.
2843      * Note! CPUs other than "host" are currently unsupported.
2844 rc = CFGMR3QueryStringDef(pCpumCfg, "GuestCpuName", pConfig->szCpuName, sizeof(pConfig->szCpuName), "host");
2845 AssertLogRelRCReturn(rc, rc);
2846
2847 /** @cfgm{/CPUM/NT4LeafLimit, boolean, false}
2848 * Limit the number of standard CPUID leaves to 0..3 to prevent NT4 from
2849 * bugchecking with MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED (0x3e).
2850 * This option corresponds somewhat to IA32_MISC_ENABLES.BOOT_NT4[bit 22].
2851 */
2852 rc = CFGMR3QueryBoolDef(pCpumCfg, "NT4LeafLimit", &pConfig->fNt4LeafLimit, false);
2853 AssertLogRelRCReturn(rc, rc);
2854
2855 /** @cfgm{/CPUM/InvariantTsc, boolean, true}
2856 * Pass-through the invariant TSC flag in 0x80000007 if available on the host
2857 * CPU. On AMD CPUs, users may wish to suppress it to avoid trouble from older
2858 * 64-bit linux guests which assume the presence of AMD performance counters
2859 * that we do not virtualize.
2860 */
2861 rc = CFGMR3QueryBoolDef(pCpumCfg, "InvariantTsc", &pConfig->fInvariantTsc, true);
2862 AssertLogRelRCReturn(rc, rc);
2863
2864 /** @cfgm{/CPUM/InvariantApic, boolean, true}
2865      * Set the Always Running APIC Timer (ARAT) flag in CPUID leaf 6 if true; otherwise
2866 * pass through the host setting. The Windows 10/11 HAL won't use APIC timers
2867 * unless the ARAT bit is set. Note that both Intel and AMD set this bit.
2868 */
2869 rc = CFGMR3QueryBoolDef(pCpumCfg, "InvariantApic", &pConfig->fInvariantApic, true);
2870 AssertLogRelRCReturn(rc, rc);
2871
2872 /** @cfgm{/CPUM/ForceVme, boolean, false}
2873 * Always expose the VME (Virtual-8086 Mode Extensions) capability if true.
2874 * By default the flag is passed thru as is from the host CPU, except
2875 * on AMD Ryzen CPUs where it's masked to avoid trouble with XP/Server 2003
2876 * guests and DOS boxes in general.
2877 */
2878 rc = CFGMR3QueryBoolDef(pCpumCfg, "ForceVme", &pConfig->fForceVme, false);
2879 AssertLogRelRCReturn(rc, rc);
2880
2881 /** @cfgm{/CPUM/MaxIntelFamilyModelStep, uint32_t, UINT32_MAX}
2882      * Restrict the reported CPU family+model+stepping of Intel CPUs. This is
2883 * probably going to be a temporary hack, so don't depend on this.
2884 * The 1st byte of the value is the stepping, the 2nd byte value is the model
2885 * number and the 3rd byte value is the family, and the 4th value must be zero.
2886 */
2887 rc = CFGMR3QueryU32Def(pCpumCfg, "MaxIntelFamilyModelStep", &pConfig->uMaxIntelFamilyModelStep, UINT32_MAX);
2888 AssertLogRelRCReturn(rc, rc);
2889
2890 /** @cfgm{/CPUM/MaxStdLeaf, uint32_t, 0x00000016}
2891 * The last standard leaf to keep. The actual last value that is stored in EAX
2892 * is RT_MAX(CPUID[0].EAX,/CPUM/MaxStdLeaf). Leaves beyond the max leaf are
2893 * removed. (This works independently of and differently from NT4LeafLimit.)
2894 * The default is usually set to what we're able to reasonably sanitize.
2895 */
2896 rc = CFGMR3QueryU32Def(pCpumCfg, "MaxStdLeaf", &pConfig->uMaxStdLeaf, UINT32_C(0x00000016));
2897 AssertLogRelRCReturn(rc, rc);
2898
2899 /** @cfgm{/CPUM/MaxExtLeaf, uint32_t, 0x80000021}
2900 * The last extended leaf to keep. The actual last value that is stored in EAX
2901 * is RT_MAX(CPUID[0x80000000].EAX,/CPUM/MaxExtLeaf). Leaves beyond the max
2902 * leaf are removed. The default is set to what we're able to sanitize.
2903 */
2904 rc = CFGMR3QueryU32Def(pCpumCfg, "MaxExtLeaf", &pConfig->uMaxExtLeaf, UINT32_C(0x80000021));
2905 AssertLogRelRCReturn(rc, rc);
2906
2907 /** @cfgm{/CPUM/MaxCentaurLeaf, uint32_t, 0xc0000004}
2908 * The last Centaur leaf to keep. The actual last value that is stored in EAX
2909 * is RT_MAX(CPUID[0xc0000000].EAX,/CPUM/MaxCentaurLeaf). Leaves beyond the max
2910 * leaf are removed. The default is set to what we're able to sanitize.
2911 */
2912 rc = CFGMR3QueryU32Def(pCpumCfg, "MaxCentaurLeaf", &pConfig->uMaxCentaurLeaf, UINT32_C(0xc0000004));
2913 AssertLogRelRCReturn(rc, rc);
2914
2915 /** @cfgm{/CPUM/SpecCtrl, bool, false}
2916 * Enables passing thru IA32_SPEC_CTRL and associated CPU bugfixes.
2917 */
2918 rc = CFGMR3QueryBoolDef(pCpumCfg, "SpecCtrl", &pConfig->fSpecCtrl, false);
2919 AssertRCReturn(rc, rc);
2920
2921#ifdef RT_ARCH_AMD64 /** @todo nested VT-x/AMD-V on non-AMD64 hosts */
2922 bool fQueryNestedHwvirt = false
2923#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2924 || pVM->cpum.s.HostFeatures.s.enmCpuVendor == CPUMCPUVENDOR_AMD
2925 || pVM->cpum.s.HostFeatures.s.enmCpuVendor == CPUMCPUVENDOR_HYGON
2926#endif
2927#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2928 || pVM->cpum.s.HostFeatures.s.enmCpuVendor == CPUMCPUVENDOR_INTEL
2929 || pVM->cpum.s.HostFeatures.s.enmCpuVendor == CPUMCPUVENDOR_VIA
2930#endif
2931 ;
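    /* Note: the `false || ...` chain above evaluates to true only when at least
       one nested-hwvirt backend is compiled in and the host CPU vendor matches. */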
2932 if (fQueryNestedHwvirt)
2933 {
2934 /** @cfgm{/CPUM/NestedHWVirt, bool, false}
2935 * Whether to expose the hardware virtualization (VMX/SVM) feature to the guest.
2936 * The default is false, and when enabled requires a 64-bit CPU with support for
2937 * nested-paging and AMD-V or unrestricted guest mode.
2938 */
2939 rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedHWVirt", &pConfig->fNestedHWVirt, false);
2940 AssertLogRelRCReturn(rc, rc);
2941 if (pConfig->fNestedHWVirt)
2942 {
2943 /** @todo Think about enabling this later with NEM/KVM. */
2944 if (VM_IS_NEM_ENABLED(pVM))
2945 {
2946 LogRel(("CPUM: Warning! Can't turn on nested VT-x/AMD-V when NEM is used! (later)\n"));
2947 pConfig->fNestedHWVirt = false;
2948 }
2949 else if (!fNestedPagingAndFullGuestExec)
2950 return VMSetError(pVM, VERR_CPUM_INVALID_HWVIRT_CONFIG, RT_SRC_POS,
2951 "Cannot enable nested VT-x/AMD-V without nested-paging and unrestricted guest execution!\n");
2952 }
2953 }
2954#endif /** @todo */
2955
2956 /*
2957 * Instruction Set Architecture (ISA) Extensions.
2958 */
2959 PCFGMNODE pIsaExts = CFGMR3GetChild(pCpumCfg, "IsaExts");
2960 if (pIsaExts)
2961 {
2962 rc = CFGMR3ValidateConfig(pIsaExts, "/CPUM/IsaExts/",
2963 "CMPXCHG16B"
2964 "|MONITOR"
2965 "|MWaitExtensions"
2966 "|SSE4.1"
2967 "|SSE4.2"
2968 "|XSAVE"
2969 "|AVX"
2970 "|AVX2"
2971 "|AESNI"
2972 "|PCLMUL"
2973 "|POPCNT"
2974 "|MOVBE"
2975 "|RDRAND"
2976 "|RDSEED"
2977 "|ADX"
2978 "|CLFLUSHOPT"
2979 "|SHA"
2980 "|FSGSBASE"
2981 "|PCID"
2982 "|INVPCID"
2983 "|FlushCmdMsr"
2984 "|MdsClear"
2985 "|ArchCapMsr"
2986 "|FMA"
2987 "|F16C"
2988 "|McdtNo"
2989 "|MonitorMitgNo"
2990 "|ABM"
2991 "|SSE4A"
2992 "|MISALNSSE"
2993 "|3DNOWPRF"
2994 "|AXMMX"
2995 , "" /*pszValidNodes*/, "CPUM" /*pszWho*/, 0 /*uInstance*/);
2996 if (RT_FAILURE(rc))
2997 return rc;
2998 }
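    /*
     * Illustrative host-side usage (VM name hypothetical): any extension in the
     * list above can be forced off (or on) via extradata, e.g. hiding RDRAND:
     *   VBoxManage setextradata "MyVM" "VBoxInternal/CPUM/IsaExts/RDRAND" 0
     */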
2999
3000 /** @cfgm{/CPUM/IsaExts/CMPXCHG16B, boolean, true}
3001 * Expose CMPXCHG16B to the guest if available. All host CPUs which support
3002 * hardware virtualization have it.
3003 */
3004 rc = cpumR3CpuIdReadIsaExtCfgLegacy(pVM, pIsaExts, pCpumCfg, "CMPXCHG16B", &pConfig->enmCmpXchg16b, true);
3005 AssertLogRelRCReturn(rc, rc);
3006
3007 /** @cfgm{/CPUM/IsaExts/MONITOR, boolean, true}
3008 * Expose MONITOR/MWAIT instructions to the guest.
3009 */
3010 rc = cpumR3CpuIdReadIsaExtCfgLegacy(pVM, pIsaExts, pCpumCfg, "MONITOR", &pConfig->enmMonitor, true);
3011 AssertLogRelRCReturn(rc, rc);
3012
3013 /** @cfgm{/CPUM/IsaExts/MWaitExtensions, boolean, false}
3014 * Expose MWAIT extended features to the guest. For now we expose just the
3015 * MWAIT break-on-interrupt feature (bit 1).
3016 */
3017 rc = cpumR3CpuIdReadIsaExtCfgLegacy(pVM, pIsaExts, pCpumCfg, "MWaitExtensions", &pConfig->enmMWaitExtensions, false);
3018 AssertLogRelRCReturn(rc, rc);
3019
3020 /** @cfgm{/CPUM/IsaExts/SSE4.1, boolean, true}
3021 * Expose SSE4.1 to the guest if available.
3022 */
3023 rc = cpumR3CpuIdReadIsaExtCfgLegacy(pVM, pIsaExts, pCpumCfg, "SSE4.1", &pConfig->enmSse41, true);
3024 AssertLogRelRCReturn(rc, rc);
3025
3026 /** @cfgm{/CPUM/IsaExts/SSE4.2, boolean, true}
3027 * Expose SSE4.2 to the guest if available.
3028 */
3029 rc = cpumR3CpuIdReadIsaExtCfgLegacy(pVM, pIsaExts, pCpumCfg, "SSE4.2", &pConfig->enmSse42, true);
3030 AssertLogRelRCReturn(rc, rc);
3031
3032#ifdef RT_ARCH_AMD64
3033 bool const fMayHaveXSave = pVM->cpum.s.HostFeatures.s.fXSaveRstor
3034 && pVM->cpum.s.HostFeatures.s.fOpSysXSaveRstor
3035 && ( VM_IS_NEM_ENABLED(pVM)
3036 ? NEMHCGetFeatures(pVM) & NEM_FEAT_F_XSAVE_XRSTOR
3037 : VM_IS_EXEC_ENGINE_IEM(pVM)
3038 ? true
3039 : fNestedPagingAndFullGuestExec);
3040 uint64_t const fXStateHostMask = pVM->cpum.s.fXStateHostMask;
3041#else
3042 bool const fMayHaveXSave = true;
3043 uint64_t const fXStateHostMask = XSAVE_C_YMM | XSAVE_C_SSE | XSAVE_C_X87;
3044#endif
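    /* Summary of the gating above: on AMD64 hosts, exposing XSAVE requires host
       XSAVE/XRSTOR support in both CPU and OS, plus an execution engine that can
       virtualize it (NEM reporting NEM_FEAT_F_XSAVE_XRSTOR, IEM, or HM with
       nested paging and unrestricted guest execution). On non-AMD64 hosts the
       guest always runs under IEM, so x87/SSE/YMM state can always be emulated. */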
3045
3046 /** @cfgm{/CPUM/IsaExts/XSAVE, boolean, depends}
3047 * Expose XSAVE/XRSTOR to the guest if available. For the time being the
3048 * default is to only expose this to VMs with nested paging and AMD-V or
3049 * unrestricted guest execution mode. Not possible to force this one without
3050 * host support at the moment.
3051 */
3052 rc = cpumR3CpuIdReadIsaExtCfgEx(pVM, pIsaExts, "XSAVE", &pConfig->enmXSave, true,
3053 fMayHaveXSave /*fAllowed*/);
3054 AssertLogRelRCReturn(rc, rc);
3055
3056 /** @cfgm{/CPUM/IsaExts/AVX, boolean, depends}
3057 * Expose the AVX instruction set extensions to the guest if available and
3058 * XSAVE is exposed too. For the time being the default is to only expose this
3059 * to VMs with nested paging and AMD-V or unrestricted guest execution mode.
3060 */
3061 rc = cpumR3CpuIdReadIsaExtCfgEx(pVM, pIsaExts, "AVX", &pConfig->enmAvx, fNestedPagingAndFullGuestExec,
3062 fMayHaveXSave && pConfig->enmXSave && (fXStateHostMask & XSAVE_C_YMM) /*fAllowed*/);
3063 AssertLogRelRCReturn(rc, rc);
3064
3065 /** @cfgm{/CPUM/IsaExts/AVX2, boolean, depends}
3066 * Expose the AVX2 instruction set extensions to the guest if available and
3067 * XSAVE is exposed too. For the time being the default is to only expose this
3068 * to VMs with nested paging and AMD-V or unrestricted guest execution mode.
3069 */
3070 rc = cpumR3CpuIdReadIsaExtCfgEx(pVM, pIsaExts, "AVX2", &pConfig->enmAvx2, fNestedPagingAndFullGuestExec /* temporarily */,
3071 fMayHaveXSave && pConfig->enmXSave && (fXStateHostMask & XSAVE_C_YMM) /*fAllowed*/);
3072 AssertLogRelRCReturn(rc, rc);
3073
3074 /** @cfgm{/CPUM/IsaExts/AESNI, isaextcfg, depends}
3075 * Whether to expose the AES instructions to the guest. For the time being the
3076 * default is to only do this for VMs with nested paging and AMD-V or
3077 * unrestricted guest mode.
3078 */
3079 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "AESNI", &pConfig->enmAesNi, fNestedPagingAndFullGuestExec);
3080 AssertLogRelRCReturn(rc, rc);
3081
3082 /** @cfgm{/CPUM/IsaExts/PCLMUL, isaextcfg, depends}
3083 * Whether to expose the PCLMULQDQ instructions to the guest. For the time
3084 * being the default is to only do this for VMs with nested paging and AMD-V or
3085 * unrestricted guest mode.
3086 */
3087 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "PCLMUL", &pConfig->enmPClMul, fNestedPagingAndFullGuestExec);
3088 AssertLogRelRCReturn(rc, rc);
3089
3090 /** @cfgm{/CPUM/IsaExts/POPCNT, isaextcfg, true}
3091 * Whether to expose the POPCNT instructions to the guest.
3092 */
3093 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "POPCNT", &pConfig->enmPopCnt, CPUMISAEXTCFG_ENABLED_SUPPORTED);
3094 AssertLogRelRCReturn(rc, rc);
3095
3096 /** @cfgm{/CPUM/IsaExts/MOVBE, isaextcfg, true}
3097 * Whether to expose the MOVBE instructions to the guest.
3098 */
3101 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "MOVBE", &pConfig->enmMovBe, true);
3102 AssertLogRelRCReturn(rc, rc);
3103
3104 /** @cfgm{/CPUM/IsaExts/RDRAND, isaextcfg, depends}
3105 * Whether to expose the RDRAND instructions to the guest. For the time being
3106 * the default is to only do this for VMs with nested paging and AMD-V or
3107 * unrestricted guest mode.
3108 */
3109 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "RDRAND", &pConfig->enmRdRand, fNestedPagingAndFullGuestExec);
3110 AssertLogRelRCReturn(rc, rc);
3111
3112 /** @cfgm{/CPUM/IsaExts/RDSEED, isaextcfg, depends}
3113 * Whether to expose the RDSEED instructions to the guest. For the time being
3114 * the default is to only do this for VMs with nested paging and AMD-V or
3115 * unrestricted guest mode.
3116 */
3117 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "RDSEED", &pConfig->enmRdSeed, fNestedPagingAndFullGuestExec);
3118 AssertLogRelRCReturn(rc, rc);
3119
3120 /** @cfgm{/CPUM/IsaExts/ADX, isaextcfg, depends}
3121 * Whether to expose the ADX instructions to the guest. For the time being
3122 * the default is to only do this for VMs with nested paging and AMD-V or
3123 * unrestricted guest mode.
3124 */
3125 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "ADX", &pConfig->enmAdx, fNestedPagingAndFullGuestExec);
3126 AssertLogRelRCReturn(rc, rc);
3127
3128 /** @cfgm{/CPUM/IsaExts/CLFLUSHOPT, isaextcfg, depends}
3129 * Whether to expose the CLFLUSHOPT instructions to the guest. For the time
3130 * being the default is to only do this for VMs with nested paging and AMD-V or
3131 * unrestricted guest mode.
3132 */
3133 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "CLFLUSHOPT", &pConfig->enmCLFlushOpt, fNestedPagingAndFullGuestExec);
3134 AssertLogRelRCReturn(rc, rc);
3135
3136 /** @cfgm{/CPUM/IsaExts/SHA, isaextcfg, depends}
3137 * Whether to expose the SHA instructions to the guest. For the time being
3138 * the default is to only do this for VMs with nested paging and AMD-V or
3139 * unrestricted guest mode.
3140 */
3141 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "SHA", &pConfig->enmSha, fNestedPagingAndFullGuestExec);
3142 AssertLogRelRCReturn(rc, rc);
3143
3144 /** @cfgm{/CPUM/IsaExts/FSGSBASE, isaextcfg, true}
3145 * Whether to expose the read/write FSGSBASE instructions to the guest.
3146 */
3147 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "FSGSBASE", &pConfig->enmFsGsBase, true);
3148 AssertLogRelRCReturn(rc, rc);
3149
3150 /** @cfgm{/CPUM/IsaExts/PCID, isaextcfg, depends}
3151 * Whether to expose the PCID feature to the guest. Defaults to the FSGSBASE setting.
3152 */
3153 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "PCID", &pConfig->enmPcid, pConfig->enmFsGsBase);
3154 AssertLogRelRCReturn(rc, rc);
3155
3156 /** @cfgm{/CPUM/IsaExts/INVPCID, isaextcfg, depends}
3157 * Whether to expose the INVPCID instruction to the guest. Defaults to the FSGSBASE setting.
3158 */
3159 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "INVPCID", &pConfig->enmInvpcid, pConfig->enmFsGsBase);
3160 AssertLogRelRCReturn(rc, rc);
3161
3162 /** @cfgm{/CPUM/IsaExts/FlushCmdMsr, isaextcfg, true}
3163 * Whether to expose the IA32_FLUSH_CMD MSR to the guest.
3164 */
3165 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "FlushCmdMsr", &pConfig->enmFlushCmdMsr, CPUMISAEXTCFG_ENABLED_SUPPORTED);
3166 AssertLogRelRCReturn(rc, rc);
3167
3168 /** @cfgm{/CPUM/IsaExts/MdsClear, isaextcfg, true}
3169 * Whether to advertise the VERW and MDS related IA32_FLUSH_CMD MSR bits to
3170 * the guest. Requires FlushCmdMsr to be present too.
3171 */
3172 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "MdsClear", &pConfig->enmMdsClear, CPUMISAEXTCFG_ENABLED_SUPPORTED);
3173 AssertLogRelRCReturn(rc, rc);
3174
3175 /** @cfgm{/CPUM/IsaExts/ArchCapMsr, isaextcfg, true}
3176 * Whether to expose the MSR_IA32_ARCH_CAPABILITIES MSR to the guest.
3177 */
3178 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "ArchCapMsr", &pConfig->enmArchCapMsr, CPUMISAEXTCFG_ENABLED_SUPPORTED_OR_NOT_AMD64);
3179 AssertLogRelRCReturn(rc, rc);
3180
3181 /** @cfgm{/CPUM/IsaExts/FMA, boolean, depends}
3182 * Expose the FMA instruction set extensions to the guest if available and
3183 * XSAVE is exposed too. For the time being the default is to only expose this
3184 * to VMs with nested paging and AMD-V or unrestricted guest execution mode.
3185 */
3186 rc = cpumR3CpuIdReadIsaExtCfgEx(pVM, pIsaExts, "FMA", &pConfig->enmFma, fNestedPagingAndFullGuestExec /* temporarily */,
3187 fMayHaveXSave && pConfig->enmXSave && (fXStateHostMask & XSAVE_C_YMM) /*fAllowed*/);
3188 AssertLogRelRCReturn(rc, rc);
3189
3190 /** @cfgm{/CPUM/IsaExts/F16C, boolean, depends}
3191 * Expose the F16C instruction set extensions to the guest if available and
3192 * XSAVE is exposed too. For the time being the default is to only expose this
3193 * to VMs with nested paging and AMD-V or unrestricted guest execution mode.
3194 */
3195 rc = cpumR3CpuIdReadIsaExtCfgEx(pVM, pIsaExts, "F16C", &pConfig->enmF16c, fNestedPagingAndFullGuestExec /* temporarily */,
3196 fMayHaveXSave && pConfig->enmXSave && (fXStateHostMask & XSAVE_C_YMM) /*fAllowed*/);
3197 AssertLogRelRCReturn(rc, rc);
3198
3199 /** @cfgm{/CPUM/IsaExts/McdtNo, isaextcfg, true}
3200 * Whether the CPU is not susceptible to the MXCSR configuration dependent
3201 * timing (MCDT) behaviour.
3202 */
3203 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "McdtNo", &pConfig->enmMcdtNo, CPUMISAEXTCFG_ENABLED_SUPPORTED);
3204 AssertLogRelRCReturn(rc, rc);
3205
3206 /** @cfgm{/CPUM/IsaExts/MonitorMitgNo, isaextcfg, true}
3207 * Whether the CPU is not susceptible to MONITOR/UMONITOR internal table capacity
3208 * issues.
3209 */
3210 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "MonitorMitgNo", &pConfig->enmMonitorMitgNo, CPUMISAEXTCFG_ENABLED_SUPPORTED);
3211 AssertLogRelRCReturn(rc, rc);
3212
3213
3214 /* AMD: */
3215
3216 /** @cfgm{/CPUM/IsaExts/ABM, isaextcfg, true}
3217 * Whether to expose the AMD ABM instructions to the guest.
3218 */
3219 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "ABM", &pConfig->enmAbm, CPUMISAEXTCFG_ENABLED_SUPPORTED);
3220 AssertLogRelRCReturn(rc, rc);
3221
3222 /** @cfgm{/CPUM/IsaExts/SSE4A, isaextcfg, depends}
3223 * Whether to expose the AMD SSE4A instructions to the guest. For the time
3224 * being the default is to only do this for VMs with nested paging and AMD-V or
3225 * unrestricted guest mode.
3226 */
3227 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "SSE4A", &pConfig->enmSse4A, fNestedPagingAndFullGuestExec);
3228 AssertLogRelRCReturn(rc, rc);
3229
3230 /** @cfgm{/CPUM/IsaExts/MISALNSSE, isaextcfg, depends}
3231 * Whether to expose the AMD MisAlSse feature (MXCSR flag 17) to the guest. For
3232 * the time being the default is to only do this for VMs with nested paging and
3233 * AMD-V or unrestricted guest mode.
3234 */
3235 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "MISALNSSE", &pConfig->enmMisAlnSse, fNestedPagingAndFullGuestExec);
3236 AssertLogRelRCReturn(rc, rc);
3237
3238 /** @cfgm{/CPUM/IsaExts/3DNOWPRF, isaextcfg, depends}
3239 * Whether to expose the AMD 3DNow! prefetch instructions to the guest.
3240 * For the time being the default is to only do this for VMs with nested paging
3241 * and AMD-V or unrestricted guest mode.
3242 */
3243 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "3DNOWPRF", &pConfig->enm3dNowPrf, fNestedPagingAndFullGuestExec);
3244 AssertLogRelRCReturn(rc, rc);
3245
3246 /** @cfgm{/CPUM/IsaExts/AXMMX, isaextcfg, depends}
3247 * Whether to expose AMD's MMX extensions to the guest. For the time being
3248 * the default is to only do this for VMs with nested paging and AMD-V or
3249 * unrestricted guest mode.
3250 */
3251 rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "AXMMX", &pConfig->enmAmdExtMmx, fNestedPagingAndFullGuestExec);
3252 AssertLogRelRCReturn(rc, rc);
3253
3254 return VINF_SUCCESS;
3255}
3256
3257
3258/**
3259 * Checks and fixes the maximum physical address width supported by the
3260 * variable-range MTRR MSRs to be consistent with what is reported in CPUID.
3261 *
3262 * @returns VBox status code.
3263 * @param pVM The cross context VM structure.
3264 * @param cVarMtrrs The number of variable-range MTRRs reported to the guest.
3265 */
3266static int cpumR3FixVarMtrrPhysAddrWidths(PVM pVM, uint8_t const cVarMtrrs)
3267{
3268 AssertLogRelMsgReturn(cVarMtrrs <= RT_ELEMENTS(pVM->apCpusR3[0]->cpum.s.GuestMsrs.msr.aMtrrVarMsrs),
3269 ("Invalid number of variable range MTRRs reported (%u)\n", cVarMtrrs),
3270 VERR_CPUM_IPE_2);
3271
3272 /*
3273 * CPUID determines the actual maximum physical address width reported and supported.
3274 * If the CPU DB profile reported fewer address bits, we must correct it here by
3275 * updating the MSR write #GP masks of all the variable-range MTRR MSRs. Otherwise,
3276 * they cause problems when guests write to these MTRR MSRs, see @bugref{10498#c32}.
3277 */
3278 PCPUMMSRRANGE pBaseRange0 = cpumLookupMsrRange(pVM, MSR_IA32_MTRR_PHYSBASE0);
3279 AssertLogRelMsgReturn(pBaseRange0, ("Failed to lookup the IA32_MTRR_PHYSBASE[0] MSR range\n"), VERR_NOT_FOUND);
3280
3281 PCPUMMSRRANGE pMaskRange0 = cpumLookupMsrRange(pVM, MSR_IA32_MTRR_PHYSMASK0);
3282 AssertLogRelMsgReturn(pMaskRange0, ("Failed to lookup the IA32_MTRR_PHYSMASK[0] MSR range\n"), VERR_NOT_FOUND);
3283
3284 uint64_t const fPhysBaseWrGpMask = pBaseRange0->fWrGpMask;
3285 uint64_t const fPhysMaskWrGpMask = pMaskRange0->fWrGpMask;
3286
3287 uint8_t const cGuestMaxPhysAddrWidth = pVM->cpum.s.GuestFeatures.cMaxPhysAddrWidth;
3288 uint8_t const cProfilePhysBaseMaxPhysAddrWidth = ASMBitLastSetU64(~fPhysBaseWrGpMask);
3289 uint8_t const cProfilePhysMaskMaxPhysAddrWidth = ASMBitLastSetU64(~fPhysMaskWrGpMask);
3290
3291 AssertLogRelMsgReturn(cProfilePhysBaseMaxPhysAddrWidth == cProfilePhysMaskMaxPhysAddrWidth,
3292 ("IA32_MTRR_PHYSBASE and IA32_MTRR_PHYSMASK report different physical address widths (%u and %u)\n",
3293 cProfilePhysBaseMaxPhysAddrWidth, cProfilePhysMaskMaxPhysAddrWidth),
3294 VERR_CPUM_IPE_2);
3295 AssertLogRelMsgReturn(cProfilePhysBaseMaxPhysAddrWidth > 12 && cProfilePhysBaseMaxPhysAddrWidth <= 64,
3296 ("IA32_MTRR_PHYSBASE and IA32_MTRR_PHYSMASK reports an invalid physical address width of %u bits\n",
3297 cProfilePhysBaseMaxPhysAddrWidth), VERR_CPUM_IPE_2);
3298
3299 if (cProfilePhysBaseMaxPhysAddrWidth < cGuestMaxPhysAddrWidth)
3300 {
3301 uint64_t fNewPhysBaseWrGpMask = fPhysBaseWrGpMask;
3302 uint64_t fNewPhysMaskWrGpMask = fPhysMaskWrGpMask;
3303 int8_t cBits = cGuestMaxPhysAddrWidth - cProfilePhysBaseMaxPhysAddrWidth;
3304 while (cBits)
3305 {
3306 uint64_t const fWrGpAndMask = ~(uint64_t)RT_BIT_64(cProfilePhysBaseMaxPhysAddrWidth + cBits - 1);
3307 fNewPhysBaseWrGpMask &= fWrGpAndMask;
3308 fNewPhysMaskWrGpMask &= fWrGpAndMask;
3309 --cBits;
3310 }
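        /* Worked example (hypothetical widths): if the profile masks allow 36
           address bits but CPUID reports 39, cBits starts at 3 and the loop
           clears write-#GP mask bits 38, 37 and 36, so guest writes to those
           physical address bits no longer fault. */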
3311
3312 for (uint8_t iVarMtrr = 1; iVarMtrr < cVarMtrrs; iVarMtrr++)
3313 {
3314 PCPUMMSRRANGE pBaseRange = cpumLookupMsrRange(pVM, MSR_IA32_MTRR_PHYSBASE0 + (iVarMtrr * 2));
3315 AssertLogRelMsgReturn(pBaseRange, ("Failed to lookup the IA32_MTRR_PHYSBASE[%u] MSR range\n", iVarMtrr),
3316 VERR_NOT_FOUND);
3317
3318 PCPUMMSRRANGE pMaskRange = cpumLookupMsrRange(pVM, MSR_IA32_MTRR_PHYSMASK0 + (iVarMtrr * 2));
3319 AssertLogRelMsgReturn(pMaskRange, ("Failed to lookup the IA32_MTRR_PHYSMASK[%u] MSR range\n", iVarMtrr),
3320 VERR_NOT_FOUND);
3321
3322 AssertLogRelMsgReturn(pBaseRange->fWrGpMask == fPhysBaseWrGpMask,
3323 ("IA32_MTRR_PHYSBASE[%u] write GP mask (%#016RX64) differs from IA32_MTRR_PHYSBASE[0] write GP mask (%#016RX64)\n",
3324 iVarMtrr, pBaseRange->fWrGpMask, fPhysBaseWrGpMask),
3325 VERR_CPUM_IPE_1);
3326 AssertLogRelMsgReturn(pMaskRange->fWrGpMask == fPhysMaskWrGpMask,
3327 ("IA32_MTRR_PHYSMASK[%u] write GP mask (%#016RX64) differs from IA32_MTRR_PHYSMASK[0] write GP mask (%#016RX64)\n",
3328 iVarMtrr, pMaskRange->fWrGpMask, fPhysMaskWrGpMask),
3329 VERR_CPUM_IPE_1);
3330
3331 pBaseRange->fWrGpMask = fNewPhysBaseWrGpMask;
3332 pMaskRange->fWrGpMask = fNewPhysMaskWrGpMask;
3333 }
3334
3335 pBaseRange0->fWrGpMask = fNewPhysBaseWrGpMask;
3336 pMaskRange0->fWrGpMask = fNewPhysMaskWrGpMask;
3337
3338 LogRel(("CPUM: Updated IA32_MTRR_PHYSBASE[0..%u] MSR write #GP mask (old=%#016RX64 new=%#016RX64)\n",
3339 cVarMtrrs - 1, fPhysBaseWrGpMask, fNewPhysBaseWrGpMask));
3340 LogRel(("CPUM: Updated IA32_MTRR_PHYSMASK[0..%u] MSR write #GP mask (old=%#016RX64 new=%#016RX64)\n",
3341 cVarMtrrs - 1, fPhysMaskWrGpMask, fNewPhysMaskWrGpMask));
3342 }
3343
3344 return VINF_SUCCESS;
3345}
3346
3347
3348/**
3349 * Inserts variable-range MTRR MSR ranges based on the given count.
3350 *
3351 * Since we need to insert MSRs beyond what the CPU profile has inserted, we
3352 * reinsert the whole range here; the variable-range MTRR MSR read+write
3353 * functions handle ranges as well as the \#GP checking.
3354 *
3355 * @returns VBox status code.
3356 * @param pVM The cross context VM structure.
3357 * @param cVarMtrrs The number of variable-range MTRRs to insert. This must be
3358 * less than or equal to CPUMCTX_MAX_MTRRVAR_COUNT.
3359 */
3360static int cpumR3VarMtrrMsrRangeInsert(PVM pVM, uint8_t const cVarMtrrs)
3361{
3362#ifdef VBOX_WITH_STATISTICS
3363# define CPUM_MTRR_PHYSBASE_MSRRANGE(a_uMsr, a_uValue, a_szName) \
3364 { (a_uMsr), (a_uMsr), kCpumMsrRdFn_Ia32MtrrPhysBaseN, kCpumMsrWrFn_Ia32MtrrPhysBaseN, 0, 0, a_uValue, 0, 0, a_szName, { 0 }, { 0 }, { 0 }, { 0 } }
3365# define CPUM_MTRR_PHYSMASK_MSRRANGE(a_uMsr, a_uValue, a_szName) \
3366 { (a_uMsr), (a_uMsr), kCpumMsrRdFn_Ia32MtrrPhysMaskN, kCpumMsrWrFn_Ia32MtrrPhysMaskN, 0, 0, a_uValue, 0, 0, a_szName, { 0 }, { 0 }, { 0 }, { 0 } }
3367#else
3368# define CPUM_MTRR_PHYSBASE_MSRRANGE(a_uMsr, a_uValue, a_szName) \
3369 { (a_uMsr), (a_uMsr), kCpumMsrRdFn_Ia32MtrrPhysBaseN, kCpumMsrWrFn_Ia32MtrrPhysBaseN, 0, 0, a_uValue, 0, 0, a_szName }
3370# define CPUM_MTRR_PHYSMASK_MSRRANGE(a_uMsr, a_uValue, a_szName) \
3371 { (a_uMsr), (a_uMsr), kCpumMsrRdFn_Ia32MtrrPhysMaskN, kCpumMsrWrFn_Ia32MtrrPhysMaskN, 0, 0, a_uValue, 0, 0, a_szName }
3372#endif
3373 static CPUMMSRRANGE const s_aMsrRanges_MtrrPhysBase[CPUMCTX_MAX_MTRRVAR_COUNT] =
3374 {
3375 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE0, 0, "MSR_IA32_MTRR_PHYSBASE0"),
3376 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE1, 1, "MSR_IA32_MTRR_PHYSBASE1"),
3377 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE2, 2, "MSR_IA32_MTRR_PHYSBASE2"),
3378 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE3, 3, "MSR_IA32_MTRR_PHYSBASE3"),
3379 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE4, 4, "MSR_IA32_MTRR_PHYSBASE4"),
3380 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE5, 5, "MSR_IA32_MTRR_PHYSBASE5"),
3381 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE6, 6, "MSR_IA32_MTRR_PHYSBASE6"),
3382 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE7, 7, "MSR_IA32_MTRR_PHYSBASE7"),
3383 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE8, 8, "MSR_IA32_MTRR_PHYSBASE8"),
3384 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE9, 9, "MSR_IA32_MTRR_PHYSBASE9"),
3385 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE9 + 2, 10, "MSR_IA32_MTRR_PHYSBASE10"),
3386 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE9 + 4, 11, "MSR_IA32_MTRR_PHYSBASE11"),
3387 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE9 + 6, 12, "MSR_IA32_MTRR_PHYSBASE12"),
3388 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE9 + 8, 13, "MSR_IA32_MTRR_PHYSBASE13"),
3389 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE9 + 10, 14, "MSR_IA32_MTRR_PHYSBASE14"),
3390 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE9 + 12, 15, "MSR_IA32_MTRR_PHYSBASE15"),
3391 };
3392 static CPUMMSRRANGE const s_aMsrRanges_MtrrPhysMask[CPUMCTX_MAX_MTRRVAR_COUNT] =
3393 {
3394 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK0, 0, "MSR_IA32_MTRR_PHYSMASK0"),
3395 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK1, 1, "MSR_IA32_MTRR_PHYSMASK1"),
3396 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK2, 2, "MSR_IA32_MTRR_PHYSMASK2"),
3397 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK3, 3, "MSR_IA32_MTRR_PHYSMASK3"),
3398 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK4, 4, "MSR_IA32_MTRR_PHYSMASK4"),
3399 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK5, 5, "MSR_IA32_MTRR_PHYSMASK5"),
3400 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK6, 6, "MSR_IA32_MTRR_PHYSMASK6"),
3401 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK7, 7, "MSR_IA32_MTRR_PHYSMASK7"),
3402 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK8, 8, "MSR_IA32_MTRR_PHYSMASK8"),
3403 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK9, 9, "MSR_IA32_MTRR_PHYSMASK9"),
3404 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK9 + 2, 10, "MSR_IA32_MTRR_PHYSMASK10"),
3405 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK9 + 4, 11, "MSR_IA32_MTRR_PHYSMASK11"),
3406 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK9 + 6, 12, "MSR_IA32_MTRR_PHYSMASK12"),
3407 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK9 + 8, 13, "MSR_IA32_MTRR_PHYSMASK13"),
3408 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK9 + 10, 14, "MSR_IA32_MTRR_PHYSMASK14"),
3409 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK9 + 12, 15, "MSR_IA32_MTRR_PHYSMASK15"),
3410 };
3411 AssertCompile(RT_ELEMENTS(s_aMsrRanges_MtrrPhysBase) == RT_ELEMENTS(pVM->apCpusR3[0]->cpum.s.GuestMsrs.msr.aMtrrVarMsrs));
3412 AssertCompile(RT_ELEMENTS(s_aMsrRanges_MtrrPhysMask) == RT_ELEMENTS(pVM->apCpusR3[0]->cpum.s.GuestMsrs.msr.aMtrrVarMsrs));
3413
3414 Assert(cVarMtrrs <= RT_ELEMENTS(pVM->apCpusR3[0]->cpum.s.GuestMsrs.msr.aMtrrVarMsrs));
3415 for (unsigned i = 0; i < cVarMtrrs; i++)
3416 {
3417 int rc = CPUMR3MsrRangesInsert(pVM, &s_aMsrRanges_MtrrPhysBase[i]);
3418 AssertLogRelRCReturn(rc, rc);
3419 rc = CPUMR3MsrRangesInsert(pVM, &s_aMsrRanges_MtrrPhysMask[i]);
3420 AssertLogRelRCReturn(rc, rc);
3421 }
3422 return VINF_SUCCESS;
3423
3424#undef CPUM_MTRR_PHYSBASE_MSRRANGE
3425#undef CPUM_MTRR_PHYSMASK_MSRRANGE
3426}
3427
3428
3429/**
3430 * Initializes the MTRR capability based on what the guest CPU profile (typically host)
3431 * supports.
3432 *
3433 * @returns VBox status code.
3434 * @param pVM The cross context VM structure.
3435 * @param fMtrrVarCountIsVirt Whether the variable-range MTRR count is fully
3436 * virtualized (@c true) or derived from the CPU
3437 * profile (@c false).
3438 */
3439static int cpumR3InitMtrrCap(PVM pVM, bool fMtrrVarCountIsVirt)
3440{
3441#ifdef RT_ARCH_AMD64
3442 Assert(pVM->cpum.s.HostFeatures.s.fMtrr);
3443#endif
3444
3445 /* Lookup the number of variable-range MTRRs supported by the CPU profile. */
3446 PCCPUMMSRRANGE pMtrrCapRange = cpumLookupMsrRange(pVM, MSR_IA32_MTRR_CAP);
3447 AssertLogRelMsgReturn(pMtrrCapRange, ("Failed to lookup IA32_MTRR_CAP MSR range\n"), VERR_NOT_FOUND);
3448 uint8_t const cProfileVarRangeRegs = pMtrrCapRange->uValue & MSR_IA32_MTRR_CAP_VCNT_MASK;
3449
3450 /* Construct guest MTRR support capabilities. */
3451 uint8_t const cGuestVarRangeRegs = fMtrrVarCountIsVirt ? CPUMCTX_MAX_MTRRVAR_COUNT
3452 : RT_MIN(cProfileVarRangeRegs, CPUMCTX_MAX_MTRRVAR_COUNT);
3453 uint64_t const uGstMtrrCap = cGuestVarRangeRegs
3454 | MSR_IA32_MTRR_CAP_FIX
3455 | MSR_IA32_MTRR_CAP_WC;
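    /* Example (IA32_MTRRCAP layout: VCNT in bits 7:0, FIX bit 8, WC bit 10):
       with the fully virtualized count of 16 variable-range MTRRs this yields
       uGstMtrrCap = 16 | 0x100 | 0x400 = 0x510. */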
3456 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3457 {
3458 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
3459 pVCpu->cpum.s.GuestMsrs.msr.MtrrCap = uGstMtrrCap;
3460 pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType = MSR_IA32_MTRR_DEF_TYPE_FIXED_EN
3461 | MSR_IA32_MTRR_DEF_TYPE_MTRR_EN
3462 | X86_MTRR_MT_UC;
3463 }
3464
3465 if (fMtrrVarCountIsVirt)
3466 {
3467 /*
3468 * Insert the full variable-range MTRR MSR range ourselves so it extends beyond what is
3469 * typically reported by the hardware CPU profile.
3470 */
3471 LogRel(("CPUM: Enabled fixed-range MTRRs and %u (virtualized) variable-range MTRRs\n", cGuestVarRangeRegs));
3472 return cpumR3VarMtrrMsrRangeInsert(pVM, cGuestVarRangeRegs);
3473 }
3474
3475 /*
3476 * Ensure that the maximum physical address width supported by the variable-range
3477 * MTRRs is consistent with what is reported to the guest via CPUID.
3478 */
3479 LogRel(("CPUM: Enabled fixed-range MTRRs and %u (CPU profile derived) variable-range MTRRs\n", cGuestVarRangeRegs));
3480 return cpumR3FixVarMtrrPhysAddrWidths(pVM, cGuestVarRangeRegs);
3481}
3482
3483
3484/**
3485 * Initializes the emulated CPU's CPUID & MSR information.
3486 *
3487 * @returns VBox status code.
3488 * @param pVM The cross context VM structure.
3489 * @param pHostMsrs Pointer to the host MSRs.
3490 */
3491int cpumR3InitCpuIdAndMsrs(PVM pVM, PCCPUMMSRS pHostMsrs)
3492{
3493 Assert(pHostMsrs);
3494
3495 PCPUM pCpum = &pVM->cpum.s;
3496 PCFGMNODE pCpumCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM");
3497
3498 /*
3499 * Set the fCpuIdApicFeatureVisible flags so the APIC can assume visibility
3500 * on construction and manage everything from here on.
3501 */
3502 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3503 {
3504 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
3505 pVCpu->cpum.s.fCpuIdApicFeatureVisible = true;
3506 }
3507
3508 /*
3509 * Read the configuration.
3510 */
3511 CPUMCPUIDCONFIG Config;
3512 RT_ZERO(Config);
3513
3514 bool const fNestedPagingAndFullGuestExec = VM_IS_NEM_ENABLED(pVM)
3515 || HMAreNestedPagingAndFullGuestExecEnabled(pVM);
3516 int rc = cpumR3CpuIdReadConfig(pVM, &Config, pCpumCfg, fNestedPagingAndFullGuestExec);
3517 AssertRCReturn(rc, rc);
3518
3519 /*
3520 * Get the guest CPU data from the database and/or the host.
3521 *
3522 * The CPUID and MSRs are currently living on the regular heap to avoid
3523 * fragmenting the hyper heap (and because there isn't/wasn't any realloc
3524 * API for the hyper heap). This means special cleanup considerations.
3525 */
3526 /** @todo The hyper heap will be removed ASAP, so the final destination is
3527 * now a fixed sized arrays in the VM structure. Maybe we can simplify
3528 * this allocation fun a little now? Or maybe it's too convenient for
3529 * the CPU reporter code... No time to figure that out now. */
3530 rc = cpumR3DbGetCpuInfo(Config.szCpuName, &pCpum->GuestInfo);
3531 if (RT_FAILURE(rc))
3532 return rc == VERR_CPUM_DB_CPU_NOT_FOUND
3533 ? VMSetError(pVM, rc, RT_SRC_POS,
3534 "Info on guest CPU '%s' could not be found. Please, select a different CPU.", Config.szCpuName)
3535 : rc;
3536
3537#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
3538 if (pCpum->GuestInfo.fMxCsrMask & ~pVM->cpum.s.fHostMxCsrMask)
3539 {
3540 LogRel(("Stripping unsupported MXCSR bits from guest mask: %#x -> %#x (host: %#x)\n", pCpum->GuestInfo.fMxCsrMask,
3541 pCpum->GuestInfo.fMxCsrMask & pVM->cpum.s.fHostMxCsrMask, pVM->cpum.s.fHostMxCsrMask));
3542 pCpum->GuestInfo.fMxCsrMask &= pVM->cpum.s.fHostMxCsrMask;
3543 }
3544 LogRel(("CPUM: MXCSR_MASK=%#x (host: %#x)\n", pCpum->GuestInfo.fMxCsrMask, pVM->cpum.s.fHostMxCsrMask));
3545#else
3546 LogRel(("CPUM: MXCSR_MASK=%#x\n", pCpum->GuestInfo.fMxCsrMask));
3547#endif
3548
3549 /** @cfgm{/CPUM/GuestMicrocodeRevision, 32-bit}
3550 * CPU microcode revision number to use. If UINT32_MAX, the microcode revision
3551 * of the host CPU is used for the "host" profile, and the database entry's
3552 * revision when a specific profile is selected (amd64 host only). */
3553 rc = CFGMR3QueryU32Def(pCpumCfg, "GuestMicrocodeRevision", &pCpum->GuestInfo.uMicrocodeRevision, UINT32_MAX);
3554 AssertLogRelRCReturn(rc, rc);
3555#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
3556 if ( pCpum->GuestInfo.uMicrocodeRevision == UINT32_MAX
3557 && strcmp(Config.szCpuName, "host") == 0)
3558 {
3559 rc = SUPR3QueryMicrocodeRev(&pCpum->GuestInfo.uMicrocodeRevision);
3560 if (RT_FAILURE(rc))
3561 pCpum->GuestInfo.uMicrocodeRevision = UINT32_MAX;
3562 }
3563#endif
3564
3565 /** @cfgm{/CPUM/MSRs/[Name]/[First|Last|Type|Value|...],}
3566 * Overrides the guest MSRs.
3567 */
3568 rc = cpumR3LoadMsrOverrides(pVM, CFGMR3GetChild(pCpumCfg, "MSRs"));
3569
3570 /** @cfgm{/CPUM/HostCPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit}
3571 * Overrides the CPUID leaf values (from the host CPU usually) used for
3572 * calculating the guest CPUID leaves. This can be used to preserve the CPUID
3573 * values when moving a VM to a different machine. Another use is restricting
3574 * (or extending) the feature set exposed to the guest. */
3575 if (RT_SUCCESS(rc))
3576 rc = cpumR3LoadCpuIdOverrides(pVM, CFGMR3GetChild(pCpumCfg, "HostCPUID"), "HostCPUID");
3577
3578 if (RT_SUCCESS(rc) && CFGMR3GetChild(pCpumCfg, "CPUID")) /* 2nd override, now discontinued. */
3579 rc = VMSetError(pVM, VERR_CFGM_CONFIG_UNKNOWN_NODE, RT_SRC_POS,
3580 "Found unsupported configuration node '/CPUM/CPUID/'. "
3581 "Please use IMachine::setCPUIDLeaf() instead.");
3582
3583 CPUMMSRS GuestMsrs;
3584 RT_ZERO(GuestMsrs);
3585
3586 /*
3587 * Pre-explode the CPUID info.
3588 */
3589 if (RT_SUCCESS(rc))
3590 rc = cpumCpuIdExplodeFeaturesX86(pCpum->GuestInfo.paCpuIdLeavesR3, pCpum->GuestInfo.cCpuIdLeaves, &GuestMsrs,
3591 &pCpum->GuestFeatures);
3592
3593 /*
3594 * Sanitize the cpuid information passed on to the guest.
3595 */
3596 if (RT_SUCCESS(rc))
3597 {
3598 rc = cpumR3CpuIdSanitize(pVM, pCpum, &Config);
3599 if (RT_SUCCESS(rc))
3600 {
3601 cpumR3CpuIdLimitLeaves(pCpum, &Config);
3602 cpumR3CpuIdLimitIntelFamModStep(pCpum, &Config);
3603 }
3604 }
3605
3606 /*
3607 * Move the CPUID array over to the static VM structure allocation
3608 * and explode guest CPU features again. We must do this *before*
3609 * reconciling MSRs with CPUIDs and applying any fudging (esp on ARM64).
3610 */
3611 if (RT_SUCCESS(rc))
3612 {
3613 void * const pvFree = pCpum->GuestInfo.paCpuIdLeavesR3;
3614 rc = cpumR3CpuIdInstallAndExplodeLeaves(pVM, pCpum, pCpum->GuestInfo.paCpuIdLeavesR3,
3615 pCpum->GuestInfo.cCpuIdLeaves, &GuestMsrs);
3616 AssertLogRelRC(rc);
3617 RTMemFree(pvFree);
3618 if (RT_SUCCESS(rc))
3619 {
3620 /*
3621 * Setup MSRs introduced in microcode updates or that are otherwise not in
3622 * the CPU profile, but are advertised in the CPUID info we just sanitized.
3623 */
3624 if (RT_SUCCESS(rc))
3625 rc = cpumR3MsrReconcileWithCpuId(pVM, false, false);
3626 /*
3627 * MSR fudging.
3628 */
3629 if (RT_SUCCESS(rc))
3630 {
3631 /** @cfgm{/CPUM/FudgeMSRs, boolean, true}
3632 * Fudges some common MSRs if not present in the selected CPU database entry.
3633 * This is for trying to keep VMs running when moved between different hosts
3634 * and different CPU vendors. */
3635 bool fEnable;
3636 rc = CFGMR3QueryBoolDef(pCpumCfg, "FudgeMSRs", &fEnable, true); AssertRC(rc);
3637 if (RT_SUCCESS(rc) && fEnable)
3638 {
3639 rc = cpumR3MsrApplyFudge(pVM);
3640 AssertLogRelRC(rc);
3641 }
3642 }
3643 if (RT_SUCCESS(rc))
3644 {
3645 /*
3646 * Move the MSR arrays over to the static VM structure allocation.
3647 */
3648 AssertFatalMsg(pCpum->GuestInfo.cMsrRanges <= RT_ELEMENTS(pCpum->GuestInfo.aMsrRanges),
3649 ("%u\n", pCpum->GuestInfo.cMsrRanges));
3650 memcpy(pCpum->GuestInfo.aMsrRanges, pCpum->GuestInfo.paMsrRangesR3,
3651 sizeof(pCpum->GuestInfo.paMsrRangesR3[0]) * pCpum->GuestInfo.cMsrRanges);
3652 RTMemFree(pCpum->GuestInfo.paMsrRangesR3);
3653 pCpum->GuestInfo.paMsrRangesR3 = pCpum->GuestInfo.aMsrRanges;
3654
3655 /*
3656 * Some more configuration that we're applying at the end of everything
3657 * via the CPUMR3SetGuestCpuIdFeature API.
3658 */
3659
3660 /* Check if 64-bit guest support was enabled. */
3661 bool fEnable64bit;
3662 rc = CFGMR3QueryBoolDef(pCpumCfg, "Enable64bit", &fEnable64bit, false);
3663 AssertRCReturn(rc, rc);
3664 if (fEnable64bit)
3665 {
3666 /* In case of a CPU upgrade: */
3667 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
3668 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL); /* (Long mode only on Intel CPUs.) */
3669 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
3670 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
3671 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
3672
3673 /* The actual feature: */
3674 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
3675 }
3676
3677 /* Check if PAE was explicitly enabled by the user. */
3678 bool fEnable;
3679 rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "EnablePAE", &fEnable, fEnable64bit);
3680 AssertRCReturn(rc, rc);
3681 if (fEnable && !pVM->cpum.s.GuestFeatures.fPae)
3682 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
3683
3684 /* We don't normally enable NX for raw-mode, so give the user a chance to force it on. */
3685 rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableNX", &fEnable, fEnable64bit);
3686 AssertRCReturn(rc, rc);
3687 if (fEnable && !pVM->cpum.s.GuestFeatures.fNoExecute)
3688 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
3689
3690 /* Check if speculation control is enabled. */
3691 if (Config.fSpecCtrl)
3692 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SPEC_CTRL);
3693 else
3694 {
3695 /*
3696 * Set the "SSBD-not-needed" flag to work around a bug in some Linux kernels when the VIRT_SPEC_CTRL
3697 * feature is not exposed on AMD CPUs and there is only 1 vCPU configured.
3698 * This was observed with kernel "4.15.0-29-generic #31~16.04.1-Ubuntu" but more versions are likely affected.
3699 *
3700 * The kernel doesn't initialize a lock and causes a NULL pointer exception later on when configuring SSBD:
3701 * EIP: _raw_spin_lock+0x14/0x30
3702 * EFLAGS: 00010046 CPU: 0
3703 * EAX: 00000000 EBX: 00000001 ECX: 00000004 EDX: 00000000
3704 * ESI: 00000000 EDI: 00000000 EBP: ee023f1c ESP: ee023f18
3705 * DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068
3706 * CR0: 80050033 CR2: 00000004 CR3: 3671c180 CR4: 000006f0
3707 * Call Trace:
3708 * speculative_store_bypass_update+0x8e/0x180
3709 * ssb_prctl_set+0xc0/0xe0
3710 * arch_seccomp_spec_mitigate+0x1d/0x20
3711 * do_seccomp+0x3cb/0x610
3712 * SyS_seccomp+0x16/0x20
3713 * do_fast_syscall_32+0x7f/0x1d0
3714 * entry_SYSENTER_32+0x4e/0x7c
3715 *
3716 * The lock would've been initialized in process.c:speculative_store_bypass_ht_init() called from two places in smpboot.c.
3717 * First when a secondary CPU is started and second in native_smp_prepare_cpus() which is not called in a single vCPU environment.
3718 *
3719 * As spectre control features are completely disabled anyway when we arrived here there is no harm done in informing the
3720 * guest to not even try.
3721 */
3722 if ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
3723 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
3724 {
3725 PCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetExactLeaf(&pVM->cpum.s, UINT32_C(0x80000008), 0);
3726 if (pLeaf)
3727 {
3728 pLeaf->uEbx |= X86_CPUID_AMD_EFEID_EBX_SSBD_NOT_REQUIRED;
3729 LogRel(("CPUM: Set SSBD not required flag for AMD to work around some buggy Linux kernels!\n"));
3730 }
3731 }
3732 }
3733
3734 /*
3735 * MTRR support.
3736 * We've always reported the MTRR feature bit in CPUID.
3737 * Here we allow exposing MTRRs with reasonable default values (especially required
3738 * by Windows 10 guests with Hyper-V enabled). The MTRR support isn't feature
3739 * complete, see @bugref{10318} and @bugref{10498}.
3740 */
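                /*
                 * Illustrative host-side usage (VM name hypothetical): the MTRR
                 * modes below can be selected via extradata, e.g. downgrading to
                 * read-only support:
                 *   VBoxManage setextradata "MyVM" "VBoxInternal/CPUM/MtrrReadWrite" 0
                 *   VBoxManage setextradata "MyVM" "VBoxInternal/CPUM/MtrrReadOnly" 1
                 */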
3741 if (pVM->cpum.s.GuestFeatures.fMtrr)
3742 {
3743 /** @cfgm{/CPUM/MtrrReadWrite, boolean, true}
3744 * Whether to enable MTRR read-write support. This overrides the MtrrReadOnly
3745 * CFGM setting. */
3746 bool fEnableMtrrReadWrite;
3747 rc = CFGMR3QueryBoolDef(pCpumCfg, "MtrrReadWrite", &fEnableMtrrReadWrite, true);
3748 AssertRCReturn(rc, rc);
3749 if (fEnableMtrrReadWrite)
3750 {
3751 pVM->cpum.s.fMtrrRead = true;
3752 pVM->cpum.s.fMtrrWrite = true;
3753 LogRel(("CPUM: Enabled MTRR read-write support\n"));
3754 }
3755 else
3756 {
3757 /** @cfgm{/CPUM/MtrrReadOnly, boolean, false}
3758 * Whether to enable MTRR read-only support and to initialize the mapping of
3759 * guest memory via MTRRs. When disabled, MTRRs are left blank; reads return 0
3760 * and writes are ignored. Some guests, like GNU/Linux, recognize a virtual
3761 * system when MTRRs are left blank, but other guests may expect their RAM to
3762 * be mapped via MTRRs similar to real hardware. */
3763 rc = CFGMR3QueryBoolDef(pCpumCfg, "MtrrReadOnly", &pVM->cpum.s.fMtrrRead, false);
3764 AssertRCReturn(rc, rc);
3765 LogRel(("CPUM: Enabled MTRR read-only support\n"));
3766 }
3767
3768 /* Setup MTRR capability based on what the guest CPU profile (typically host) supports. */
3769 Assert(!pVM->cpum.s.fMtrrWrite || pVM->cpum.s.fMtrrRead);
3770 if (pVM->cpum.s.fMtrrRead)
3771 {
3772 /** @cfgm{/CPUM/MtrrVarCountIsVirtual, boolean, true}
3773 * When enabled, the number of variable-range MTRRs is virtualized. When disabled,
3774 * the number of variable-range MTRRs is derived from the CPU profile. Unless
3775 * guests have problems with a virtualized number of variable-range MTRRs, it is
3776 * recommended to keep this enabled so that there are sufficient MTRRs to fully
3777 * describe all regions of the guest RAM. */
3778 bool fMtrrVarCountIsVirt;
3779 rc = CFGMR3QueryBoolDef(pCpumCfg, "MtrrVarCountIsVirtual", &fMtrrVarCountIsVirt, true);
3780 AssertRCReturn(rc, rc);
3781
3782 rc = cpumR3InitMtrrCap(pVM, fMtrrVarCountIsVirt);
3783 if (RT_SUCCESS(rc))
3784 { /* likely */ }
3785 else
3786 return rc;
3787 }
3788 }
3789
3790 /*
3791 * Finally, initialize guest VMX MSRs.
3792 *
3793 * This needs to be done -after- exploding guest features and sanitizing CPUID leaves
3794 * as constructing VMX capabilities MSRs rely on CPU feature bits like long mode,
3795 * unrestricted-guest execution, CR4 feature bits and possibly more in the future.
3796 */
3797 /** @todo r=bird: given that long mode never used to be enabled before the
3798 * VMINITCOMPLETED_RING0 state, and we're a lot earlier here in ring-3
3799 * init, the above comment cannot be entirely accurate. */
3800 if (pVM->cpum.s.GuestFeatures.fVmx)
3801 {
3802 Assert(Config.fNestedHWVirt);
3803 cpumR3InitVmxGuestFeaturesAndMsrs(pVM, pCpumCfg, &pHostMsrs->hwvirt.vmx, &GuestMsrs.hwvirt.vmx);
3804
3805 /* Copy MSRs to all VCPUs */
3806 PCVMXMSRS pVmxMsrs = &GuestMsrs.hwvirt.vmx;
3807 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3808 {
3809 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
3810 memcpy(&pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs, pVmxMsrs, sizeof(*pVmxMsrs));
3811 }
3812 }
3813
3814 return VINF_SUCCESS;
3815 }
3816
3817 /*
3818 * Failed before/while switching to internal VM structure storage.
3819 */
3820 RTMemFree(pCpum->GuestInfo.paCpuIdLeavesR3);
3821 pCpum->GuestInfo.paCpuIdLeavesR3 = NULL;
3822 }
3823 }
3824 RTMemFree(pCpum->GuestInfo.paMsrRangesR3);
3825 pCpum->GuestInfo.paMsrRangesR3 = NULL;
3826 return rc;
3827}
3828
3829
3830/**
3831 * Sets a CPUID feature bit during VM initialization.
3832 *
3833 * Since the CPUID feature bits are generally related to CPU features, other
3834 * CPUM configuration like MSRs can also be modified by calls to this API.
3835 *
3836 * @param pVM The cross context VM structure.
3837 * @param enmFeature The feature to set.
3838 */
3839VMMR3_INT_DECL(void) CPUMR3SetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
3840{
3841 PCPUMCPUIDLEAF pLeaf;
3842 PCPUMMSRRANGE pMsrRange;
3843
3844#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
3845# define CHECK_X86_HOST_FEATURE_RET(a_fFeature, a_szFeature) \
3846 if (!pVM->cpum.s.HostFeatures.s. a_fFeature) \
3847 { \
3848 LogRel(("CPUM: WARNING! Can't turn on " a_szFeature " when the host doesn't support it!\n")); \
3849 return; \
3850 } else do { } while (0)
3851#else
3852# define CHECK_X86_HOST_FEATURE_RET(a_fFeature, a_szFeature) do { } while (0)
3853#endif
3854
3855#define GET_8000_0001_CHECK_X86_HOST_FEATURE_RET(a_fFeature, a_szFeature) \
3856 do \
3857 { \
3858 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001)); \
3859 if (!pLeaf) \
3860 { \
3861 LogRel(("CPUM: WARNING! Can't turn on " a_szFeature " when no 0x80000001 CPUID leaf!\n")); \
3862 return; \
3863 } \
3864 CHECK_X86_HOST_FEATURE_RET(a_fFeature,a_szFeature); \
3865 } while (0)
3866
3867 switch (enmFeature)
3868 {
3869 /*
3870 * Set the APIC bit in both feature masks.
3871 */
3872 case CPUMCPUIDFEATURE_APIC:
3873 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
3874 if (pLeaf && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
3875 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_APIC;
3876
3877 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
3878 if (pLeaf && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
3879 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
3880
3881 pVM->cpum.s.GuestFeatures.fApic = 1;
3882
3883 /* Make sure we've got the APICBASE MSR present. */
3884 pMsrRange = cpumLookupMsrRange(pVM, MSR_IA32_APICBASE);
3885 if (!pMsrRange)
3886 {
3887 static CPUMMSRRANGE const s_ApicBase =
3888 {
3889 /*.uFirst =*/ MSR_IA32_APICBASE, /*.uLast =*/ MSR_IA32_APICBASE,
3890 /*.enmRdFn =*/ kCpumMsrRdFn_Ia32ApicBase, /*.enmWrFn =*/ kCpumMsrWrFn_Ia32ApicBase,
3891 /*.offCpumCpu =*/ UINT16_MAX, /*.fReserved =*/ 0, /*.uValue =*/ 0, /*.fWrIgnMask =*/ 0, /*.fWrGpMask =*/ 0,
3892 /*.szName = */ "IA32_APIC_BASE"
3893 };
3894 int rc = CPUMR3MsrRangesInsert(pVM, &s_ApicBase);
3895 AssertLogRelRC(rc);
3896 }
3897
3898 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled xAPIC\n"));
3899 break;
3900
3901 /*
3902 * Set the x2APIC bit in the standard feature mask.
3903 * Note! ASSUMES CPUMCPUIDFEATURE_APIC is called first.
3904 */
3905 case CPUMCPUIDFEATURE_X2APIC:
3906 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
3907 if (pLeaf)
3908 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_X2APIC;
3909 pVM->cpum.s.GuestFeatures.fX2Apic = 1;
3910
3911 /* Make sure the MSR doesn't GP or ignore the EXTD bit. */
3912 pMsrRange = cpumLookupMsrRange(pVM, MSR_IA32_APICBASE);
3913 if (pMsrRange)
3914 {
3915 pMsrRange->fWrGpMask &= ~MSR_IA32_APICBASE_EXTD;
3916 pMsrRange->fWrIgnMask &= ~MSR_IA32_APICBASE_EXTD;
3917 }
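            /* MSR_IA32_APICBASE_EXTD is the x2APIC-enable bit (bit 10); removing
               it from both the write-#GP and write-ignore masks is what allows
               the guest to actually set it. */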
3918
3919 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled x2APIC\n"));
3920 break;
3921
3922 /*
3923 * Set the sysenter/sysexit bit in the standard feature mask.
3924 * Assumes the caller knows what it's doing! (host must support these)
3925 */
3926 case CPUMCPUIDFEATURE_SEP:
3927 CHECK_X86_HOST_FEATURE_RET(fSysEnter, "SEP");
3928 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
3929 if (pLeaf)
3930 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_SEP;
3931 pVM->cpum.s.GuestFeatures.fSysEnter = 1;
3932 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSENTER/EXIT\n"));
3933 break;
3934
3935 /*
3936 * Set the syscall/sysret bit in the extended feature mask.
3937 * Assumes the caller knows what it's doing! (host must support these)
3938 */
3939 case CPUMCPUIDFEATURE_SYSCALL:
3940 GET_8000_0001_CHECK_X86_HOST_FEATURE_RET(fSysCall, "SYSCALL/SYSRET");
3941
3942 /* Valid for both Intel and AMD CPUs, although only in 64-bit mode on Intel. */
3943 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
3944 pVM->cpum.s.GuestFeatures.fSysCall = 1;
3945 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSCALL/RET\n"));
3946 break;
3947
3948 /*
3949 * Set the PAE bit in both feature masks.
3950 * Assumes the caller knows what it's doing! (host must support these)
3951 */
3952 case CPUMCPUIDFEATURE_PAE:
3953 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
3954 if (pLeaf)
3955 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAE;
3956
3957 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
3958 if ( pLeaf
3959 && ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
3960 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON))
3961 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
3962
3963 pVM->cpum.s.GuestFeatures.fPae = 1;
3964 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAE\n"));
3965 break;
3966
3967 /*
3968 * Set the LONG MODE bit in the extended feature mask.
3969 * Assumes the caller knows what it's doing! (host must support these)
3970 */
3971 case CPUMCPUIDFEATURE_LONG_MODE:
3972 GET_8000_0001_CHECK_X86_HOST_FEATURE_RET(fLongMode, "LONG MODE");
3973
3974 /* Valid for both Intel and AMD. */
3975 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
3976 pVM->cpum.s.GuestFeatures.fLongMode = 1;
3977 pVM->cpum.s.GuestFeatures.cVmxMaxPhysAddrWidth = pVM->cpum.s.GuestFeatures.cMaxPhysAddrWidth;
3978 if (pVM->cpum.s.GuestFeatures.fVmx)
3979 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3980 {
3981 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
3982 pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64Basic &= ~VMX_BASIC_PHYSADDR_WIDTH_32BIT;
3983 }
3984 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LONG MODE\n"));
3985 break;
3986
3987 /*
3988 * Set the NX/XD bit in the extended feature mask.
3989 * Assumes the caller knows what it's doing! (host must support these)
3990 */
3991 case CPUMCPUIDFEATURE_NX:
3992 GET_8000_0001_CHECK_X86_HOST_FEATURE_RET(fNoExecute, "NX/XD");
3993
3994 /* Valid for both Intel and AMD. */
3995 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_NX;
3996 pVM->cpum.s.GuestFeatures.fNoExecute = 1;
3997 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled NX\n"));
3998 break;
3999
4000
4001 /*
4002 * Set the LAHF/SAHF support in 64-bit mode.
4003 * Assumes the caller knows what it's doing! (host must support this)
4004 */
4005 case CPUMCPUIDFEATURE_LAHF:
4006 GET_8000_0001_CHECK_X86_HOST_FEATURE_RET(fLahfSahf, "LAHF/SAHF");
4007
4008 /* Valid for both Intel and AMD. */
4009 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEcx = pLeaf->uEcx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
4010 pVM->cpum.s.GuestFeatures.fLahfSahf = 1;
4011 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
4012 break;
4013
4014 /*
4015 * Set the RDTSCP support bit.
4016 * Assumes the caller knows what it's doing! (host must support this)
4017 */
4018 case CPUMCPUIDFEATURE_RDTSCP:
4019 if (pVM->cpum.s.u8PortableCpuIdLevel > 0)
4020 return;
4021 GET_8000_0001_CHECK_X86_HOST_FEATURE_RET(fRdTscP, "RDTSCP");
4022 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
4023
4024 /* Valid for both Intel and AMD. */
4025 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
4026 pVM->cpum.s.GuestFeatures.fRdTscP = 1;
4027 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled RDTSCP.\n"));
4028 break;
4029
4030 /*
4031 * Set the Hypervisor Present bit in the standard feature mask.
4032 */
4033 case CPUMCPUIDFEATURE_HVP:
4034 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
4035 if (pLeaf)
4036 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_HVP;
4037 pVM->cpum.s.GuestFeatures.fHypervisorPresent = 1;
4038 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Hypervisor Present bit\n"));
4039 break;
4040
4041 /*
4042 * Set up the speculation control CPUID bits and MSRs. This is quite complicated
4043 * on Intel CPUs, and different on AMDs.
4044 */
4045 case CPUMCPUIDFEATURE_SPEC_CTRL:
4046 {
4047 AssertReturnVoid(!pVM->cpum.s.GuestFeatures.fSpeculationControl); /* should only be done once! */
4048
4049#ifdef RT_ARCH_AMD64
4050 if (!pVM->cpum.s.HostFeatures.s.fIbpb && !pVM->cpum.s.HostFeatures.s.fIbrs)
4051 {
4052 LogRel(("CPUM: WARNING! Can't turn on Speculation Control when the host doesn't support it!\n"));
4053 return;
4054 }
4055#endif
4056 bool fForceSpecCtrl = false;
4057 bool fForceFlushCmd = false;
4058
4059 /*
4060 * Intel spreads the feature info around a bit...
4061 */
4062 if (pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
4063 {
4064 pLeaf = cpumR3CpuIdGetExactLeaf(&pVM->cpum.s, UINT32_C(0x00000007), 0);
4065 if (!pLeaf)
4066 {
4067 LogRel(("CPUM: WARNING! Can't turn on Speculation Control on Intel CPUs without leaf 0x00000007!\n"));
4068 return;
4069 }
4070
4071 /* Okay, the feature can be enabled. Let's see what we can actually do. */
4072
4073#ifdef RT_ARCH_AMD64
4074 /* We will only expose STIBP if IBRS is present to keep things simpler (simple is not an option). */
4075 if (pVM->cpum.s.HostFeatures.s.fIbrs)
4076#endif
4077 {
4078/** @todo make this more configurable? */
4079 pLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB;
4080 pVM->cpum.s.GuestFeatures.fIbrs = 1;
4081 pVM->cpum.s.GuestFeatures.fIbpb = 1;
4082#ifdef RT_ARCH_AMD64
4083 if (pVM->cpum.s.HostFeatures.s.fStibp)
4084#endif
4085 {
4086 pLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_STIBP;
4087 pVM->cpum.s.GuestFeatures.fStibp = 1;
4088 }
4089
4090#ifdef RT_ARCH_AMD64
4091 if (pVM->cpum.s.HostFeatures.s.fSsbd)
4092#endif
4093 {
4094 pLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_SSBD;
4095 pVM->cpum.s.GuestFeatures.fSsbd = 1;
4096 }
4097
4098 PCPUMCPUIDLEAF const pSubLeaf2 = cpumR3CpuIdGetExactLeaf(&pVM->cpum.s, UINT32_C(0x00000007), 2);
4099 if (pSubLeaf2)
4100 {
4101#ifdef RT_ARCH_AMD64
4102 if (pVM->cpum.s.HostFeatures.s.fPsfd)
4103#endif
4104 {
4105 pSubLeaf2->uEdx |= X86_CPUID_STEXT_FEATURE_2_EDX_PSFD;
4106 pVM->cpum.s.GuestFeatures.fPsfd = 1;
4107 }
4108
4109#ifdef RT_ARCH_AMD64
4110 if (pVM->cpum.s.HostFeatures.s.fIpredCtrl)
4111#endif
4112 {
4113 pSubLeaf2->uEdx |= X86_CPUID_STEXT_FEATURE_2_EDX_IPRED_CTRL;
4114 pVM->cpum.s.GuestFeatures.fIpredCtrl = 1;
4115 }
4116
4117#ifdef RT_ARCH_AMD64
4118 if (pVM->cpum.s.HostFeatures.s.fRrsbaCtrl)
4119#endif
4120 {
4121 pSubLeaf2->uEdx |= X86_CPUID_STEXT_FEATURE_2_EDX_RRSBA_CTRL;
4122 pVM->cpum.s.GuestFeatures.fRrsbaCtrl = 1;
4123 }
4124
4125#ifdef RT_ARCH_AMD64
4126 if (pVM->cpum.s.HostFeatures.s.fDdpdU)
4127#endif
4128 {
4129 pSubLeaf2->uEdx |= X86_CPUID_STEXT_FEATURE_2_EDX_DDPD_U;
4130 pVM->cpum.s.GuestFeatures.fDdpdU = 1;
4131 }
4132
4133#ifdef RT_ARCH_AMD64
4134 if (pVM->cpum.s.HostFeatures.s.fBhiCtrl)
4135#endif
4136 {
4137 pSubLeaf2->uEdx |= X86_CPUID_STEXT_FEATURE_2_EDX_BHI_CTRL;
4138 pVM->cpum.s.GuestFeatures.fBhiCtrl = 1;
4139 }
4140 fForceSpecCtrl = true;
4141 }
4142 }
4143
4144#ifdef RT_ARCH_AMD64
4145 if (pVM->cpum.s.HostFeatures.s.fFlushCmd)
4146#endif
4147 {
4148 pLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD;
4149 pVM->cpum.s.GuestFeatures.fFlushCmd = 1;
4150 fForceFlushCmd = true;
4151 }
4152
4153#ifdef RT_ARCH_AMD64
4154 if (pVM->cpum.s.HostFeatures.s.fArchCap)
4155#endif
4156 {
4157 pLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP;
4158 pVM->cpum.s.GuestFeatures.fArchCap = 1;
4159
4160 /* Advertise IBRS_ALL if present at this point... */
4161#ifdef RT_ARCH_AMD64
4162 if (pVM->cpum.s.HostFeatures.s.fArchCap & MSR_IA32_ARCH_CAP_F_IBRS_ALL)
4163#endif
4164 VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->cpum.s.GuestMsrs.msr.ArchCaps |= MSR_IA32_ARCH_CAP_F_IBRS_ALL);
4165 }
4166 cpumCpuIdExplodeFeaturesX86SetSummaryBits(&pVM->cpum.s.GuestFeatures);
4167 }
4168 /*
4169 * AMD does things in a different (better) way. No MSR with info,
4170 * it's all in various CPUID leaves.
4171 */
4172 else if ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
4173 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
4174 {
4175 /* The precise details of AMD's implementation are not yet clear. */
4176 pLeaf = cpumR3CpuIdGetExactLeaf(&pVM->cpum.s, UINT32_C(0x80000008), 0);
4177 if (!pLeaf)
4178 {
4179 LogRel(("CPUM: WARNING! Can't turn on Speculation Control on AMD CPUs without leaf 0x80000008!\n"));
4180 return;
4181 }
4182
4183 /* We pass through all the host CPUID bits on AMD, see cpumR3CpuIdSanitize,
4184 and there is no code to clear/unset the feature. So, little to do.
4185 The only thing we could consider here is to re-enable features
4186 suppressed for portability reasons. */
4187 }
4188 else
4189 break;
4190
4191 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Speculation Control.\n"));
4192 pVM->cpum.s.GuestFeatures.fSpeculationControl = 1;
4193 cpumR3MsrReconcileWithCpuId(pVM, fForceFlushCmd, fForceSpecCtrl);
4194 break;
4195 }
4196
4197 default:
4198 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
4199 break;
4200 }
4201
4202 /** @todo can probably kill this as this API is now init time only... */
4203 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
4204 {
4205 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
4206 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
4207 }
4208
4209#undef GET_8000_0001_CHECK_X86_HOST_FEATURE_RET
4210#undef CHECK_X86_HOST_FEATURE_RET
4211}
4212
4213
4214/**
4215 * Queries a CPUID feature bit.
4216 *
4217 * @returns boolean for feature presence
4218 * @param pVM The cross context VM structure.
4219 * @param enmFeature The feature to query.
4220 * @deprecated Use the cpum.ro.GuestFeatures directly instead.
4221 */
4222VMMR3_INT_DECL(bool) CPUMR3GetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
4223{
4224 switch (enmFeature)
4225 {
4226 case CPUMCPUIDFEATURE_APIC: return pVM->cpum.s.GuestFeatures.fApic;
4227 case CPUMCPUIDFEATURE_X2APIC: return pVM->cpum.s.GuestFeatures.fX2Apic;
4228 case CPUMCPUIDFEATURE_SYSCALL: return pVM->cpum.s.GuestFeatures.fSysCall;
4229 case CPUMCPUIDFEATURE_SEP: return pVM->cpum.s.GuestFeatures.fSysEnter;
4230 case CPUMCPUIDFEATURE_PAE: return pVM->cpum.s.GuestFeatures.fPae;
4231 case CPUMCPUIDFEATURE_NX: return pVM->cpum.s.GuestFeatures.fNoExecute;
4232 case CPUMCPUIDFEATURE_LAHF: return pVM->cpum.s.GuestFeatures.fLahfSahf;
4233 case CPUMCPUIDFEATURE_LONG_MODE: return pVM->cpum.s.GuestFeatures.fLongMode;
4234 case CPUMCPUIDFEATURE_RDTSCP: return pVM->cpum.s.GuestFeatures.fRdTscP;
4235 case CPUMCPUIDFEATURE_HVP: return pVM->cpum.s.GuestFeatures.fHypervisorPresent;
4236 case CPUMCPUIDFEATURE_SPEC_CTRL: return pVM->cpum.s.GuestFeatures.fSpeculationControl;
4237 case CPUMCPUIDFEATURE_INVALID:
4238 case CPUMCPUIDFEATURE_32BIT_HACK:
4239 break;
4240 }
4241 AssertFailed();
4242 return false;
4243}
4244
4245
4246/**
4247 * Clears a CPUID feature bit.
4248 *
4249 * @param pVM The cross context VM structure.
4250 * @param enmFeature The feature to clear.
4251 *
4252 * @deprecated Probably better to default the feature to disabled and only allow
4253 * setting (enabling) it during construction.
4254 */
4255VMMR3_INT_DECL(void) CPUMR3ClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
4256{
4257 PCPUMCPUIDLEAF pLeaf;
4258 switch (enmFeature)
4259 {
4260 case CPUMCPUIDFEATURE_APIC:
4261 Assert(!pVM->cpum.s.GuestFeatures.fApic); /* We only expect this call during init. No MSR adjusting needed. */
4262 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
4263 if (pLeaf)
4264 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
4265
4266 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
4267 if (pLeaf && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
4268 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
4269
4270 pVM->cpum.s.GuestFeatures.fApic = 0;
4271 Log(("CPUM: ClearGuestCpuIdFeature: Disabled xAPIC\n"));
4272 break;
4273
4274 case CPUMCPUIDFEATURE_X2APIC:
4275 Assert(!pVM->cpum.s.GuestFeatures.fX2Apic); /* We only expect this call during init. No MSR adjusting needed. */
4276 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
4277 if (pLeaf)
4278 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
4279 pVM->cpum.s.GuestFeatures.fX2Apic = 0;
4280 Log(("CPUM: ClearGuestCpuIdFeature: Disabled x2APIC\n"));
4281 break;
4282
4283#if 0
4284 case CPUMCPUIDFEATURE_PAE:
4285 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
4286 if (pLeaf)
4287 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAE;
4288
4289 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
4290 if ( pLeaf
4291 && ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
4292 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON))
4293 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
4294
4295 pVM->cpum.s.GuestFeatures.fPae = 0;
4296 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAE!\n"));
4297 break;
4298
4299 case CPUMCPUIDFEATURE_LONG_MODE:
4300 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
4301 if (pLeaf)
4302 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
4303 pVM->cpum.s.GuestFeatures.fLongMode = 0;
4304 pVM->cpum.s.GuestFeatures.cVmxMaxPhysAddrWidth = 32;
4305 if (pVM->cpum.s.GuestFeatures.fVmx)
4306 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
4307 {
4308 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
4309 pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs.u64Basic |= VMX_BASIC_PHYSADDR_WIDTH_32BIT;
4310 }
4311 break;
4312
4313 case CPUMCPUIDFEATURE_LAHF:
4314 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
4315 if (pLeaf)
4316 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
4317 pVM->cpum.s.GuestFeatures.fLahfSahf = 0;
4318 break;
4319#endif
4320 case CPUMCPUIDFEATURE_RDTSCP:
4321 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
4322 if (pLeaf)
4323 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
4324 pVM->cpum.s.GuestFeatures.fRdTscP = 0;
4325 Log(("CPUM: ClearGuestCpuIdFeature: Disabled RDTSCP!\n"));
4326 break;
4327
4328#if 0
4329 case CPUMCPUIDFEATURE_HVP:
4330 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
4331 if (pLeaf)
4332 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_HVP;
4333 pVM->cpum.s.GuestFeatures.fHypervisorPresent = 0;
4334 break;
4335
4336 case CPUMCPUIDFEATURE_SPEC_CTRL:
4337 pLeaf = cpumR3CpuIdGetExactLeaf(&pVM->cpum.s, UINT32_C(0x00000007), 0);
4338 if (pLeaf)
4339 pLeaf->uEdx &= ~(X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB | X86_CPUID_STEXT_FEATURE_EDX_STIBP);
4340 VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->cpum.s.GuestMsrs.msr.ArchCaps &= ~MSR_IA32_ARCH_CAP_F_IBRS_ALL);
4341 Log(("CPUM: ClearGuestCpuIdFeature: Disabled speculation control!\n"));
4342 break;
4343#endif
4344 default:
4345 AssertMsgFailed(("enmFeature=%d\n", enmFeature));
4346 break;
4347 }
4348
4349 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
4350 {
4351 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
4352 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CPUID;
4353 }
4354}
4355
4356
4357/**
4358 * Do some final polishing after all calls to CPUMR3SetGuestCpuIdFeature and
4359 * CPUMR3ClearGuestCpuIdFeature are (probably) done.
4360 *
4361 * @param pVM The cross context VM structure.
4362 */
4363void cpumR3CpuIdRing3InitDone(PVM pVM)
4364{
4365 /*
4366 * Do not advertise NX without PAE; this seems to confuse Windows 7 (black
4367 * screen very early in real mode).
4368 */
4369 PCPUMCPUIDLEAF pStdLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
4370 PCPUMCPUIDLEAF pExtLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
4371 if (pStdLeaf && pExtLeaf)
4372 {
4373 if ( !(pStdLeaf->uEdx & X86_CPUID_FEATURE_EDX_PAE)
4374 && (pExtLeaf->uEdx & X86_CPUID_EXT_FEATURE_EDX_NX))
4375 pExtLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_NX;
4376 }
4377}
4378
4379
4380/*
4381 *
4382 *
4383 * Saved state related code.
4384 * Saved state related code.
4385 * Saved state related code.
4386 *
4387 *
4388 */
4389
4390/**
4391 * Called both in pass 0 and the final pass.
4392 *
4393 * @param pVM The cross context VM structure.
4394 * @param pSSM The saved state handle.
4395 */
4396void cpumR3SaveCpuId(PVM pVM, PSSMHANDLE pSSM)
4397{
4398 /*
4399 * Save all the CPU ID leaves.
4400 */
4401 SSMR3PutU32(pSSM, sizeof(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3[0]));
4402 SSMR3PutU32(pSSM, pVM->cpum.s.GuestInfo.cCpuIdLeaves);
4403 SSMR3PutMem(pSSM, pVM->cpum.s.GuestInfo.paCpuIdLeavesR3,
4404 sizeof(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3[0]) * pVM->cpum.s.GuestInfo.cCpuIdLeaves);
4405
4406 SSMR3PutMem(pSSM, &pVM->cpum.s.GuestInfo.DefCpuId, sizeof(pVM->cpum.s.GuestInfo.DefCpuId));
4407
4408 /*
4409 * Save a good portion of the raw CPU IDs as well, as they may come in
4410 * handy when validating features for raw mode.
4411 */
4412#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
4413 CPUMCPUID aRawStd[16];
4414 for (unsigned i = 0; i < RT_ELEMENTS(aRawStd); i++)
4415 ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].uEax, &aRawStd[i].uEbx, &aRawStd[i].uEcx, &aRawStd[i].uEdx);
4416 SSMR3PutU32(pSSM, RT_ELEMENTS(aRawStd));
4417 SSMR3PutMem(pSSM, &aRawStd[0], sizeof(aRawStd));
4418
4419 CPUMCPUID aRawExt[32];
4420 for (unsigned i = 0; i < RT_ELEMENTS(aRawExt); i++)
4421 ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].uEax, &aRawExt[i].uEbx, &aRawExt[i].uEcx, &aRawExt[i].uEdx);
4422 SSMR3PutU32(pSSM, RT_ELEMENTS(aRawExt));
4423 SSMR3PutMem(pSSM, &aRawExt[0], sizeof(aRawExt));
4424
4425#else
4426 /* Two zero counts on non-x86 hosts. */
4427 SSMR3PutU32(pSSM, 0);
4428 SSMR3PutU32(pSSM, 0);
4429#endif
4430}
4431
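/* A rough sketch of the saved-state unit layout written above (new format),
 * for reference; the two counts are zero when saving on non-x86 hosts:
 *
 *     uint32_t      cbLeaf;               -- sizeof(CPUMCPUIDLEAF)
 *     uint32_t      cLeaves;
 *     CPUMCPUIDLEAF aLeaves[cLeaves];
 *     CPUMCPUID     DefCpuId;
 *     uint32_t      cRawStd;              -- 16 on x86/amd64 hosts
 *     CPUMCPUID     aRawStd[cRawStd];
 *     uint32_t      cRawExt;              -- 32 on x86/amd64 hosts
 *     CPUMCPUID     aRawExt[cRawExt];
 */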
4432
4433static int cpumR3LoadOneOldGuestCpuIdArray(PSSMHANDLE pSSM, uint32_t uBase, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
4434{
4435 uint32_t cCpuIds;
4436 int rc = SSMR3GetU32(pSSM, &cCpuIds);
4437 if (RT_SUCCESS(rc))
4438 {
4439 if (cCpuIds < 64)
4440 {
4441 for (uint32_t i = 0; i < cCpuIds; i++)
4442 {
4443 CPUMCPUID CpuId;
4444 rc = SSMR3GetMem(pSSM, &CpuId, sizeof(CpuId));
4445 if (RT_FAILURE(rc))
4446 break;
4447
4448 CPUMCPUIDLEAF NewLeaf;
4449 NewLeaf.uLeaf = uBase + i;
4450 NewLeaf.uSubLeaf = 0;
4451 NewLeaf.fSubLeafMask = 0;
4452 NewLeaf.uEax = CpuId.uEax;
4453 NewLeaf.uEbx = CpuId.uEbx;
4454 NewLeaf.uEcx = CpuId.uEcx;
4455 NewLeaf.uEdx = CpuId.uEdx;
4456 NewLeaf.fFlags = 0;
4457 rc = cpumR3CpuIdInsert(NULL /* pVM */, ppaLeaves, pcLeaves, &NewLeaf);
4458 }
4459 }
4460 else
4461 rc = VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
4462 }
4463 if (RT_FAILURE(rc))
4464 {
4465 RTMemFree(*ppaLeaves);
4466 *ppaLeaves = NULL;
4467 *pcLeaves = 0;
4468 }
4469 return rc;
4470}
4471
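/* For reference: states at or before CPUM_SAVED_STATE_VERSION_PUT_STRUCT store
 * one such array per leaf range (bases 0x00000000, 0x80000000 and 0xc0000000):
 *
 *     uint32_t  cCpuIds;             -- rejected if >= 64
 *     CPUMCPUID aCpuIds[cCpuIds];    -- EAX..EDX of leaves uBase+0 .. uBase+cCpuIds-1
 */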
4472
4473static int cpumR3LoadGuestCpuIdArray(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
4474{
4475 *ppaLeaves = NULL;
4476 *pcLeaves = 0;
4477
4478 int rc;
4479 if (uVersion > CPUM_SAVED_STATE_VERSION_PUT_STRUCT)
4480 {
4481 /*
4482 * The new format. Starts by declaring the leaf size and count.
4483 */
4484 uint32_t cbLeaf;
4485 SSMR3GetU32(pSSM, &cbLeaf);
4486 uint32_t cLeaves;
4487 rc = SSMR3GetU32(pSSM, &cLeaves);
4488 if (RT_SUCCESS(rc))
4489 {
4490 if (cbLeaf == sizeof(**ppaLeaves))
4491 {
4492 if (cLeaves <= CPUM_CPUID_MAX_LEAVES)
4493 {
4494 /*
4495 * Load the leaves one by one.
4496 *
4497 * The uPrev stuff is a kludge for working around a week's worth of bad saved
4498 * states during the CPUID revamp in March 2015. We saved too many leaves
4499 * due to a bug in cpumR3CpuIdInstallAndExplodeLeaves, thus ending up with
4500 * garbage entries at the end of the array when restoring. We also had
4501 * a subleaf insertion bug that triggered with the leaf 4 stuff below;
4502 * this kludge doesn't deal correctly with that, but who cares...
4503 */
4504 uint32_t uPrev = 0;
4505 for (uint32_t i = 0; i < cLeaves && RT_SUCCESS(rc); i++)
4506 {
4507 CPUMCPUIDLEAF Leaf;
4508 rc = SSMR3GetMem(pSSM, &Leaf, sizeof(Leaf));
4509 if (RT_SUCCESS(rc))
4510 {
4511 if ( uVersion != CPUM_SAVED_STATE_VERSION_BAD_CPUID_COUNT
4512 || Leaf.uLeaf >= uPrev)
4513 {
4514 rc = cpumR3CpuIdInsert(NULL /* pVM */, ppaLeaves, pcLeaves, &Leaf);
4515 uPrev = Leaf.uLeaf;
4516 }
4517 else
4518 uPrev = UINT32_MAX;
4519 }
4520 }
4521 }
4522 else
4523 rc = SSMR3SetLoadError(pSSM, VERR_TOO_MANY_CPUID_LEAVES, RT_SRC_POS,
4524 "Too many CPUID leaves: %#x, max %#x", cLeaves, CPUM_CPUID_MAX_LEAVES);
4525 }
4526 else
4527 rc = SSMR3SetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
4528 "CPUMCPUIDLEAF size differs: saved=%#x, our=%#x", cbLeaf, sizeof(**ppaLeaves));
4529 }
4530 }
4531 else
4532 {
4533 /*
4534 * The old format with its three inflexible arrays.
4535 */
4536 rc = cpumR3LoadOneOldGuestCpuIdArray(pSSM, UINT32_C(0x00000000), ppaLeaves, pcLeaves);
4537 if (RT_SUCCESS(rc))
4538 rc = cpumR3LoadOneOldGuestCpuIdArray(pSSM, UINT32_C(0x80000000), ppaLeaves, pcLeaves);
4539 if (RT_SUCCESS(rc))
4540 rc = cpumR3LoadOneOldGuestCpuIdArray(pSSM, UINT32_C(0xc0000000), ppaLeaves, pcLeaves);
4541 if (RT_SUCCESS(rc))
4542 {
4543 /*
4544 * Fake up leaf 4 on Intel like we used to do in CPUMGetGuestCpuId earlier.
4545 */
4546 PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafInt(*ppaLeaves, *pcLeaves, 0, 0);
4547 if ( pLeaf
4548 && RTX86IsIntelCpu(pLeaf->uEbx, pLeaf->uEcx, pLeaf->uEdx))
4549 {
4550 CPUMCPUIDLEAF Leaf;
4551 Leaf.uLeaf = 4;
4552 Leaf.fSubLeafMask = UINT32_MAX;
4553 Leaf.uSubLeaf = 0;
4554 Leaf.uEdx = UINT32_C(0); /* 3 flags, 0 is fine. */
4555 Leaf.uEcx = UINT32_C(63); /* sets - 1 */
4556 Leaf.uEbx = (UINT32_C(7) << 22) /* associativity -1 */
4557 | (UINT32_C(0) << 12) /* phys line partitions - 1 */
4558 | UINT32_C(63); /* system coherency line size - 1 */
4559 Leaf.uEax = (RT_MIN(pVM->cCpus - 1, UINT32_C(0x3f)) << 26) /* cores per package - 1 */
4560 | (UINT32_C(0) << 14) /* threads per cache - 1 */
4561 | (UINT32_C(1) << 5) /* cache level */
4562 | UINT32_C(1); /* cache type (data) */
4563 Leaf.fFlags = 0;
4564 rc = cpumR3CpuIdInsert(NULL /* pVM */, ppaLeaves, pcLeaves, &Leaf);
4565 if (RT_SUCCESS(rc))
4566 {
4567 Leaf.uSubLeaf = 1; /* Should've been cache type 2 (code), but buggy code made it data. */
4568 rc = cpumR3CpuIdInsert(NULL /* pVM */, ppaLeaves, pcLeaves, &Leaf);
4569 }
4570 if (RT_SUCCESS(rc))
4571 {
4572 Leaf.uSubLeaf = 2; /* Should've been cache type 3 (unified), but buggy code made it data. */
4573 Leaf.uEcx = 4095; /* sets - 1 */
4574 Leaf.uEbx &= UINT32_C(0x003fffff); /* associativity - 1 */
4575 Leaf.uEbx |= UINT32_C(23) << 22;
4576 Leaf.uEax &= UINT32_C(0xfc003fff); /* threads per cache - 1 */
4577 Leaf.uEax |= RT_MIN(pVM->cCpus - 1, UINT32_C(0xfff)) << 14;
4578 Leaf.uEax &= UINT32_C(0xffffff1f); /* level */
4579 Leaf.uEax |= UINT32_C(2) << 5;
4580 rc = cpumR3CpuIdInsert(NULL /* pVM */, ppaLeaves, pcLeaves, &Leaf);
4581 }
4582 }
4583 }
4584 }
4585 return rc;
4586}
4587
4588
4589/**
4590 * Loads the CPU ID leaves saved by pass 0, inner worker.
4591 *
4592 * @returns VBox status code.
4593 * @param pVM The cross context VM structure.
4594 * @param pSSM The saved state handle.
4595 * @param uVersion The format version.
4596 * @param paLeaves Guest CPUID leaves loaded from the state.
4597 * @param cLeaves The number of leaves in @a paLeaves.
4598 * @param pMsrs The guest MSRs.
4599 */
4600static int cpumR3LoadCpuIdInner(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs)
4601{
4602 AssertMsgReturn(uVersion >= CPUM_SAVED_STATE_VERSION_VER3_2, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
4603#if !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86)
4604 AssertMsgFailed(("Port me!"));
4605#endif
4606
4607 /*
4608 * Continue loading the state into stack buffers.
4609 */
4610 CPUMCPUID GuestDefCpuId;
4611 int rc = SSMR3GetMem(pSSM, &GuestDefCpuId, sizeof(GuestDefCpuId));
4612 AssertRCReturn(rc, rc);
4613
4614 CPUMCPUID aRawStd[16];
4615 uint32_t cRawStd;
4616 rc = SSMR3GetU32(pSSM, &cRawStd); AssertRCReturn(rc, rc);
4617 if (cRawStd > RT_ELEMENTS(aRawStd))
4618 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
4619 rc = SSMR3GetMem(pSSM, &aRawStd[0], cRawStd * sizeof(aRawStd[0]));
4620 AssertRCReturn(rc, rc);
4621 for (uint32_t i = cRawStd; i < RT_ELEMENTS(aRawStd); i++)
4622#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
4623 ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].uEax, &aRawStd[i].uEbx, &aRawStd[i].uEcx, &aRawStd[i].uEdx);
4624#else
4625 RT_ZERO(aRawStd[i]);
4626#endif
4627
4628 CPUMCPUID aRawExt[32];
4629 uint32_t cRawExt;
4630 rc = SSMR3GetU32(pSSM, &cRawExt); AssertRCReturn(rc, rc);
4631 if (cRawExt > RT_ELEMENTS(aRawExt))
4632 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
4633 rc = SSMR3GetMem(pSSM, &aRawExt[0], cRawExt * sizeof(aRawExt[0]));
4634 AssertRCReturn(rc, rc);
4635 for (uint32_t i = cRawExt; i < RT_ELEMENTS(aRawExt); i++)
4636#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
4637 ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].uEax, &aRawExt[i].uEbx, &aRawExt[i].uEcx, &aRawExt[i].uEdx);
4638#else
4639 RT_ZERO(aRawExt[i]);
4640#endif
4641
4642 /*
4643 * Get the raw CPU IDs for the current host.
4644 */
4645 CPUMCPUID aHostRawStd[16];
4646#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
4647 for (unsigned i = 0; i < RT_ELEMENTS(aHostRawStd); i++)
4648 ASMCpuIdExSlow(i, 0, 0, 0, &aHostRawStd[i].uEax, &aHostRawStd[i].uEbx, &aHostRawStd[i].uEcx, &aHostRawStd[i].uEdx);
4649#else
4650 RT_ZERO(aHostRawStd);
4651#endif
4652
4653 CPUMCPUID aHostRawExt[32];
4654#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
4655 for (unsigned i = 0; i < RT_ELEMENTS(aHostRawExt); i++)
4656 ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0,
4657 &aHostRawExt[i].uEax, &aHostRawExt[i].uEbx, &aHostRawExt[i].uEcx, &aHostRawExt[i].uEdx);
4658#else
4659 RT_ZERO(aHostRawExt);
4660#endif
4661
4662 /*
4663 * Get the host and guest overrides so we don't reject the state because
4664 * some feature was enabled thru these interfaces.
4665 * Note! We currently only need the feature leaves, so skip the rest.
4666 */
4667 PCFGMNODE pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/HostCPUID");
4668 CPUMCPUID aHostOverrideStd[2];
4669 memcpy(&aHostOverrideStd[0], &aHostRawStd[0], sizeof(aHostOverrideStd));
4670 cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &aHostOverrideStd[0], RT_ELEMENTS(aHostOverrideStd), pOverrideCfg);
4671
4672 CPUMCPUID aHostOverrideExt[2];
4673 memcpy(&aHostOverrideExt[0], &aHostRawExt[0], sizeof(aHostOverrideExt));
4674 cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x80000000), &aHostOverrideExt[0], RT_ELEMENTS(aHostOverrideExt), pOverrideCfg);
4675
4676 /*
4677 * This can be skipped (see the CPUM/StrictCpuIdChecks config value queried below).
4678 *
4679 * @note On ARM we disable the strict checks for now because we can't verify against what the host
4680 * supports and just assume the interpreter/recompiler supports everything that was exposed earlier.
4681 */
4682 bool fStrictCpuIdChecks;
4683 CFGMR3QueryBoolDef(CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM"), "StrictCpuIdChecks", &fStrictCpuIdChecks,
4684#ifdef RT_ARCH_ARM64
4685 false
4686#else
4687 true
4688#endif
4689 );
4690
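    /* Side note (not used below): the strict checks can typically be relaxed
       from the host side through CFGM extra data, assuming the usual
       VBoxInternal/ mapping of the CFGM tree, e.g.:

           VBoxManage setextradata <vmname> "VBoxInternal/CPUM/StrictCpuIdChecks" 0
     */
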
4691 /*
4692 * Define a bunch of macros for simplifying the sanitizing/checking code below.
4693 */
4694 /* Generic expression + failure message. */
4695#define CPUID_CHECK_RET(expr, fmt) \
4696 do { \
4697 if (!(expr)) \
4698 { \
4699 char *pszMsg = RTStrAPrintf2 fmt; /* lack of variadic macros sucks */ \
4700 if (fStrictCpuIdChecks) \
4701 { \
4702 int rcCpuid = SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, "%s", pszMsg); \
4703 RTStrFree(pszMsg); \
4704 return rcCpuid; \
4705 } \
4706 LogRel(("CPUM: %s\n", pszMsg)); \
4707 RTStrFree(pszMsg); \
4708 } \
4709 } while (0)
4710#define CPUID_CHECK_WRN(expr, fmt) \
4711 do { \
4712 if (!(expr)) \
4713 LogRel(fmt); \
4714 } while (0)
4715
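    /* Illustrative use of the above (note the extra parentheses around the
       format pack, which is handed to RTStrAPrintf2):

           CPUID_CHECK_RET(aHostRawStd[0].uEax >= 1,
                           ("host std CPUID range too small: %#x", aHostRawStd[0].uEax));
     */
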
4716 /* For comparing two values and bitching if they differ. */
4717#define CPUID_CHECK2_RET(what, host, saved) \
4718 do { \
4719 if ((host) != (saved)) \
4720 { \
4721 if (fStrictCpuIdChecks) \
4722 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
4723 N_(#what " mismatch: host=%#x saved=%#x"), (host), (saved)); \
4724 LogRel(("CPUM: " #what " differs: host=%#x saved=%#x\n", (host), (saved))); \
4725 } \
4726 } while (0)
4727#define CPUID_CHECK2_WRN(what, host, saved) \
4728 do { \
4729 if ((host) != (saved)) \
4730 LogRel(("CPUM: " #what " differs: host=%#x saved=%#x\n", (host), (saved))); \
4731 } while (0)
4732
4733 /* For checking raw cpu features (raw mode). */
4734#define CPUID_RAW_FEATURE_RET(set, reg, bit) \
4735 do { \
4736 if ((aHostRaw##set [1].reg & bit) != (aRaw##set [1].reg & bit)) \
4737 { \
4738 if (fStrictCpuIdChecks) \
4739 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
4740 N_(#bit " mismatch: host=%d saved=%d"), \
4741 !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) ); \
4742 LogRel(("CPUM: " #bit" differs: host=%d saved=%d\n", \
4743 !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) )); \
4744 } \
4745 } while (0)
4746#define CPUID_RAW_FEATURE_WRN(set, reg, bit) \
4747 do { \
4748 if ((aHostRaw##set [1].reg & bit) != (aRaw##set [1].reg & bit)) \
4749 LogRel(("CPUM: " #bit" differs: host=%d saved=%d\n", \
4750 !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) )); \
4751 } while (0)
4752#define CPUID_RAW_FEATURE_IGN(set, reg, bit) do { } while (0)
4753
4754 /* For checking guest features. */
4755#define CPUID_GST_FEATURE_RET(set, reg, bit) \
4756 do { \
4757 if ( (aGuestCpuId##set [1].reg & bit) \
4758 && !(aHostRaw##set [1].reg & bit) \
4759 && !(aHostOverride##set [1].reg & bit) \
4760 ) \
4761 { \
4762 if (fStrictCpuIdChecks) \
4763 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
4764 N_(#bit " is not supported by the host but has already been exposed to the guest")); \
4765 LogRel(("CPUM: " #bit " is not supported by the host but has already been exposed to the guest\n")); \
4766 } \
4767 } while (0)
4768#define CPUID_GST_FEATURE_WRN(set, reg, bit) \
4769 do { \
4770 if ( (aGuestCpuId##set [1].reg & bit) \
4771 && !(aHostRaw##set [1].reg & bit) \
4772 && !(aHostOverride##set [1].reg & bit) \
4773 ) \
4774 LogRel(("CPUM: " #bit " is not supported by the host but has already been exposed to the guest\n")); \
4775 } while (0)
4776#define CPUID_GST_FEATURE_EMU(set, reg, bit) \
4777 do { \
4778 if ( (aGuestCpuId##set [1].reg & bit) \
4779 && !(aHostRaw##set [1].reg & bit) \
4780 && !(aHostOverride##set [1].reg & bit) \
4781 ) \
4782 LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
4783 } while (0)
4784#define CPUID_GST_FEATURE_IGN(set, reg, bit) do { } while (0)
4785
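    /* Illustrative expansion, for reference: CPUID_GST_FEATURE_RET(Std, uEcx,
       X86_CPUID_FEATURE_ECX_SSE3), as used further down, boils down to: */
#if 0
    do {
        if (   (aGuestCpuIdStd[1].uEcx & X86_CPUID_FEATURE_ECX_SSE3)
            && !(aHostRawStd[1].uEcx & X86_CPUID_FEATURE_ECX_SSE3)
            && !(aHostOverrideStd[1].uEcx & X86_CPUID_FEATURE_ECX_SSE3) )
        {
            if (fStrictCpuIdChecks)
                return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
                                         N_("X86_CPUID_FEATURE_ECX_SSE3 is not supported by the host but has already been exposed to the guest"));
            LogRel(("CPUM: X86_CPUID_FEATURE_ECX_SSE3 is not supported by the host but has already been exposed to the guest\n"));
        }
    } while (0);
#endif
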
4786 /* For checking guest features if AMD guest CPU. */
4787#define CPUID_GST_AMD_FEATURE_RET(set, reg, bit) \
4788 do { \
4789 if ( (aGuestCpuId##set [1].reg & bit) \
4790 && fGuestAmd \
4791 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
4792 && !(aHostOverride##set [1].reg & bit) \
4793 ) \
4794 { \
4795 if (fStrictCpuIdChecks) \
4796 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
4797 N_(#bit " is not supported by the host but has already been exposed to the guest")); \
4798 LogRel(("CPUM: " #bit " is not supported by the host but has already been exposed to the guest\n")); \
4799 } \
4800 } while (0)
4801#define CPUID_GST_AMD_FEATURE_WRN(set, reg, bit) \
4802 do { \
4803 if ( (aGuestCpuId##set [1].reg & bit) \
4804 && fGuestAmd \
4805 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
4806 && !(aHostOverride##set [1].reg & bit) \
4807 ) \
4808 LogRel(("CPUM: " #bit " is not supported by the host but has already been exposed to the guest\n")); \
4809 } while (0)
4810#define CPUID_GST_AMD_FEATURE_EMU(set, reg, bit) \
4811 do { \
4812 if ( (aGuestCpuId##set [1].reg & bit) \
4813 && fGuestAmd \
4814 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
4815 && !(aHostOverride##set [1].reg & bit) \
4816 ) \
4817 LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
4818 } while (0)
4819#define CPUID_GST_AMD_FEATURE_IGN(set, reg, bit) do { } while (0)
4820
4821 /* For checking AMD features which have a corresponding bit in the standard
4822 range. (Intel defines very few bits in the extended feature sets.) */
4823#define CPUID_GST_FEATURE2_RET(reg, ExtBit, StdBit) \
4824 do { \
4825 if ( (aGuestCpuIdExt [1].reg & (ExtBit)) \
4826 && !(fHostAmd \
4827 ? aHostRawExt[1].reg & (ExtBit) \
4828 : aHostRawStd[1].reg & (StdBit)) \
4829 && !(aHostOverrideExt[1].reg & (ExtBit)) \
4830 ) \
4831 { \
4832 if (fStrictCpuIdChecks) \
4833 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
4834 N_(#ExtBit " is not supported by the host but has already been exposed to the guest")); \
4835 LogRel(("CPUM: " #ExtBit " is not supported by the host but has already been exposed to the guest\n")); \
4836 } \
4837 } while (0)
4838#define CPUID_GST_FEATURE2_WRN(reg, ExtBit, StdBit) \
4839 do { \
4840 if ( (aGuestCpuIdExt [1].reg & (ExtBit)) \
4841 && !(fHostAmd \
4842 ? aHostRawExt[1].reg & (ExtBit) \
4843 : aHostRawStd[1].reg & (StdBit)) \
4844 && !(aHostOverrideExt[1].reg & (ExtBit)) \
4845 ) \
4846 LogRel(("CPUM: " #ExtBit " is not supported by the host but has already been exposed to the guest\n")); \
4847 } while (0)
4848#define CPUID_GST_FEATURE2_EMU(reg, ExtBit, StdBit) \
4849 do { \
4850 if ( (aGuestCpuIdExt [1].reg & (ExtBit)) \
4851 && !(fHostAmd \
4852 ? aHostRawExt[1].reg & (ExtBit) \
4853 : aHostRawStd[1].reg & (StdBit)) \
4854 && !(aHostOverrideExt[1].reg & (ExtBit)) \
4855 ) \
4856 LogRel(("CPUM: Warning - " #ExtBit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
4857 } while (0)
4858#define CPUID_GST_FEATURE2_IGN(reg, ExtBit, StdBit) do { } while (0)
4859
4860
4861 /*
4862 * Verify that we can support the features already exposed to the guest on
4863 * this host.
4864 *
4865 * Most of the features we're emulating require intercepting instructions
4866 * and doing it the slow way, so there is no need to warn when they aren't
4867 * present in the host CPU. Thus we use IGN instead of EMU on these.
4868 *
4869 * Trailing comments:
4870 * "EMU" - Possible to emulate, could be lots of work and very slow.
4871 * "EMU?" - Can this be emulated?
4872 */
4873 CPUMCPUID aGuestCpuIdStd[2];
4874 RT_ZERO(aGuestCpuIdStd);
4875 cpumR3CpuIdGetLeafLegacy(paLeaves, cLeaves, 1, 0, &aGuestCpuIdStd[1]);
4876
4877 /* CPUID(1).ecx */
4878 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE3); // -> EMU
4879 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_PCLMUL); // -> EMU?
4880 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_DTES64); // -> EMU?
4881 CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_MONITOR);
4882 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CPLDS); // -> EMU?
4883 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_VMX); // -> EMU
4884 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SMX); // -> EMU
4885 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_EST); // -> EMU
4886 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_TM2); // -> EMU?
4887 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSSE3); // -> EMU
4888 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CNTXID); // -> EMU
4889 CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_SDBG);
4890 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_FMA); // -> EMU? what's this?
4891 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CX16); // -> EMU?
4892 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_TPRUPDATE);//-> EMU
4893 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_PDCM); // -> EMU
4894 CPUID_GST_FEATURE_RET(Std, uEcx, RT_BIT_32(16) /*reserved*/);
4895 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_PCID);
4896 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_DCA); // -> EMU?
4897 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE4_1); // -> EMU
4898 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE4_2); // -> EMU
4899 CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_X2APIC);
4900 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_MOVBE); // -> EMU
4901 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_POPCNT); // -> EMU
4902 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_TSCDEADL);
4903 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_AES); // -> EMU
4904 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_XSAVE); // -> EMU
4905 CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_OSXSAVE);
4906 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_AVX); // -> EMU?
4907 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_F16C);
4908 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_RDRAND);
4909 CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_HVP); // Normally not set by host
4910
4911 /* CPUID(1).edx */
4912 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_FPU);
4913 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_VME);
4914 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_DE); // -> EMU?
4915 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSE);
4916 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_TSC); // -> EMU
4917 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_MSR); // -> EMU
4918 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_PAE);
4919 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MCE);
4920 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CX8); // -> EMU?
4921 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_APIC);
4922 CPUID_GST_FEATURE_RET(Std, uEdx, RT_BIT_32(10) /*reserved*/);
4923 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_SEP);
4924 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MTRR);
4925 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PGE);
4926 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MCA);
4927 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CMOV); // -> EMU
4928 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PAT);
4929 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSE36);
4930 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSN);
4931 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CLFSH); // -> EMU
4932 CPUID_GST_FEATURE_RET(Std, uEdx, RT_BIT_32(20) /*reserved*/);
4933 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_DS); // -> EMU?
4934 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_ACPI); // -> EMU?
4935 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_MMX); // -> EMU
4936 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_FXSR); // -> EMU
4937 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SSE); // -> EMU
4938 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SSE2); // -> EMU
4939 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SS); // -> EMU?
4940 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_HTT); // -> EMU?
4941 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_TM); // -> EMU?
4942 CPUID_GST_FEATURE_RET(Std, uEdx, RT_BIT_32(30) /*JMPE/IA64*/); // -> EMU
4943 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_PBE); // -> EMU?
4944
4945 /* CPUID(0x80000000). */
4946 CPUMCPUID aGuestCpuIdExt[2];
4947 RT_ZERO(aGuestCpuIdExt);
4948 if (cpumR3CpuIdGetLeafLegacy(paLeaves, cLeaves, UINT32_C(0x80000001), 0, &aGuestCpuIdExt[1]))
4949 {
4950 /** @todo deal with no 0x80000001 on the host. */
4951 bool const fHostAmd = RTX86IsAmdCpu(aHostRawStd[0].uEbx, aHostRawStd[0].uEcx, aHostRawStd[0].uEdx)
4952 || RTX86IsHygonCpu(aHostRawStd[0].uEbx, aHostRawStd[0].uEcx, aHostRawStd[0].uEdx);
4953 bool const fGuestAmd = RTX86IsAmdCpu(aGuestCpuIdExt[0].uEbx, aGuestCpuIdExt[0].uEcx, aGuestCpuIdExt[0].uEdx)
4954 || RTX86IsHygonCpu(aGuestCpuIdExt[0].uEbx, aGuestCpuIdExt[0].uEcx, aGuestCpuIdExt[0].uEdx);
4955
4956 /* CPUID(0x80000001).ecx */
4957 CPUID_GST_FEATURE_WRN(Ext, uEcx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF); // -> EMU
4958 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_CMPL); // -> EMU
4959 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SVM); // -> EMU
4960 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_EXT_APIC);// ???
4961 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_CR8L); // -> EMU
4962 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_ABM); // -> EMU
4963 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SSE4A); // -> EMU
4964 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE);//-> EMU
4965 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF);// -> EMU
4966 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_OSVW); // -> EMU?
4967 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_IBS); // -> EMU
4968 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_XOP); // -> EMU
4969 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SKINIT); // -> EMU
4970 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_WDT); // -> EMU
4971 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(14));
4972 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(15));
4973 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(16));
4974 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(17));
4975 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(18));
4976 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(19));
4977 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(20));
4978 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(21));
4979 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(22));
4980 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(23));
4981 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(24));
4982 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(25));
4983 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(26));
4984 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(27));
4985 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(28));
4986 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(29));
4987 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(30));
4988 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(31));
4989
4990 /* CPUID(0x80000001).edx */
4991 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_FPU, X86_CPUID_FEATURE_EDX_FPU); // -> EMU
4992 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_VME, X86_CPUID_FEATURE_EDX_VME); // -> EMU
4993 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_DE, X86_CPUID_FEATURE_EDX_DE); // -> EMU
4994 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_PSE, X86_CPUID_FEATURE_EDX_PSE);
4995 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_TSC, X86_CPUID_FEATURE_EDX_TSC); // -> EMU
4996 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_MSR, X86_CPUID_FEATURE_EDX_MSR); // -> EMU
4997 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_PAE, X86_CPUID_FEATURE_EDX_PAE);
4998 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_MCE, X86_CPUID_FEATURE_EDX_MCE);
4999 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_CX8, X86_CPUID_FEATURE_EDX_CX8); // -> EMU?
5000 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_APIC, X86_CPUID_FEATURE_EDX_APIC);
5001 CPUID_GST_AMD_FEATURE_WRN(Ext, uEdx, RT_BIT_32(10) /*reserved*/);
5002 CPUID_GST_FEATURE_IGN( Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_SYSCALL); // On Intel: long mode only.
5003 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_MTRR, X86_CPUID_FEATURE_EDX_MTRR);
5004 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_PGE, X86_CPUID_FEATURE_EDX_PGE);
5005 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_MCA, X86_CPUID_FEATURE_EDX_MCA);
5006 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_CMOV, X86_CPUID_FEATURE_EDX_CMOV); // -> EMU
5007 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_PAT, X86_CPUID_FEATURE_EDX_PAT);
5008 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_PSE36, X86_CPUID_FEATURE_EDX_PSE36);
5009 CPUID_GST_AMD_FEATURE_WRN(Ext, uEdx, RT_BIT_32(18) /*reserved*/);
5010 CPUID_GST_AMD_FEATURE_WRN(Ext, uEdx, RT_BIT_32(19) /*reserved*/);
5011 CPUID_GST_FEATURE_RET( Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_NX);
5012 CPUID_GST_FEATURE_WRN( Ext, uEdx, RT_BIT_32(21) /*reserved*/);
5013 CPUID_GST_FEATURE_RET( Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_AXMMX);
5014 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_MMX, X86_CPUID_FEATURE_EDX_MMX); // -> EMU
5015 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_FXSR, X86_CPUID_FEATURE_EDX_FXSR); // -> EMU
5016 CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
5017 CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB);
5018 CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
5019 CPUID_GST_FEATURE_IGN( Ext, uEdx, RT_BIT_32(28) /*reserved*/);
5020 CPUID_GST_FEATURE_RET( Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
5021 CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
5022 CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
5023 }
5024
5025 /** @todo check leaf 7 */
5026
5027 /* CPUID(d) - XCR0 stuff - takes ECX as input.
5028 * ECX=0: EAX - Valid bits in XCR0[31:0].
5029 * EBX - Maximum state size as per current XCR0 value.
5030 * ECX - Maximum state size for all supported features.
5031 * EDX - Valid bits in XCR0[63:32].
5032 * ECX=1: EAX - Various X-features.
5033 * EBX - Maximum state size as per current XCR0|IA32_XSS value.
5034 * ECX - Valid bits in IA32_XSS[31:0].
5035 * EDX - Valid bits in IA32_XSS[63:32].
5036 * ECX=N, where N is in 2..63 and indicates a bit in XCR0 and/or IA32_XSS;
5037 * if the bit is invalid, all four registers are set to zero.
5038 * EAX - The state size for this feature.
5039 * EBX - The state byte offset of this feature.
5040 * ECX - Bit 0 indicates whether this sub-leaf maps to a valid IA32_XSS bit (=1) or a valid XCR0 bit (=0).
5041 * EDX - Reserved, but is set to zero if invalid sub-leaf index.
5042 */
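    /* Illustrative host-side probe of a single state component (sub-leaf 2 is
       the AVX YMM_Hi128 component); this mirrors what the per-sub-leaf check
       further down does:

           CPUMCPUID Probe;
           ASMCpuIdExSlow(UINT32_C(0x0000000d), 0, 2, 0,
                          &Probe.uEax, &Probe.uEbx, &Probe.uEcx, &Probe.uEdx);
           // Probe.uEax = component state size, Probe.uEbx = byte offset in the XSAVE area.
     */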
5043 uint64_t fGuestXcr0Mask = 0;
5044 PCPUMCPUIDLEAF pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x0000000d), 0);
5045 if ( pCurLeaf
5046 && (aGuestCpuIdStd[1].uEcx & X86_CPUID_FEATURE_ECX_XSAVE)
5047 && ( pCurLeaf->uEax
5048 || pCurLeaf->uEbx
5049 || pCurLeaf->uEcx
5050 || pCurLeaf->uEdx) )
5051 {
5052 fGuestXcr0Mask = RT_MAKE_U64(pCurLeaf->uEax, pCurLeaf->uEdx);
5053 if (fGuestXcr0Mask & ~pVM->cpum.s.fXStateHostMask)
5054 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
5055 N_("CPUID(0xd/0).EDX:EAX mismatch: %#llx saved, %#llx supported by the current host (XCR0 bits)"),
5056 fGuestXcr0Mask, pVM->cpum.s.fXStateHostMask);
5057 if ((fGuestXcr0Mask & (XSAVE_C_X87 | XSAVE_C_SSE)) != (XSAVE_C_X87 | XSAVE_C_SSE))
5058 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
5059 N_("CPUID(0xd/0).EDX:EAX missing mandatory X87 or SSE bits: %#RX64"), fGuestXcr0Mask);
5060
5061 /* We don't support any additional features yet. */
5062 pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x0000000d), 1);
5063 if (pCurLeaf && pCurLeaf->uEax)
5064 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
5065 N_("CPUID(0xd/1).EAX=%#x, expected zero"), pCurLeaf->uEax);
5066 if (pCurLeaf && (pCurLeaf->uEcx || pCurLeaf->uEdx))
5067 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
5068 N_("CPUID(0xd/1).EDX:ECX=%#llx, expected zero"),
5069 RT_MAKE_U64(pCurLeaf->uEdx, pCurLeaf->uEcx));
5070
5071
5072#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
5073 for (uint32_t uSubLeaf = 2; uSubLeaf < 64; uSubLeaf++)
5074 {
5075 pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x0000000d), uSubLeaf);
5076 if (pCurLeaf)
5077 {
5078 /* If advertised, the state component offset and size must match the one used by host. */
5079 if (pCurLeaf->uEax || pCurLeaf->uEbx || pCurLeaf->uEcx || pCurLeaf->uEdx)
5080 {
5081 CPUMCPUID RawHost;
5082 ASMCpuIdExSlow(UINT32_C(0x0000000d), 0, uSubLeaf, 0,
5083 &RawHost.uEax, &RawHost.uEbx, &RawHost.uEcx, &RawHost.uEdx);
5084 if ( RawHost.uEbx != pCurLeaf->uEbx
5085 || RawHost.uEax != pCurLeaf->uEax)
5086 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
5087 N_("CPUID(0xd/%#x).EBX/EAX=%#x/%#x, current host uses %#x/%#x (offset/size)"),
5088 uSubLeaf, pCurLeaf->uEbx, pCurLeaf->uEax, RawHost.uEbx, RawHost.uEax);
5089 }
5090 }
5091 }
5092#endif
5093 }
5094 /* Clear leaf 0xd just in case we're loading an old state... */
5095 else if (pCurLeaf)
5096 {
5097 for (uint32_t uSubLeaf = 0; uSubLeaf < 64; uSubLeaf++)
5098 {
5099 pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x0000000d), uSubLeaf);
5100 if (pCurLeaf)
5101 {
5102 AssertLogRelMsg( uVersion <= CPUM_SAVED_STATE_VERSION_PUT_STRUCT
5103 || ( pCurLeaf->uEax == 0
5104 && pCurLeaf->uEbx == 0
5105 && pCurLeaf->uEcx == 0
5106 && pCurLeaf->uEdx == 0),
5107 ("uVersion=%#x; %#x %#x %#x %#x\n",
5108 uVersion, pCurLeaf->uEax, pCurLeaf->uEbx, pCurLeaf->uEcx, pCurLeaf->uEdx));
5109 pCurLeaf->uEax = pCurLeaf->uEbx = pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
5110 }
5111 }
5112 }
5113
5114 /* Update the fXStateGuestMask value for the VM. */
5115 if (pVM->cpum.s.fXStateGuestMask != fGuestXcr0Mask)
5116 {
5117 LogRel(("CPUM: fXStateGuestMask=%#llx -> %#llx\n", pVM->cpum.s.fXStateGuestMask, fGuestXcr0Mask));
5118 pVM->cpum.s.fXStateGuestMask = fGuestXcr0Mask;
5119 if (!fGuestXcr0Mask && (aGuestCpuIdStd[1].uEcx & X86_CPUID_FEATURE_ECX_XSAVE))
5120 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS,
5121 N_("Internal Processing Error: XSAVE feature bit enabled, but leaf 0xd is empty."));
5122 }
5123
5124#undef CPUID_CHECK_RET
5125#undef CPUID_CHECK_WRN
5126#undef CPUID_CHECK2_RET
5127#undef CPUID_CHECK2_WRN
5128#undef CPUID_RAW_FEATURE_RET
5129#undef CPUID_RAW_FEATURE_WRN
5130#undef CPUID_RAW_FEATURE_IGN
5131#undef CPUID_GST_FEATURE_RET
5132#undef CPUID_GST_FEATURE_WRN
5133#undef CPUID_GST_FEATURE_EMU
5134#undef CPUID_GST_FEATURE_IGN
5135#undef CPUID_GST_FEATURE2_RET
5136#undef CPUID_GST_FEATURE2_WRN
5137#undef CPUID_GST_FEATURE2_EMU
5138#undef CPUID_GST_FEATURE2_IGN
5139#undef CPUID_GST_AMD_FEATURE_RET
5140#undef CPUID_GST_AMD_FEATURE_WRN
5141#undef CPUID_GST_AMD_FEATURE_EMU
5142#undef CPUID_GST_AMD_FEATURE_IGN
5143
5144 /*
5145 * We're good, commit the CPU ID leaves.
5146 */
5147 pVM->cpum.s.GuestInfo.DefCpuId = GuestDefCpuId;
5148 rc = cpumR3CpuIdInstallAndExplodeLeaves(pVM, &pVM->cpum.s, paLeaves, cLeaves, pMsrs);
5149 AssertLogRelRCReturn(rc, rc);
5150
5151 return VINF_SUCCESS;
5152}
5153
5154
5155/**
5156 * Loads the CPU ID leaves saved by pass 0, x86 targets.
5157 *
5158 * @returns VBox status code.
5159 * @param pVM The cross context VM structure.
5160 * @param pSSM The saved state handle.
5161 * @param uVersion The format version.
5162 * @param pMsrs The guest MSRs.
5163 */
5164int cpumR3LoadCpuIdX86(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMMSRS pMsrs)
5165{
5166 AssertMsgReturn(uVersion >= CPUM_SAVED_STATE_VERSION_VER3_2, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
5167
5168 /*
5169 * Load the CPUID leaves array first and call the worker to do the rest, just so
5170 * we can free the memory when we need to without ending up in column 1000.
5171 */
5172 PCPUMCPUIDLEAF paLeaves;
5173 uint32_t cLeaves;
5174 int rc = cpumR3LoadGuestCpuIdArray(pVM, pSSM, uVersion, &paLeaves, &cLeaves);
5175 AssertRC(rc);
5176 if (RT_SUCCESS(rc))
5177 {
5178 rc = cpumR3LoadCpuIdInner(pVM, pSSM, uVersion, paLeaves, cLeaves, pMsrs);
5179 RTMemFree(paLeaves);
5180 }
5181 return rc;
5182}
5183
5184
5185
5186/**
5187 * Loads the CPU ID leaves saved by pass 0 in a pre-3.2 saved state.
5188 *
5189 * @returns VBox status code.
5190 * @param pVM The cross context VM structure.
5191 * @param pSSM The saved state handle.
5192 * @param uVersion The format version.
5193 */
5194int cpumR3LoadCpuIdPre32(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
5195{
5196 AssertMsgReturn(uVersion < CPUM_SAVED_STATE_VERSION_VER3_2, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
5197
5198 /*
5199 * Restore the CPUID leaves.
5200 *
5201 * Note that we support restoring fewer than the current number of standard
5202 * leaves because more have been allowed in newer versions of VBox.
5203 */
5204 uint32_t cElements;
5205 int rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
5206 if (cElements > RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmStd))
5207 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
5208 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdPatmStd[0], cElements*sizeof(pVM->cpum.s.aGuestCpuIdPatmStd[0]));
5209
5210 rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
5211 if (cElements != RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmExt))
5212 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
5213 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdPatmExt[0], sizeof(pVM->cpum.s.aGuestCpuIdPatmExt));
5214
5215 rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
5216 if (cElements != RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmCentaur))
5217 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
5218 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdPatmCentaur[0], sizeof(pVM->cpum.s.aGuestCpuIdPatmCentaur));
5219
5220 SSMR3GetMem(pSSM, &pVM->cpum.s.GuestInfo.DefCpuId, sizeof(pVM->cpum.s.GuestInfo.DefCpuId));
5221
5222 /*
5223 * Check that the basic CPUID information is unchanged.
5224 */
5225 /** @todo we should check the 64 bits capabilities too! */
5226 uint32_t au32CpuId[8] = {0,0,0,0, 0,0,0,0};
5227#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
5228 ASMCpuIdExSlow(0, 0, 0, 0, &au32CpuId[0], &au32CpuId[1], &au32CpuId[2], &au32CpuId[3]);
5229 ASMCpuIdExSlow(1, 0, 0, 0, &au32CpuId[4], &au32CpuId[5], &au32CpuId[6], &au32CpuId[7]);
5230#endif
5231 uint32_t au32CpuIdSaved[8];
5232 rc = SSMR3GetMem(pSSM, &au32CpuIdSaved[0], sizeof(au32CpuIdSaved));
5233 if (RT_SUCCESS(rc))
5234 {
5235 /* Ignore CPU stepping. */
5236 au32CpuId[4] &= 0xfffffff0;
5237 au32CpuIdSaved[4] &= 0xfffffff0;
5238
5239 /* Ignore APIC ID (AMD specs). */
5240 au32CpuId[5] &= ~0xff000000;
5241 au32CpuIdSaved[5] &= ~0xff000000;
5242
5243 /* Ignore the number of Logical CPUs (AMD specs). */
5244 au32CpuId[5] &= ~0x00ff0000;
5245 au32CpuIdSaved[5] &= ~0x00ff0000;
5246
5247 /* Ignore some advanced capability bits, that we don't expose to the guest. */
5248 au32CpuId[6] &= ~( X86_CPUID_FEATURE_ECX_DTES64
5249 | X86_CPUID_FEATURE_ECX_VMX
5250 | X86_CPUID_FEATURE_ECX_SMX
5251 | X86_CPUID_FEATURE_ECX_EST
5252 | X86_CPUID_FEATURE_ECX_TM2
5253 | X86_CPUID_FEATURE_ECX_CNTXID
5254 | X86_CPUID_FEATURE_ECX_TPRUPDATE
5255 | X86_CPUID_FEATURE_ECX_PDCM
5256 | X86_CPUID_FEATURE_ECX_DCA
5257 | X86_CPUID_FEATURE_ECX_X2APIC
5258 );
5259 au32CpuIdSaved[6] &= ~( X86_CPUID_FEATURE_ECX_DTES64
5260 | X86_CPUID_FEATURE_ECX_VMX
5261 | X86_CPUID_FEATURE_ECX_SMX
5262 | X86_CPUID_FEATURE_ECX_EST
5263 | X86_CPUID_FEATURE_ECX_TM2
5264 | X86_CPUID_FEATURE_ECX_CNTXID
5265 | X86_CPUID_FEATURE_ECX_TPRUPDATE
5266 | X86_CPUID_FEATURE_ECX_PDCM
5267 | X86_CPUID_FEATURE_ECX_DCA
5268 | X86_CPUID_FEATURE_ECX_X2APIC
5269 );
5270
5271 /* Make sure we don't forget to update the masks when enabling
5272 * features in the future.
5273 */
5274 AssertRelease(!(pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx &
5275 ( X86_CPUID_FEATURE_ECX_DTES64
5276 | X86_CPUID_FEATURE_ECX_VMX
5277 | X86_CPUID_FEATURE_ECX_SMX
5278 | X86_CPUID_FEATURE_ECX_EST
5279 | X86_CPUID_FEATURE_ECX_TM2
5280 | X86_CPUID_FEATURE_ECX_CNTXID
5281 | X86_CPUID_FEATURE_ECX_TPRUPDATE
5282 | X86_CPUID_FEATURE_ECX_PDCM
5283 | X86_CPUID_FEATURE_ECX_DCA
5284 | X86_CPUID_FEATURE_ECX_X2APIC
5285 )));
5286 /* do the compare */
5287 if (memcmp(au32CpuIdSaved, au32CpuId, sizeof(au32CpuIdSaved)))
5288 {
5289 if (SSMR3HandleGetAfter(pSSM) == SSMAFTER_DEBUG_IT)
5290 LogRel(("cpumR3LoadExec: CpuId mismatch! (ignored due to SSMAFTER_DEBUG_IT)\n"
5291 "Saved=%.*Rhxs\n"
5292 "Real =%.*Rhxs\n",
5293 sizeof(au32CpuIdSaved), au32CpuIdSaved,
5294 sizeof(au32CpuId), au32CpuId));
5295 else
5296 {
5297 LogRel(("cpumR3LoadExec: CpuId mismatch!\n"
5298 "Saved=%.*Rhxs\n"
5299 "Real =%.*Rhxs\n",
5300 sizeof(au32CpuIdSaved), au32CpuIdSaved,
5301 sizeof(au32CpuId), au32CpuId));
5302 rc = VERR_SSM_LOAD_CPUID_MISMATCH;
5303 }
5304 }
5305 }
5306
5307 return rc;
5308}
5309
5310
5311
5312/*
5313 *
5314 *
5315 * CPUID Info Handler.
5316 * CPUID Info Handler.
5317 * CPUID Info Handler.
5318 *
5319 *
5320 */
5321
5322
5323
5324/**
5325 * Get L1 cache / TLB associativity.
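 *
 * @returns Read-only string; for the "N way" case it is returned via @a pszBuf.
 * @param u The raw associativity field value.
 * @param pszBuf Caller scratch buffer of at least 16 bytes.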
5326 */
5327static const char *getCacheAss(unsigned u, char *pszBuf)
5328{
5329 if (u == 0)
5330 return "res0 ";
5331 if (u == 1)
5332 return "direct";
5333 if (u == 255)
5334 return "fully";
5335 if (u >= 256)
5336 return "???";
5337
5338 RTStrPrintf(pszBuf, 16, "%d way", u);
5339 return pszBuf;
5340}
5341
5342
5343/**
5344 * Get L2/L3 cache associativity.
5345 */
5346static const char *getL23CacheAss(unsigned u)
5347{
5348 switch (u)
5349 {
5350 case 0: return "off ";
5351 case 1: return "direct";
5352 case 2: return "2 way ";
5353 case 3: return "3 way ";
5354 case 4: return "4 way ";
5355 case 5: return "6 way ";
5356 case 6: return "8 way ";
5357 case 7: return "res7 ";
5358 case 8: return "16 way";
5359 case 9: return "tpoext"; /* Overridden by Fn8000_001D */
5360 case 10: return "32 way";
5361 case 11: return "48 way";
5362 case 12: return "64 way";
5363 case 13: return "96 way";
5364 case 14: return "128way";
5365 case 15: return "fully ";
5366 default: return "????";
5367 }
5368}
5369
5370
5371/** CPUID(1).EDX field descriptions. */
5372static DBGFREGSUBFIELD const g_aLeaf1EdxSubFields[] =
5373{
5374 DBGFREGSUBFIELD_RO("FPU\0" "x87 FPU on Chip", 0, 1, 0),
5375 DBGFREGSUBFIELD_RO("VME\0" "Virtual 8086 Mode Enhancements", 1, 1, 0),
5376 DBGFREGSUBFIELD_RO("DE\0" "Debugging extensions", 2, 1, 0),
5377 DBGFREGSUBFIELD_RO("PSE\0" "Page Size Extension", 3, 1, 0),
5378 DBGFREGSUBFIELD_RO("TSC\0" "Time Stamp Counter", 4, 1, 0),
5379 DBGFREGSUBFIELD_RO("MSR\0" "Model Specific Registers", 5, 1, 0),
5380 DBGFREGSUBFIELD_RO("PAE\0" "Physical Address Extension", 6, 1, 0),
5381 DBGFREGSUBFIELD_RO("MCE\0" "Machine Check Exception", 7, 1, 0),
5382 DBGFREGSUBFIELD_RO("CX8\0" "CMPXCHG8B instruction", 8, 1, 0),
5383 DBGFREGSUBFIELD_RO("APIC\0" "APIC On-Chip", 9, 1, 0),
5384 DBGFREGSUBFIELD_RO("SEP\0" "SYSENTER and SYSEXIT Present", 11, 1, 0),
5385 DBGFREGSUBFIELD_RO("MTRR\0" "Memory Type Range Registers", 12, 1, 0),
5386 DBGFREGSUBFIELD_RO("PGE\0" "PTE Global Bit", 13, 1, 0),
5387 DBGFREGSUBFIELD_RO("MCA\0" "Machine Check Architecture", 14, 1, 0),
5388 DBGFREGSUBFIELD_RO("CMOV\0" "Conditional Move instructions", 15, 1, 0),
5389 DBGFREGSUBFIELD_RO("PAT\0" "Page Attribute Table", 16, 1, 0),
5390 DBGFREGSUBFIELD_RO("PSE-36\0" "36-bit Page Size Extension", 17, 1, 0),
5391 DBGFREGSUBFIELD_RO("PSN\0" "Processor Serial Number", 18, 1, 0),
5392 DBGFREGSUBFIELD_RO("CLFSH\0" "CLFLUSH instruction", 19, 1, 0),
5393 DBGFREGSUBFIELD_RO("DS\0" "Debug Store", 21, 1, 0),
5394 DBGFREGSUBFIELD_RO("ACPI\0" "Thermal Mon. & Soft. Clock Ctrl.", 22, 1, 0),
5395 DBGFREGSUBFIELD_RO("MMX\0" "Intel MMX Technology", 23, 1, 0),
5396 DBGFREGSUBFIELD_RO("FXSR\0" "FXSAVE and FXRSTOR instructions", 24, 1, 0),
5397 DBGFREGSUBFIELD_RO("SSE\0" "SSE support", 25, 1, 0),
5398 DBGFREGSUBFIELD_RO("SSE2\0" "SSE2 support", 26, 1, 0),
5399 DBGFREGSUBFIELD_RO("SS\0" "Self Snoop", 27, 1, 0),
5400 DBGFREGSUBFIELD_RO("HTT\0" "Hyper-Threading Technology", 28, 1, 0),
5401 DBGFREGSUBFIELD_RO("TM\0" "Therm. Monitor", 29, 1, 0),
5402 DBGFREGSUBFIELD_RO("PBE\0" "Pending Break Enabled", 31, 1, 0),
5403 DBGFREGSUBFIELD_TERMINATOR()
5404};
5405
5406/** CPUID(1).ECX field descriptions. */
5407static DBGFREGSUBFIELD const g_aLeaf1EcxSubFields[] =
5408{
5409 DBGFREGSUBFIELD_RO("SSE3\0" "SSE3 support", 0, 1, 0),
5410 DBGFREGSUBFIELD_RO("PCLMUL\0" "PCLMULQDQ support (for AES-GCM)", 1, 1, 0),
5411 DBGFREGSUBFIELD_RO("DTES64\0" "DS Area 64-bit Layout", 2, 1, 0),
5412 DBGFREGSUBFIELD_RO("MONITOR\0" "MONITOR/MWAIT instructions", 3, 1, 0),
5413 DBGFREGSUBFIELD_RO("CPL-DS\0" "CPL Qualified Debug Store", 4, 1, 0),
5414 DBGFREGSUBFIELD_RO("VMX\0" "Virtual Machine Extensions", 5, 1, 0),
5415 DBGFREGSUBFIELD_RO("SMX\0" "Safer Mode Extensions", 6, 1, 0),
5416 DBGFREGSUBFIELD_RO("EST\0" "Enhanced SpeedStep Technology", 7, 1, 0),
5417 DBGFREGSUBFIELD_RO("TM2\0" "Terminal Monitor 2", 8, 1, 0),
5418 DBGFREGSUBFIELD_RO("SSSE3\0" "Supplemental Streaming SIMD Extensions 3", 9, 1, 0),
5419 DBGFREGSUBFIELD_RO("CNTX-ID\0" "L1 Context ID", 10, 1, 0),
5420 DBGFREGSUBFIELD_RO("SDBG\0" "Silicon Debug interface", 11, 1, 0),
5421 DBGFREGSUBFIELD_RO("FMA\0" "Fused Multiply Add extensions", 12, 1, 0),
5422 DBGFREGSUBFIELD_RO("CX16\0" "CMPXCHG16B instruction", 13, 1, 0),
5423 DBGFREGSUBFIELD_RO("TPRUPDATE\0" "xTPR Update Control", 14, 1, 0),
5424 DBGFREGSUBFIELD_RO("PDCM\0" "Perf/Debug Capability MSR", 15, 1, 0),
5425 DBGFREGSUBFIELD_RO("PCID\0" "Process Context Identifiers", 17, 1, 0),
5426 DBGFREGSUBFIELD_RO("DCA\0" "Direct Cache Access", 18, 1, 0),
5427 DBGFREGSUBFIELD_RO("SSE4_1\0" "SSE4_1 support", 19, 1, 0),
5428 DBGFREGSUBFIELD_RO("SSE4_2\0" "SSE4_2 support", 20, 1, 0),
5429 DBGFREGSUBFIELD_RO("X2APIC\0" "x2APIC support", 21, 1, 0),
5430 DBGFREGSUBFIELD_RO("MOVBE\0" "MOVBE instruction", 22, 1, 0),
5431 DBGFREGSUBFIELD_RO("POPCNT\0" "POPCNT instruction", 23, 1, 0),
5432 DBGFREGSUBFIELD_RO("TSCDEADL\0" "Time Stamp Counter Deadline", 24, 1, 0),
5433 DBGFREGSUBFIELD_RO("AES\0" "AES instructions", 25, 1, 0),
5434 DBGFREGSUBFIELD_RO("XSAVE\0" "XSAVE instruction", 26, 1, 0),
5435 DBGFREGSUBFIELD_RO("OSXSAVE\0" "OSXSAVE instruction", 27, 1, 0),
5436 DBGFREGSUBFIELD_RO("AVX\0" "AVX support", 28, 1, 0),
5437 DBGFREGSUBFIELD_RO("F16C\0" "16-bit floating point conversion instructions", 29, 1, 0),
5438 DBGFREGSUBFIELD_RO("RDRAND\0" "RDRAND instruction", 30, 1, 0),
5439 DBGFREGSUBFIELD_RO("HVP\0" "Hypervisor Present (we're a guest)", 31, 1, 0),
5440 DBGFREGSUBFIELD_TERMINATOR()
5441};
5442
5443/** CPUID(7,0).EBX field descriptions. */
5444static DBGFREGSUBFIELD const g_aLeaf7Sub0EbxSubFields[] =
5445{
5446 DBGFREGSUBFIELD_RO("FSGSBASE\0" "RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE instr.", 0, 1, 0),
5447 DBGFREGSUBFIELD_RO("TSCADJUST\0" "Supports MSR_IA32_TSC_ADJUST", 1, 1, 0),
5448 DBGFREGSUBFIELD_RO("SGX\0" "Supports Software Guard Extensions", 2, 1, 0),
5449 DBGFREGSUBFIELD_RO("BMI1\0" "Advanced Bit Manipulation extension 1", 3, 1, 0),
5450 DBGFREGSUBFIELD_RO("HLE\0" "Hardware Lock Elision", 4, 1, 0),
5451 DBGFREGSUBFIELD_RO("AVX2\0" "Advanced Vector Extensions 2", 5, 1, 0),
5452 DBGFREGSUBFIELD_RO("FDP_EXCPTN_ONLY\0" "FPU DP only updated on exceptions", 6, 1, 0),
5453 DBGFREGSUBFIELD_RO("SMEP\0" "Supervisor Mode Execution Prevention", 7, 1, 0),
5454 DBGFREGSUBFIELD_RO("BMI2\0" "Advanced Bit Manipulation extension 2", 8, 1, 0),
5455 DBGFREGSUBFIELD_RO("ERMS\0" "Enhanced REP MOVSB/STOSB instructions", 9, 1, 0),
5456 DBGFREGSUBFIELD_RO("INVPCID\0" "INVPCID instruction", 10, 1, 0),
5457 DBGFREGSUBFIELD_RO("RTM\0" "Restricted Transactional Memory", 11, 1, 0),
5458 DBGFREGSUBFIELD_RO("PQM\0" "Platform Quality of Service Monitoring", 12, 1, 0),
5459 DBGFREGSUBFIELD_RO("DEPFPU_CS_DS\0" "Deprecates FPU CS, FPU DS values if set", 13, 1, 0),
5460 DBGFREGSUBFIELD_RO("MPE\0" "Intel Memory Protection Extensions", 14, 1, 0),
5461 DBGFREGSUBFIELD_RO("PQE\0" "Platform Quality of Service Enforcement", 15, 1, 0),
5462 DBGFREGSUBFIELD_RO("AVX512F\0" "AVX512 Foundation instructions", 16, 1, 0),
5463 DBGFREGSUBFIELD_RO("RDSEED\0" "RDSEED instruction", 18, 1, 0),
5464 DBGFREGSUBFIELD_RO("ADX\0" "ADCX/ADOX instructions", 19, 1, 0),
5465 DBGFREGSUBFIELD_RO("SMAP\0" "Supervisor Mode Access Prevention", 20, 1, 0),
5466 DBGFREGSUBFIELD_RO("CLFLUSHOPT\0" "CLFLUSHOPT (Cache Line Flush) instruction", 23, 1, 0),
5467 DBGFREGSUBFIELD_RO("CLWB\0" "CLWB instruction", 24, 1, 0),
5468 DBGFREGSUBFIELD_RO("INTEL_PT\0" "Intel Processor Trace", 25, 1, 0),
5469 DBGFREGSUBFIELD_RO("AVX512PF\0" "AVX512 Prefetch instructions", 26, 1, 0),
5470 DBGFREGSUBFIELD_RO("AVX512ER\0" "AVX512 Exponential & Reciprocal instructions", 27, 1, 0),
5471 DBGFREGSUBFIELD_RO("AVX512CD\0" "AVX512 Conflict Detection instructions", 28, 1, 0),
5472 DBGFREGSUBFIELD_RO("SHA\0" "Secure Hash Algorithm extensions", 29, 1, 0),
5473 DBGFREGSUBFIELD_TERMINATOR()
5474};
5475
5476/** CPUID(7,0).ECX field descriptions. */
5477static DBGFREGSUBFIELD const g_aLeaf7Sub0EcxSubFields[] =
5478{
5479 DBGFREGSUBFIELD_RO("PREFETCHWT1\0" "PREFETCHWT1 instruction", 0, 1, 0),
5480 DBGFREGSUBFIELD_RO("UMIP\0" "User mode insturction prevention", 2, 1, 0),
5481 DBGFREGSUBFIELD_RO("PKU\0" "Protection Key for Usermode pages", 3, 1, 0),
5482 DBGFREGSUBFIELD_RO("OSPKE\0" "CR4.PKU mirror", 4, 1, 0),
5483 DBGFREGSUBFIELD_RO("MAWAU\0" "Value used by BNDLDX & BNDSTX", 17, 5, 0),
5484 DBGFREGSUBFIELD_RO("RDPID\0" "Read processor ID support", 22, 1, 0),
5485 DBGFREGSUBFIELD_RO("SGX_LC\0" "Supports SGX Launch Configuration", 30, 1, 0),
5486 DBGFREGSUBFIELD_TERMINATOR()
5487};
5488
5489/** CPUID(7,0).EDX field descriptions. */
5490static DBGFREGSUBFIELD const g_aLeaf7Sub0EdxSubFields[] =
5491{
5492 DBGFREGSUBFIELD_RO("MCU_OPT_CTRL\0" "Supports IA32_MCU_OPT_CTRL ", 9, 1, 0),
5493 DBGFREGSUBFIELD_RO("MD_CLEAR\0" "Supports MDS related buffer clearing", 10, 1, 0),
5494 DBGFREGSUBFIELD_RO("TSX_FORCE_ABORT\0" "Supports IA32_TSX_FORCE_ABORT", 11, 1, 0),
5495 DBGFREGSUBFIELD_RO("CET_IBT\0" "Supports indirect branch tracking w/ CET", 20, 1, 0),
5496 DBGFREGSUBFIELD_RO("IBRS_IBPB\0" "IA32_SPEC_CTRL.IBRS and IA32_PRED_CMD.IBPB", 26, 1, 0),
5497 DBGFREGSUBFIELD_RO("STIBP\0" "Supports IA32_SPEC_CTRL.STIBP", 27, 1, 0),
5498 DBGFREGSUBFIELD_RO("FLUSH_CMD\0" "Supports IA32_FLUSH_CMD", 28, 1, 0),
5499 DBGFREGSUBFIELD_RO("ARCHCAP\0" "Supports IA32_ARCH_CAP", 29, 1, 0),
5500 DBGFREGSUBFIELD_RO("CORECAP\0" "Supports IA32_CORE_CAP", 30, 1, 0),
5501 DBGFREGSUBFIELD_RO("SSBD\0" "Supports IA32_SPEC_CTRL.SSBD", 31, 1, 0),
5502 DBGFREGSUBFIELD_TERMINATOR()
5503};
5504
5505
5506/** CPUID(7,2).EBX field descriptions. */
5507static DBGFREGSUBFIELD const g_aLeaf7Sub2EbxSubFields[] =
5508{
5509 DBGFREGSUBFIELD_TERMINATOR()
5510};
5511
5512/** CPUID(7,2).ECX field descriptions. */
5513static DBGFREGSUBFIELD const g_aLeaf7Sub2EcxSubFields[] =
5514{
5515 DBGFREGSUBFIELD_TERMINATOR()
5516};
5517
5518/** CPUID(7,2).EDX field descriptions. */
5519static DBGFREGSUBFIELD const g_aLeaf7Sub2EdxSubFields[] =
5520{
5521 DBGFREGSUBFIELD_RO("PSFD\0" "Supports IA32_SPEC_CTRL[7] (PSFD)", 0, 1, 0),
5522 DBGFREGSUBFIELD_RO("IPRED_CTRL\0" "Supports IA32_SPEC_CTRL[4:3] (IPRED_DIS)", 1, 1, 0),
5523 DBGFREGSUBFIELD_RO("RRSBA_CTRL\0" "Supports IA32_SPEC_CTRL[6:5] (RRSBA_DIS)", 2, 1, 0),
5524 DBGFREGSUBFIELD_RO("DDPD_U\0" "Supports IA32_SPEC_CTRL[8] (DDPD_U)", 3, 1, 0),
5525 DBGFREGSUBFIELD_RO("BHI_CTRL\0" "Supports IA32_SPEC_CTRL[10] (BHI_DIS_S)", 4, 1, 0),
5526 DBGFREGSUBFIELD_RO("MCDT_NO\0" "No MXCSR Config Dependent Timing issues", 5, 1, 0),
5527 DBGFREGSUBFIELD_RO("UC_LOCK_DIS\0" "Supports UC-lock disable and causing #AC", 6, 1, 0),
5528 DBGFREGSUBFIELD_RO("MONITOR_MITG_NO\0" "No MONITOR/UMONITOR power issues", 7, 1, 0),
5529 DBGFREGSUBFIELD_TERMINATOR()
5530};
5531
5532
5533/** CPUID(13,0).EAX+EDX, XCR0, ++ bit descriptions. */
5534static DBGFREGSUBFIELD const g_aXSaveStateBits[] =
5535{
5536 DBGFREGSUBFIELD_RO("x87\0" "Legacy FPU state", 0, 1, 0),
5537 DBGFREGSUBFIELD_RO("SSE\0" "128-bit SSE state", 1, 1, 0),
5538 DBGFREGSUBFIELD_RO("YMM_Hi128\0" "Upper 128 bits of YMM0-15 (AVX)", 2, 1, 0),
5539 DBGFREGSUBFIELD_RO("BNDREGS\0" "MPX bound register state", 3, 1, 0),
5540 DBGFREGSUBFIELD_RO("BNDCSR\0" "MPX bound config and status state", 4, 1, 0),
5541 DBGFREGSUBFIELD_RO("Opmask\0" "AVX-512 opmask register state", 5, 1, 0),
5542 DBGFREGSUBFIELD_RO("ZMM_Hi256\0" "Upper 256 bits of ZMM0-15 (AVX-512)", 6, 1, 0),
5543 DBGFREGSUBFIELD_RO("Hi16_ZMM\0" "512-bit ZMM16-31 state (AVX-512)", 7, 1, 0),
5544 DBGFREGSUBFIELD_RO("LWP\0" "Lightweight Profiling (AMD)", 62, 1, 0),
5545 DBGFREGSUBFIELD_TERMINATOR()
5546};
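/*
 * Worked example (a sketch, not part of the build): with XCR0 = 0x7 the
 * descriptors above decode to "x87 SSE YMM_Hi128", i.e. legacy FPU, SSE and
 * AVX state enabled.  A minimal stand-alone decoder over the same table
 * layout could look like this (RTPrintf assumes iprt/stream.h is available):
 */
#if 0 /* illustration only */
static void cpumExampleDecodeXcr0(uint64_t fXcr0)
{
    /* All entries in g_aXSaveStateBits are single-bit fields, so testing
       iFirstBit is sufficient; the mnemonic is the first of the two
       concatenated strings in pszName. */
    for (PCDBGFREGSUBFIELD pDesc = &g_aXSaveStateBits[0]; pDesc->pszName; pDesc++)
        if (fXcr0 & RT_BIT_64(pDesc->iFirstBit))
            RTPrintf(" %s", pDesc->pszName);
    RTPrintf("\n");
}
#endif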
5547
5548/** CPUID(13,1).EAX field descriptions. */
5549static DBGFREGSUBFIELD const g_aLeaf13Sub1EaxSubFields[] =
5550{
5551 DBGFREGSUBFIELD_RO("XSAVEOPT\0" "XSAVEOPT is available", 0, 1, 0),
5552 DBGFREGSUBFIELD_RO("XSAVEC\0" "XSAVEC and compacted XRSTOR supported", 1, 1, 0),
5553 DBGFREGSUBFIELD_RO("XGETBV1\0" "XGETBV with ECX=1 supported", 2, 1, 0),
5554 DBGFREGSUBFIELD_RO("XSAVES\0" "XSAVES/XRSTORS and IA32_XSS supported", 3, 1, 0),
5555 DBGFREGSUBFIELD_TERMINATOR()
5556};
5557
5558
5559/** CPUID(0x80000001,0).EDX field descriptions. */
5560static DBGFREGSUBFIELD const g_aExtLeaf1EdxSubFields[] =
5561{
5562 DBGFREGSUBFIELD_RO("FPU\0" "x87 FPU on Chip", 0, 1, 0),
5563 DBGFREGSUBFIELD_RO("VME\0" "Virtual 8086 Mode Enhancements", 1, 1, 0),
5564 DBGFREGSUBFIELD_RO("DE\0" "Debugging extensions", 2, 1, 0),
5565 DBGFREGSUBFIELD_RO("PSE\0" "Page Size Extension", 3, 1, 0),
5566 DBGFREGSUBFIELD_RO("TSC\0" "Time Stamp Counter", 4, 1, 0),
5567 DBGFREGSUBFIELD_RO("MSR\0" "K86 Model Specific Registers", 5, 1, 0),
5568 DBGFREGSUBFIELD_RO("PAE\0" "Physical Address Extension", 6, 1, 0),
5569 DBGFREGSUBFIELD_RO("MCE\0" "Machine Check Exception", 7, 1, 0),
5570 DBGFREGSUBFIELD_RO("CX8\0" "CMPXCHG8B instruction", 8, 1, 0),
5571 DBGFREGSUBFIELD_RO("APIC\0" "APIC On-Chip", 9, 1, 0),
5572 DBGFREGSUBFIELD_RO("SEP\0" "SYSCALL/SYSRET", 11, 1, 0),
5573 DBGFREGSUBFIELD_RO("MTRR\0" "Memory Type Range Registers", 12, 1, 0),
5574 DBGFREGSUBFIELD_RO("PGE\0" "PTE Global Bit", 13, 1, 0),
5575 DBGFREGSUBFIELD_RO("MCA\0" "Machine Check Architecture", 14, 1, 0),
5576 DBGFREGSUBFIELD_RO("CMOV\0" "Conditional Move instructions", 15, 1, 0),
5577 DBGFREGSUBFIELD_RO("PAT\0" "Page Attribute Table", 16, 1, 0),
5578 DBGFREGSUBFIELD_RO("PSE-36\0" "36-bit Page Size Extension", 17, 1, 0),
5579 DBGFREGSUBFIELD_RO("NX\0" "No-Execute/Execute-Disable", 20, 1, 0),
5580 DBGFREGSUBFIELD_RO("AXMMX\0" "AMD Extensions to MMX instructions", 22, 1, 0),
5581 DBGFREGSUBFIELD_RO("MMX\0" "Intel MMX Technology", 23, 1, 0),
5582 DBGFREGSUBFIELD_RO("FXSR\0" "FXSAVE and FXRSTOR Instructions", 24, 1, 0),
5583 DBGFREGSUBFIELD_RO("FFXSR\0" "AMD fast FXSAVE and FXRSTOR instructions", 25, 1, 0),
5584 DBGFREGSUBFIELD_RO("Page1GB\0" "1 GB large page", 26, 1, 0),
5585 DBGFREGSUBFIELD_RO("RDTSCP\0" "RDTSCP instruction", 27, 1, 0),
5586 DBGFREGSUBFIELD_RO("LM\0" "AMD64 Long Mode", 29, 1, 0),
5587 DBGFREGSUBFIELD_RO("3DNOWEXT\0" "AMD Extensions to 3DNow", 30, 1, 0),
5588 DBGFREGSUBFIELD_RO("3DNOW\0" "AMD 3DNow", 31, 1, 0),
5589 DBGFREGSUBFIELD_TERMINATOR()
5590};
5591
5592/** CPUID(0x80000001,0).ECX field descriptions. */
5593static DBGFREGSUBFIELD const g_aExtLeaf1EcxSubFields[] =
5594{
5595 DBGFREGSUBFIELD_RO("LahfSahf\0" "LAHF/SAHF support in 64-bit mode", 0, 1, 0),
5596 DBGFREGSUBFIELD_RO("CmpLegacy\0" "Core multi-processing legacy mode", 1, 1, 0),
5597 DBGFREGSUBFIELD_RO("SVM\0" "AMD Secure Virtual Machine extensions", 2, 1, 0),
5598 DBGFREGSUBFIELD_RO("EXTAPIC\0" "AMD Extended APIC registers", 3, 1, 0),
5599 DBGFREGSUBFIELD_RO("CR8L\0" "AMD LOCK MOV CR0 means MOV CR8", 4, 1, 0),
5600 DBGFREGSUBFIELD_RO("ABM\0" "AMD Advanced Bit Manipulation", 5, 1, 0),
5601 DBGFREGSUBFIELD_RO("SSE4A\0" "SSE4A instructions", 6, 1, 0),
5602 DBGFREGSUBFIELD_RO("MISALIGNSSE\0" "AMD Misaligned SSE mode", 7, 1, 0),
5603 DBGFREGSUBFIELD_RO("3DNOWPRF\0" "AMD PREFETCH and PREFETCHW instructions", 8, 1, 0),
5604 DBGFREGSUBFIELD_RO("OSVW\0" "AMD OS Visible Workaround", 9, 1, 0),
5605 DBGFREGSUBFIELD_RO("IBS\0" "Instruction Based Sampling", 10, 1, 0),
5606 DBGFREGSUBFIELD_RO("XOP\0" "Extended Operation support", 11, 1, 0),
5607 DBGFREGSUBFIELD_RO("SKINIT\0" "SKINIT, STGI, and DEV support", 12, 1, 0),
5608 DBGFREGSUBFIELD_RO("WDT\0" "AMD Watchdog Timer support", 13, 1, 0),
5609 DBGFREGSUBFIELD_RO("LWP\0" "Lightweight Profiling support", 15, 1, 0),
5610 DBGFREGSUBFIELD_RO("FMA4\0" "Four operand FMA instruction support", 16, 1, 0),
5611 DBGFREGSUBFIELD_RO("TCE\0" "Translation Cache Extension support", 17, 1, 0),
5612 DBGFREGSUBFIELD_RO("NodeId\0" "NodeId in MSR C001_100C", 19, 1, 0),
5613 DBGFREGSUBFIELD_RO("TBM\0" "Trailing Bit Manipulation instructions", 21, 1, 0),
5614 DBGFREGSUBFIELD_RO("TOPOEXT\0" "Topology Extensions", 22, 1, 0),
5615 DBGFREGSUBFIELD_RO("PRFEXTCORE\0" "Performance Counter Extensions support", 23, 1, 0),
5616 DBGFREGSUBFIELD_RO("PRFEXTNB\0" "NB Performance Counter Extensions support", 24, 1, 0),
5617 DBGFREGSUBFIELD_RO("DATABPEXT\0" "Data-access Breakpoint Extension", 26, 1, 0),
5618 DBGFREGSUBFIELD_RO("PERFTSC\0" "Performance Time Stamp Counter", 27, 1, 0),
5619 DBGFREGSUBFIELD_RO("PCX_L2I\0" "L2I/L3 Performance Counter Extensions", 28, 1, 0),
5620 DBGFREGSUBFIELD_RO("MONITORX\0" "MWAITX and MONITORX instructions", 29, 1, 0),
5621 DBGFREGSUBFIELD_RO("AddrMaskExt\0" "BP Addressing masking extended to bit 31", 30, 1, 0),
5622 DBGFREGSUBFIELD_TERMINATOR()
5623};
5624
5625/** CPUID(0x8000000a,0).EDX field descriptions. */
5626static DBGFREGSUBFIELD const g_aExtLeafAEdxSubFields[] =
5627{
5628 DBGFREGSUBFIELD_RO("NP\0" "Nested Paging", 0, 1, 0),
5629 DBGFREGSUBFIELD_RO("LbrVirt\0" "Last Branch Record Virtualization", 1, 1, 0),
5630 DBGFREGSUBFIELD_RO("SVML\0" "SVM Lock", 2, 1, 0),
5631 DBGFREGSUBFIELD_RO("NRIPS\0" "NextRIP Save", 3, 1, 0),
5632 DBGFREGSUBFIELD_RO("TscRateMsr\0" "MSR based TSC rate control", 4, 1, 0),
5633 DBGFREGSUBFIELD_RO("VmcbClean\0" "VMCB clean bits", 5, 1, 0),
5634 DBGFREGSUBFIELD_RO("FlushByASID\0" "Flush by ASID", 6, 1, 0),
5635 DBGFREGSUBFIELD_RO("DecodeAssists\0" "Decode Assists", 7, 1, 0),
5636 DBGFREGSUBFIELD_RO("PauseFilter\0" "Pause intercept filter", 10, 1, 0),
5637 DBGFREGSUBFIELD_RO("PauseFilterThreshold\0" "Pause filter threshold", 12, 1, 0),
5638 DBGFREGSUBFIELD_RO("AVIC\0" "Advanced Virtual Interrupt Controller", 13, 1, 0),
5639 DBGFREGSUBFIELD_RO("VMSAVEVirt\0" "VMSAVE and VMLOAD Virtualization", 15, 1, 0),
5640 DBGFREGSUBFIELD_RO("VGIF\0" "Virtual Global-Interrupt Flag", 16, 1, 0),
5641 DBGFREGSUBFIELD_RO("GMET\0" "Guest Mode Execute Trap Extension", 17, 1, 0),
5642 DBGFREGSUBFIELD_RO("x2AVIC\0" "AVIC support for x2APIC mode", 18, 1, 0),
5643 DBGFREGSUBFIELD_RO("SSSCheck\0" "SVM supervisor shadow stack restrictions", 19, 1, 0),
5644 DBGFREGSUBFIELD_RO("SpecCtrl\0" "SPEC_CTRL virtualization", 20, 1, 0),
5645 DBGFREGSUBFIELD_RO("ROGPT\0" "Read-Only Guest Page Table feature support", 21, 1, 0),
5646 DBGFREGSUBFIELD_RO("HOST_MCE_OVERRIDE\0" "Guest #MC can be intercepted", 23, 1, 0),
5647 DBGFREGSUBFIELD_RO("TlbiCtl\0" "INVLPGB/TLBSYNC enable and intercept", 24, 1, 0),
5648 DBGFREGSUBFIELD_RO("VNMI\0" "NMI Virtualization", 25, 1, 0),
5649 DBGFREGSUBFIELD_RO("IbsVirt\0" "IBS Virtualization", 26, 1, 0),
5650 DBGFREGSUBFIELD_RO("ExtLvtAvicAccessChg\0" "Extended LVT AVIC access changes", 27, 1, 0),
5651 DBGFREGSUBFIELD_RO("NestedVirtVmcbAddrChk\0""Guest VMCB address check", 28, 1, 0),
5652 DBGFREGSUBFIELD_RO("BusLockThreshold\0" "Bus Lock Threshold", 29, 1, 0),
5653 DBGFREGSUBFIELD_TERMINATOR()
5654};
5655
5656
5657/** CPUID(0x80000007,0).EDX field descriptions. */
5658static DBGFREGSUBFIELD const g_aExtLeaf7EdxSubFields[] =
5659{
5660 DBGFREGSUBFIELD_RO("TS\0" "Temperature Sensor", 0, 1, 0),
5661 DBGFREGSUBFIELD_RO("FID\0" "Frequency ID control", 1, 1, 0),
5662 DBGFREGSUBFIELD_RO("VID\0" "Voltage ID control", 2, 1, 0),
5663 DBGFREGSUBFIELD_RO("TTP\0" "Thermal Trip", 3, 1, 0),
5664 DBGFREGSUBFIELD_RO("TM\0" "Hardware Thermal Control (HTC)", 4, 1, 0),
5665 DBGFREGSUBFIELD_RO("100MHzSteps\0" "100 MHz Multiplier control", 6, 1, 0),
5666 DBGFREGSUBFIELD_RO("HwPstate\0" "Hardware P-state control", 7, 1, 0),
5667 DBGFREGSUBFIELD_RO("TscInvariant\0" "Invariant Time Stamp Counter", 8, 1, 0),
5668 DBGFREGSUBFIELD_RO("CPB\0" "Core Performance Boost", 9, 1, 0),
5669 DBGFREGSUBFIELD_RO("EffFreqRO\0" "Read-only Effective Frequency Interface", 10, 1, 0),
5670 DBGFREGSUBFIELD_RO("ProcFdbkIf\0" "Processor Feedback Interface", 11, 1, 0),
5671 DBGFREGSUBFIELD_RO("ProcPwrRep\0" "Core power reporting interface support", 12, 1, 0),
5672 DBGFREGSUBFIELD_RO("ConnectedStandby\0" "Connected Standby", 13, 1, 0),
5673 DBGFREGSUBFIELD_RO("RAPL\0" "Running average power limit", 14, 1, 0),
5674 DBGFREGSUBFIELD_TERMINATOR()
5675};
5676
5677/** CPUID(0x80000008,0).EBX field descriptions. */
5678static DBGFREGSUBFIELD const g_aExtLeaf8EbxSubFields[] =
5679{
5680 DBGFREGSUBFIELD_RO("CLZERO\0" "Clear zero instruction (cacheline)", 0, 1, 0),
5681 DBGFREGSUBFIELD_RO("IRPerf\0" "Instructions retired count support", 1, 1, 0),
5682 DBGFREGSUBFIELD_RO("XSaveErPtr\0" "Save/restore error pointers (FXSAVE/FXRSTOR)", 2, 1, 0),
5683 DBGFREGSUBFIELD_RO("INVLPGB\0" "INVLPGB and TLBSYNC instructions", 3, 1, 0),
5684 DBGFREGSUBFIELD_RO("RDPRU\0" "RDPRU instruction", 4, 1, 0),
5685 DBGFREGSUBFIELD_RO("BE\0" "Bandwidth Enforcement extension", 6, 1, 0),
5686 DBGFREGSUBFIELD_RO("MCOMMIT\0" "MCOMMIT instruction", 8, 1, 0),
5687 DBGFREGSUBFIELD_RO("WBNOINVD\0" "WBNOINVD instruction", 9, 1, 0),
5688 DBGFREGSUBFIELD_RO("IBPB\0" "Supports the IBPB command in IA32_PRED_CMD", 12, 1, 0),
5689 DBGFREGSUBFIELD_RO("INT_WBINVD\0" "WBINVD/WBNOINVD interruptible", 13, 1, 0),
5690 DBGFREGSUBFIELD_RO("IBRS\0" "Indirect Branch Restricted Speculation", 14, 1, 0),
5691 DBGFREGSUBFIELD_RO("STIBP\0" "Single Thread Indirect Branch Prediction", 15, 1, 0),
5692 DBGFREGSUBFIELD_RO("IbrsAlwaysOn\0" "Processor prefers that IBRS be left on", 16, 1, 0),
5693 DBGFREGSUBFIELD_RO("StibpAlwaysOn\0""Processor prefers that STIBP be left on", 17, 1, 0),
5694 DBGFREGSUBFIELD_RO("IbrsPreferred\0""IBRS preferred over software solution", 18, 1, 0),
5695 DBGFREGSUBFIELD_RO("IbrsSameMode\0" "IBRS limits same mode speculation", 19, 1, 0),
5696 DBGFREGSUBFIELD_RO("EferLmsleUnsupported\0" "EFER.LMSLE is unsupported", 20, 1, 0),
5697 DBGFREGSUBFIELD_RO("INVLPGBnestedPages\0" "INVLPGB for nested translation", 21, 1, 0),
5698 DBGFREGSUBFIELD_RO("PPIN\0" "Protected processor inventory number", 23, 1, 0),
5699 DBGFREGSUBFIELD_RO("SSBD\0" "Speculative Store Bypass Disable", 24, 1, 0),
5700 DBGFREGSUBFIELD_RO("SsbdVirtSpecCtrl\0" "Use VIRT_SPEC_CTRL for SSBD", 25, 1, 0),
5701 DBGFREGSUBFIELD_RO("SsbdNotRequired\0" "SSBD not needed on this processor", 26, 1, 0),
5702 DBGFREGSUBFIELD_RO("CPPC\0" "Collaborative Processor Performance Control", 27, 1, 0),
5703 DBGFREGSUBFIELD_RO("PSFD\0" "Predictive Store Forward Disable", 28, 1, 0),
5704 DBGFREGSUBFIELD_RO("BTC_NO\0" "Unaffected by branch type confusion", 29, 1, 0),
5705 DBGFREGSUBFIELD_RO("IBPB_RET\0" "Clears RA predictor when PRED_CMD.IBPB set", 30, 1, 0),
5706 DBGFREGSUBFIELD_TERMINATOR()
5707};
5708
5709
5710static void cpumR3CpuIdInfoMnemonicListU32(PCDBGFINFOHLP pHlp, uint32_t uVal, PCDBGFREGSUBFIELD pDesc,
5711 const char *pszLeadIn, uint32_t cchWidth)
5712{
5713 if (pszLeadIn)
5714 pHlp->pfnPrintf(pHlp, "%*s", cchWidth, pszLeadIn);
5715
5716 for (uint32_t iBit = 0; iBit < 32; iBit++)
5717 if (RT_BIT_32(iBit) & uVal)
5718 {
5719 while ( pDesc->pszName != NULL
5720 && iBit >= (uint32_t)pDesc->iFirstBit + pDesc->cBits)
5721 pDesc++;
5722 if ( pDesc->pszName != NULL
5723 && iBit - (uint32_t)pDesc->iFirstBit < (uint32_t)pDesc->cBits)
5724 {
5725 if (pDesc->cBits == 1)
5726 pHlp->pfnPrintf(pHlp, " %s", pDesc->pszName);
5727 else
5728 {
5729 uint32_t uFieldValue = uVal >> pDesc->iFirstBit;
5730 if (pDesc->cBits < 32)
5731 uFieldValue &= RT_BIT_32(pDesc->cBits) - UINT32_C(1);
5732 pHlp->pfnPrintf(pHlp, pDesc->cBits < 4 ? " %s=%u" : " %s=%#x", pDesc->pszName, uFieldValue);
5733 iBit = pDesc->iFirstBit + pDesc->cBits - 1;
5734 }
5735 }
5736 else
5737 pHlp->pfnPrintf(pHlp, " %u", iBit);
5738 }
5739 if (pszLeadIn)
5740 pHlp->pfnPrintf(pHlp, "\n");
5741}
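/*
 * Usage sketch: a single call renders a 32-bit register as a mnemonic list.
 * Single-bit fields print as bare names, wider fields print as NAME=value,
 * and set bits without a descriptor print as their bit number.  E.g.
 * (hypothetical leaf pointer, mirroring the calls further down):
 * @code
 *     cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEcx, g_aLeaf1EcxSubFields,
 *                                    "Features ECX:", 36);
 * @endcode
 */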
5742
5743
5744static void cpumR3CpuIdInfoMnemonicListU64(PCDBGFINFOHLP pHlp, uint64_t uVal, PCDBGFREGSUBFIELD pDesc,
5745 const char *pszLeadIn, uint32_t cchWidth)
5746{
5747 if (pszLeadIn)
5748 pHlp->pfnPrintf(pHlp, "%*s", cchWidth, pszLeadIn);
5749
5750 for (uint32_t iBit = 0; iBit < 64; iBit++)
5751 if (RT_BIT_64(iBit) & uVal)
5752 {
5753 while ( pDesc->pszName != NULL
5754 && iBit >= (uint32_t)pDesc->iFirstBit + pDesc->cBits)
5755 pDesc++;
5756 if ( pDesc->pszName != NULL
5757 && iBit - (uint32_t)pDesc->iFirstBit < (uint32_t)pDesc->cBits)
5758 {
5759 if (pDesc->cBits == 1)
5760 pHlp->pfnPrintf(pHlp, " %s", pDesc->pszName);
5761 else
5762 {
5763 uint64_t uFieldValue = uVal >> pDesc->iFirstBit;
5764 if (pDesc->cBits < 64)
5765 uFieldValue &= RT_BIT_64(pDesc->cBits) - UINT64_C(1);
5766 pHlp->pfnPrintf(pHlp, pDesc->cBits < 4 ? " %s=%llu" : " %s=%#llx", pDesc->pszName, uFieldValue);
5767 iBit = pDesc->iFirstBit + pDesc->cBits - 1;
5768 }
5769 }
5770 else
5771 pHlp->pfnPrintf(pHlp, " %u", iBit);
5772 }
5773 if (pszLeadIn)
5774 pHlp->pfnPrintf(pHlp, "\n");
5775}
5776
5777
5778static void cpumR3CpuIdInfoValueWithMnemonicListU64(PCDBGFINFOHLP pHlp, uint64_t uVal, PCDBGFREGSUBFIELD pDesc,
5779 const char *pszLeadIn, uint32_t cchWidth)
5780{
5781 if (!uVal)
5782 pHlp->pfnPrintf(pHlp, "%*s %#010x`%08x\n", cchWidth, pszLeadIn, RT_HI_U32(uVal), RT_LO_U32(uVal));
5783 else
5784 {
5785 pHlp->pfnPrintf(pHlp, "%*s %#010x`%08x (", cchWidth, pszLeadIn, RT_HI_U32(uVal), RT_LO_U32(uVal));
5786 cpumR3CpuIdInfoMnemonicListU64(pHlp, uVal, pDesc, NULL, 0);
5787 pHlp->pfnPrintf(pHlp, " )\n");
5788 }
5789}
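/*
 * Usage sketch (mirrors the calls in the leaf 0xd dumper below): prints the
 * 64-bit value and, when it is non-zero, its decoded mnemonics in
 * parentheses, e.g. XCR0 = 0x7 decodes to "( x87 SSE YMM_Hi128 )":
 * @code
 *     cpumR3CpuIdInfoValueWithMnemonicListU64(pHlp, RT_MAKE_U64(pCurLeaf->uEax, pCurLeaf->uEdx),
 *                                             g_aXSaveStateBits, "Valid XCR0 bits, guest:", 42);
 * @endcode
 */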
5790
5791
5792static void cpumR3CpuIdInfoVerboseCompareListU32(PCDBGFINFOHLP pHlp, uint32_t uVal1, uint32_t uVal2, PCDBGFREGSUBFIELD pDesc,
5793 const char *pszLeadIn, uint32_t cchWidth)
5794{
5795 if (pszLeadIn)
5796 pHlp->pfnPrintf(pHlp,
5797 "%s\n"
5798 " %-*s= guest (host)\n",
5799 pszLeadIn,
5800 cchWidth, "Mnemonic - Description");
5801
5802 uint32_t uCombined = uVal1 | uVal2;
5803 for (uint32_t iBit = 0; iBit < 32; iBit++)
5804 if ( (RT_BIT_32(iBit) & uCombined)
5805 || (iBit == pDesc->iFirstBit && pDesc->pszName) )
5806 {
5807 while ( pDesc->pszName != NULL
5808 && iBit >= (uint32_t)pDesc->iFirstBit + pDesc->cBits)
5809 pDesc++;
5810
5811 if ( pDesc->pszName != NULL
5812 && iBit - (uint32_t)pDesc->iFirstBit < (uint32_t)pDesc->cBits)
5813 {
5814 size_t cchMnemonic = strlen(pDesc->pszName);
5815 const char *pszDesc = pDesc->pszName + cchMnemonic + 1;
5816 size_t cchDesc = strlen(pszDesc);
5817 uint32_t uFieldValue1 = uVal1 >> pDesc->iFirstBit;
5818 uint32_t uFieldValue2 = uVal2 >> pDesc->iFirstBit;
5819 if (pDesc->cBits < 32)
5820 {
5821 uFieldValue1 &= RT_BIT_32(pDesc->cBits) - UINT32_C(1);
5822 uFieldValue2 &= RT_BIT_32(pDesc->cBits) - UINT32_C(1);
5823 }
5824
5825 pHlp->pfnPrintf(pHlp, pDesc->cBits < 4 ? " %s - %s%*s= %u (%u)\n" : " %s - %s%*s= %#x (%#x)\n",
5826 pDesc->pszName, pszDesc,
5827 cchMnemonic + 3 + cchDesc < cchWidth ? cchWidth - (cchMnemonic + 3 + cchDesc) : 1, "",
5828 uFieldValue1, uFieldValue2);
5829
5830 iBit = pDesc->iFirstBit + pDesc->cBits - 1U;
5831 pDesc++;
5832 }
5833 else
5834 pHlp->pfnPrintf(pHlp, " %2u - Reserved%*s= %u (%u)\n", iBit, 13 < cchWidth ? cchWidth - 13 : 1, "",
5835 RT_BOOL(uVal1 & RT_BIT_32(iBit)), RT_BOOL(uVal2 & RT_BIT_32(iBit)));
5836 }
5837}
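/*
 * Output sketch (hypothetical bit values): the verbose compare list prints a
 * header followed by one line per described (or set) bit, guest value first
 * and host value in parentheses, roughly:
 *
 *   Features
 *     Mnemonic - Description                              = guest (host)
 *     FPU - x87 FPU on Chip                               = 1 (1)
 *      10 - Reserved                                      = 0 (1)
 */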
5838
5839
5840/**
5841 * Produces a detailed summary of standard leaf 0x00000001.
5842 *
5843 * @param pHlp The info helper functions.
5844 * @param pCurLeaf The 0x00000001 leaf.
5845 * @param fVerbose Whether to be very verbose or not.
5846 * @param fIntel Set if intel CPU.
5847 */
5848static void cpumR3CpuIdInfoStdLeaf1Details(PCDBGFINFOHLP pHlp, PCCPUMCPUIDLEAF pCurLeaf, bool fVerbose, bool fIntel)
5849{
5850 Assert(pCurLeaf); Assert(pCurLeaf->uLeaf == 1);
5851 static const char * const s_apszTypes[4] = { "primary", "overdrive", "MP", "reserved" };
5852 uint32_t uEAX = pCurLeaf->uEax;
5853 uint32_t uEBX = pCurLeaf->uEbx;
5854
5855 pHlp->pfnPrintf(pHlp,
5856 "%36s %2d \tExtended: %d \tEffective: %d\n"
5857 "%36s %2d \tExtended: %d \tEffective: %d\n"
5858 "%36s %d\n"
5859 "%36s %d (%s)\n"
5860 "%36s %#04x\n"
5861 "%36s %d\n"
5862 "%36s %d\n"
5863 "%36s %#04x\n"
5864 ,
5865 "Family:", (uEAX >> 8) & 0xf, (uEAX >> 20) & 0x7f, RTX86GetCpuFamily(uEAX),
5866 "Model:", (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, RTX86GetCpuModel(uEAX, fIntel),
5867 "Stepping:", RTX86GetCpuStepping(uEAX),
5868 "Type:", (uEAX >> 12) & 3, s_apszTypes[(uEAX >> 12) & 3],
5869 "APIC ID:", (uEBX >> 24) & 0xff,
5870 "Logical CPUs:",(uEBX >> 16) & 0xff,
5871 "CLFLUSH Size:",(uEBX >> 8) & 0xff,
5872 "Brand ID:", (uEBX >> 0) & 0xff);
5873 if (fVerbose)
5874 {
5875 CPUMCPUID Host = {0};
5876#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
5877 ASMCpuIdExSlow(1, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
5878#endif
5879 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aLeaf1EdxSubFields, "Features", 56);
5880 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aLeaf1EcxSubFields, NULL, 56);
5881 }
5882 else
5883 {
5884 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEdx, g_aLeaf1EdxSubFields, "Features EDX:", 36);
5885 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEcx, g_aLeaf1EcxSubFields, "Features ECX:", 36);
5886 }
5887}
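/*
 * Worked example (hypothetical leaf value): uEAX = 0x000906ea decodes as
 * base family 6 / extended family 0 -> effective family 6, base model 0xe /
 * extended model 9 -> effective model 0x9e, stepping 10:
 */
#if 0 /* illustration only */
static void cpumExampleDecodeLeaf1Eax(void)
{
    uint32_t const uExampleEax = UINT32_C(0x000906ea);
    Assert(RTX86GetCpuFamily(uExampleEax)                   ==    6);
    Assert(RTX86GetCpuModel(uExampleEax, true /*fIntel*/)   == 0x9e);
    Assert(RTX86GetCpuStepping(uExampleEax)                 ==   10);
}
#endif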
5888
5889
5890/**
5891 * Produces a detailed summary of standard leaf 0x00000007.
5892 *
5893 * @param pHlp The info helper functions.
5894 * @param paLeaves The CPUID leaves array.
5895 * @param cLeaves The number of leaves in the array.
5896 * @param pCurLeaf The first 0x00000007 leaf.
5897 * @param fVerbose Whether to be very verbose or not.
5898 */
5899static void cpumR3CpuIdInfoStdLeaf7Details(PCDBGFINFOHLP pHlp, PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves,
5900 PCCPUMCPUIDLEAF pCurLeaf, bool fVerbose)
5901{
5902 Assert(pCurLeaf); Assert(pCurLeaf->uLeaf == 7);
5903 pHlp->pfnPrintf(pHlp, "Structured Extended Feature Flags Enumeration (leaf 7):\n");
5904 for (;;)
5905 {
5906 CPUMCPUID Host = {0};
5907#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
5908 ASMCpuIdExSlow(pCurLeaf->uLeaf, 0, pCurLeaf->uSubLeaf, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
5909#endif
5910
5911 switch (pCurLeaf->uSubLeaf)
5912 {
5913 case 0:
5914 if (fVerbose)
5915 {
5916 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEbx, Host.uEbx, g_aLeaf7Sub0EbxSubFields, "Sub-leaf 0", 56);
5917 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aLeaf7Sub0EcxSubFields, NULL, 56);
5918 if (pCurLeaf->uEdx || Host.uEdx)
5919 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aLeaf7Sub0EdxSubFields, NULL, 56);
5920 }
5921 else
5922 {
5923 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEbx, g_aLeaf7Sub0EbxSubFields, "Ext Features #0 EBX:", 36);
5924 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEcx, g_aLeaf7Sub0EcxSubFields, "Ext Features #0 ECX:", 36);
5925 if (pCurLeaf->uEdx)
5926 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEdx, g_aLeaf7Sub0EdxSubFields, "Ext Features #0 EDX:", 36);
5927 }
5928 break;
5929
5930 /** @todo case 1 */
5931
5932 case 2:
5933 if (fVerbose)
5934 {
5935 pHlp->pfnPrintf(pHlp, " Sub-leaf 2\n");
5936 pHlp->pfnPrintf(pHlp, " Mnemonic - Description = guest (host)\n");
5937 if (pCurLeaf->uEbx || Host.uEbx)
5938 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEbx, Host.uEbx, g_aLeaf7Sub2EbxSubFields, NULL, 56);
5939 if (pCurLeaf->uEcx || Host.uEcx)
5940 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aLeaf7Sub2EcxSubFields, NULL, 56);
5941 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aLeaf7Sub2EdxSubFields, NULL, 56);
5942 }
5943 else
5944 {
5945 if (pCurLeaf->uEbx)
5946 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEbx, g_aLeaf7Sub2EbxSubFields, "Ext Features #2 EBX:", 36);
5947 if (pCurLeaf->uEcx)
5948 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEcx, g_aLeaf7Sub2EcxSubFields, "Ext Features #2 ECX:", 36);
5949 if (pCurLeaf->uEdx)
5950 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEdx, g_aLeaf7Sub2EdxSubFields, "Ext Features #2 EDX:", 36);
5951 }
5952 break;
5953
5954 default:
5955 if (pCurLeaf->uEdx || pCurLeaf->uEcx || pCurLeaf->uEbx)
5956 pHlp->pfnPrintf(pHlp, "Unknown extended feature sub-leaf #%u: EAX=%#x EBX=%#x ECX=%#x EDX=%#x\n",
5957 pCurLeaf->uSubLeaf, pCurLeaf->uEax, pCurLeaf->uEbx, pCurLeaf->uEcx, pCurLeaf->uEdx);
5958 break;
5959
5960 }
5961
5962 /* advance. */
5963 pCurLeaf++;
5964 if ( (uintptr_t)(pCurLeaf - paLeaves) >= cLeaves
5965 || pCurLeaf->uLeaf != 0x7)
5966 break;
5967 }
5968}
5969
5970
5971/**
5972 * Produces a detailed summary of standard leaf 0x0000000d.
5973 *
5974 * @param pHlp The info helper functions.
5975 * @param paLeaves The CPUID leaves array.
5976 * @param cLeaves The number of leaves in the array.
5977 * @param pCurLeaf The first 0x0000000d leaf.
5978 * @param fVerbose Whether to be very verbose or not.
5979 */
5980static void cpumR3CpuIdInfoStdLeaf13Details(PCDBGFINFOHLP pHlp, PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves,
5981 PCCPUMCPUIDLEAF pCurLeaf, bool fVerbose)
5982{
5983 RT_NOREF_PV(fVerbose);
5984 Assert(pCurLeaf); Assert(pCurLeaf->uLeaf == 13);
5985 pHlp->pfnPrintf(pHlp, "Processor Extended State Enumeration (leaf 0xd):\n");
5986 for (uint32_t uSubLeaf = 0; uSubLeaf < 64; uSubLeaf++)
5987 {
5988 CPUMCPUID Host = {0};
5989#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
5990 ASMCpuIdExSlow(UINT32_C(0x0000000d), 0, uSubLeaf, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
5991#endif
5992
5993 switch (uSubLeaf)
5994 {
5995 case 0:
5996 if (pCurLeaf && pCurLeaf->uSubLeaf == uSubLeaf)
5997 pHlp->pfnPrintf(pHlp, "%42s %#x/%#x\n", "XSAVE area cur/max size by XCR0, guest:",
5998 pCurLeaf->uEbx, pCurLeaf->uEcx);
5999 pHlp->pfnPrintf(pHlp, "%42s %#x/%#x\n", "XSAVE area cur/max size by XCR0, host:", Host.uEbx, Host.uEcx);
6000
6001 if (pCurLeaf && pCurLeaf->uSubLeaf == uSubLeaf)
6002 cpumR3CpuIdInfoValueWithMnemonicListU64(pHlp, RT_MAKE_U64(pCurLeaf->uEax, pCurLeaf->uEdx), g_aXSaveStateBits,
6003 "Valid XCR0 bits, guest:", 42);
6004 cpumR3CpuIdInfoValueWithMnemonicListU64(pHlp, RT_MAKE_U64(Host.uEax, Host.uEdx), g_aXSaveStateBits,
6005 "Valid XCR0 bits, host:", 42);
6006 break;
6007
6008 case 1:
6009 if (pCurLeaf && pCurLeaf->uSubLeaf == uSubLeaf)
6010 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEax, g_aLeaf13Sub1EaxSubFields, "XSAVE features, guest:", 42);
6011 cpumR3CpuIdInfoMnemonicListU32(pHlp, Host.uEax, g_aLeaf13Sub1EaxSubFields, "XSAVE features, host:", 42);
6012
6013 if (pCurLeaf && pCurLeaf->uSubLeaf == uSubLeaf)
6014 pHlp->pfnPrintf(pHlp, "%42s %#x\n", "XSAVE area cur size XCR0|XSS, guest:", pCurLeaf->uEbx);
6015 pHlp->pfnPrintf(pHlp, "%42s %#x\n", "XSAVE area cur size XCR0|XSS, host:", Host.uEbx);
6016
6017 if (pCurLeaf && pCurLeaf->uSubLeaf == uSubLeaf)
6018 cpumR3CpuIdInfoValueWithMnemonicListU64(pHlp, RT_MAKE_U64(pCurLeaf->uEcx, pCurLeaf->uEdx), g_aXSaveStateBits,
6019 " Valid IA32_XSS bits, guest:", 42);
6020                 cpumR3CpuIdInfoValueWithMnemonicListU64(pHlp, RT_MAKE_U64(Host.uEcx, Host.uEdx), g_aXSaveStateBits,
6021 " Valid IA32_XSS bits, host:", 42);
6022 break;
6023
6024 default:
6025 if ( pCurLeaf
6026 && pCurLeaf->uSubLeaf == uSubLeaf
6027 && (pCurLeaf->uEax || pCurLeaf->uEbx || pCurLeaf->uEcx || pCurLeaf->uEdx) )
6028 {
6029 pHlp->pfnPrintf(pHlp, " State #%u, guest: off=%#06x, cb=%#06x %s", uSubLeaf, pCurLeaf->uEbx,
6030                                     pCurLeaf->uEax, pCurLeaf->uEcx & RT_BIT_32(0) ? "IA32_XSS-bit" : "XCR0-bit");
6031 if (pCurLeaf->uEcx & ~RT_BIT_32(0))
6032 pHlp->pfnPrintf(pHlp, " ECX[reserved]=%#x\n", pCurLeaf->uEcx & ~RT_BIT_32(0));
6033 if (pCurLeaf->uEdx)
6034 pHlp->pfnPrintf(pHlp, " EDX[reserved]=%#x\n", pCurLeaf->uEdx);
6035 pHlp->pfnPrintf(pHlp, " --");
6036 cpumR3CpuIdInfoMnemonicListU64(pHlp, RT_BIT_64(uSubLeaf), g_aXSaveStateBits, NULL, 0);
6037 pHlp->pfnPrintf(pHlp, "\n");
6038 }
6039 if (Host.uEax || Host.uEbx || Host.uEcx || Host.uEdx)
6040 {
6041 pHlp->pfnPrintf(pHlp, " State #%u, host: off=%#06x, cb=%#06x %s", uSubLeaf, Host.uEbx,
6042                                     Host.uEax, Host.uEcx & RT_BIT_32(0) ? "IA32_XSS-bit" : "XCR0-bit");
6043 if (Host.uEcx & ~RT_BIT_32(0))
6044 pHlp->pfnPrintf(pHlp, " ECX[reserved]=%#x\n", Host.uEcx & ~RT_BIT_32(0));
6045 if (Host.uEdx)
6046 pHlp->pfnPrintf(pHlp, " EDX[reserved]=%#x\n", Host.uEdx);
6047 pHlp->pfnPrintf(pHlp, " --");
6048 cpumR3CpuIdInfoMnemonicListU64(pHlp, RT_BIT_64(uSubLeaf), g_aXSaveStateBits, NULL, 0);
6049 pHlp->pfnPrintf(pHlp, "\n");
6050 }
6051 break;
6052
6053 }
6054
6055 /* advance. */
6056 if (pCurLeaf)
6057 {
6058 while ( (uintptr_t)(pCurLeaf - paLeaves) < cLeaves
6059 && pCurLeaf->uSubLeaf <= uSubLeaf
6060 && pCurLeaf->uLeaf == UINT32_C(0x0000000d))
6061 pCurLeaf++;
6062 if ( (uintptr_t)(pCurLeaf - paLeaves) >= cLeaves
6063 || pCurLeaf->uLeaf != UINT32_C(0x0000000d))
6064 pCurLeaf = NULL;
6065 }
6066 }
6067}
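/*
 * Reading the per-component rows (hypothetical but typical values): for
 * state component #2 (YMM_Hi128) CPUID(0xd,2) commonly returns EBX=0x240
 * (offset into the XSAVE area), EAX=0x100 (size) and ECX[0]=0 (an
 * XCR0-managed component), which the dumper above renders roughly as:
 *
 *   State #2, host: off=0x0240, cb=0x0100 XCR0-bit  -- YMM_Hi128
 */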
6068
6069
6070static PCCPUMCPUIDLEAF cpumR3CpuIdInfoRawRange(PCDBGFINFOHLP pHlp, PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves,
6071 PCCPUMCPUIDLEAF pCurLeaf, uint32_t uUpToLeaf, const char *pszTitle)
6072{
6073 if ( (uintptr_t)(pCurLeaf - paLeaves) < cLeaves
6074 && pCurLeaf->uLeaf <= uUpToLeaf)
6075 {
6076 pHlp->pfnPrintf(pHlp,
6077 " %s\n"
6078 " Leaf/sub-leaf eax ebx ecx edx\n", pszTitle);
6079 while ( (uintptr_t)(pCurLeaf - paLeaves) < cLeaves
6080 && pCurLeaf->uLeaf <= uUpToLeaf)
6081 {
6082 CPUMCPUID Host = {0};
6083#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
6084 ASMCpuIdExSlow(pCurLeaf->uLeaf, 0, pCurLeaf->uSubLeaf, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
6085#endif
6086 pHlp->pfnPrintf(pHlp,
6087 "Gst: %08x/%04x %08x %08x %08x %08x\n"
6088 "Hst: %08x %08x %08x %08x\n",
6089 pCurLeaf->uLeaf, pCurLeaf->uSubLeaf, pCurLeaf->uEax, pCurLeaf->uEbx, pCurLeaf->uEcx, pCurLeaf->uEdx,
6090 Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
6091 pCurLeaf++;
6092 }
6093 }
6094
6095 return pCurLeaf;
6096}
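/*
 * Usage sketch: the helper walks paLeaves from pCurLeaf up to and including
 * uUpToLeaf, printing a guest/host pair per leaf, and returns the first leaf
 * past the range so callers can chain consecutive ranges (this exact call
 * appears in cpumR3CpuIdInfo below):
 * @code
 *     pCurLeaf = cpumR3CpuIdInfoRawRange(pHlp, paLeaves, cLeaves, pCurLeaf,
 *                                        UINT32_C(0x3fffffff), "Unknown CPUID Leaves");
 * @endcode
 */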
6097
6098
6099/**
6100 * Display the guest CpuId leaves.
6101 *
6102 * @param pVM The cross context VM structure.
6103 * @param pHlp The info helper functions.
6104 * @param pszArgs "terse", "default" or "verbose".
6105 */
6106DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
6107{
6108 /*
6109 * Parse the argument.
6110 */
6111 unsigned iVerbosity = 1;
6112 if (pszArgs)
6113 {
6114 pszArgs = RTStrStripL(pszArgs);
6115 if (!strcmp(pszArgs, "terse"))
6116 iVerbosity--;
6117 else if (!strcmp(pszArgs, "verbose"))
6118 iVerbosity++;
6119 }
6120
6121 uint32_t uLeaf;
6122 CPUMCPUID Host = {0};
6123 uint32_t cLeaves = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
6124 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.paCpuIdLeavesR3;
6125 PCCPUMCPUIDLEAF pCurLeaf;
6126 PCCPUMCPUIDLEAF pNextLeaf;
6127 bool const fIntel = RTX86IsIntelCpu(pVM->cpum.s.aGuestCpuIdPatmStd[0].uEbx,
6128 pVM->cpum.s.aGuestCpuIdPatmStd[0].uEcx,
6129 pVM->cpum.s.aGuestCpuIdPatmStd[0].uEdx);
6130
6131 /*
6132 * Standard leaves. Custom raw dump here due to ECX sub-leaves host handling.
6133 */
6134#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
6135 uint32_t cHstMax = ASMCpuId_EAX(0);
6136#else
6137 uint32_t cHstMax = 0;
6138#endif
6139 uint32_t cGstMax = paLeaves[0].uLeaf == 0 ? paLeaves[0].uEax : 0;
6140 uint32_t cMax = RT_MAX(cGstMax, cHstMax);
6141 pHlp->pfnPrintf(pHlp,
6142 " Raw Standard CPUID Leaves\n"
6143 " Leaf/sub-leaf eax ebx ecx edx\n");
6144 for (uLeaf = 0, pCurLeaf = paLeaves; uLeaf <= cMax; uLeaf++)
6145 {
6146 uint32_t cMaxSubLeaves = 1;
6147 if (uLeaf == 4 || uLeaf == 7 || uLeaf == 0xb)
6148 cMaxSubLeaves = 16;
6149 else if (uLeaf == 0xd)
6150 cMaxSubLeaves = 128;
6151
6152 for (uint32_t uSubLeaf = 0; uSubLeaf < cMaxSubLeaves; uSubLeaf++)
6153 {
6154#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
6155 ASMCpuIdExSlow(uLeaf, 0, uSubLeaf, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
6156#endif
6157 if ( (uintptr_t)(pCurLeaf - paLeaves) < cLeaves
6158 && pCurLeaf->uLeaf == uLeaf
6159 && pCurLeaf->uSubLeaf == uSubLeaf)
6160 {
6161 pHlp->pfnPrintf(pHlp,
6162 "Gst: %08x/%04x %08x %08x %08x %08x\n"
6163 "Hst: %08x %08x %08x %08x\n",
6164 uLeaf, uSubLeaf, pCurLeaf->uEax, pCurLeaf->uEbx, pCurLeaf->uEcx, pCurLeaf->uEdx,
6165 Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
6166 pCurLeaf++;
6167 }
6168 else if ( uLeaf != 0xd
6169 || uSubLeaf <= 1
6170 || Host.uEbx != 0 )
6171 pHlp->pfnPrintf(pHlp,
6172 "Hst: %08x/%04x %08x %08x %08x %08x\n",
6173 uLeaf, uSubLeaf, Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
6174
6175 /* Done? */
6176 if ( ( (uintptr_t)(pCurLeaf - paLeaves) >= cLeaves
6177 || pCurLeaf->uLeaf != uLeaf)
6178 && ( (uLeaf == 0x4 && ((Host.uEax & 0x000f) == 0 || (Host.uEax & 0x000f) >= 8))
6179 || (uLeaf == 0x7 && Host.uEax == 0)
6180 || (uLeaf == 0xb && ((Host.uEcx & 0xff00) == 0 || (Host.uEcx & 0xff00) >= 8))
6181 || (uLeaf == 0xb && (Host.uEcx & 0xff) != uSubLeaf)
6182 || (uLeaf == 0xd && uSubLeaf >= 128)
6183 )
6184 )
6185 break;
6186 }
6187 }
6188 pNextLeaf = pCurLeaf;
6189
6190 /*
6191 * If verbose, decode it.
6192 */
6193 if (iVerbosity && paLeaves[0].uLeaf == 0)
6194 pHlp->pfnPrintf(pHlp,
6195 "%36s %.04s%.04s%.04s\n"
6196 "%36s 0x00000000-%#010x\n"
6197 ,
6198 "Name:", &paLeaves[0].uEbx, &paLeaves[0].uEdx, &paLeaves[0].uEcx,
6199 "Supports:", paLeaves[0].uEax);
6200
6201 if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x00000001), 0)) != NULL)
6202 cpumR3CpuIdInfoStdLeaf1Details(pHlp, pCurLeaf, iVerbosity > 1, fIntel);
6203
6204 if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x00000007), 0)) != NULL)
6205 cpumR3CpuIdInfoStdLeaf7Details(pHlp, paLeaves, cLeaves, pCurLeaf, iVerbosity > 1);
6206
6207 if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x0000000d), 0)) != NULL)
6208 cpumR3CpuIdInfoStdLeaf13Details(pHlp, paLeaves, cLeaves, pCurLeaf, iVerbosity > 1);
6209
6210 pCurLeaf = pNextLeaf;
6211
6212 /*
6213 * Hypervisor leaves.
6214 *
6215 * Unlike most of the other leaves reported, the guest hypervisor leaves
6216 * aren't a subset of the host CPUID bits.
6217 */
6218 pCurLeaf = cpumR3CpuIdInfoRawRange(pHlp, paLeaves, cLeaves, pCurLeaf, UINT32_C(0x3fffffff), "Unknown CPUID Leaves");
6219
6220#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
6221 ASMCpuIdExSlow(UINT32_C(0x40000000), 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
6222#endif
6223 cHstMax = Host.uEax >= UINT32_C(0x40000001) && Host.uEax <= UINT32_C(0x40000fff) ? Host.uEax : 0;
6224 cGstMax = (uintptr_t)(pCurLeaf - paLeaves) < cLeaves && pCurLeaf->uLeaf == UINT32_C(0x40000000)
6225 ? RT_MIN(pCurLeaf->uEax, UINT32_C(0x40000fff)) : 0;
6226 cMax = RT_MAX(cHstMax, cGstMax);
6227 if (cMax >= UINT32_C(0x40000000))
6228 {
6229 pNextLeaf = cpumR3CpuIdInfoRawRange(pHlp, paLeaves, cLeaves, pCurLeaf, cMax, "Raw Hypervisor CPUID Leaves");
6230
6231 /** @todo dump these in more detail. */
6232
6233 pCurLeaf = pNextLeaf;
6234 }
6235
6236
6237 /*
6238 * Extended. Custom raw dump here due to ECX sub-leaves host handling.
6239 * Implemented according to the AMD specifications.
6240 */
6241 pCurLeaf = cpumR3CpuIdInfoRawRange(pHlp, paLeaves, cLeaves, pCurLeaf, UINT32_C(0x7fffffff), "Unknown CPUID Leaves");
6242
6243#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
6244 ASMCpuIdExSlow(UINT32_C(0x80000000), 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
6245#endif
6246 cHstMax = RTX86IsValidExtRange(Host.uEax) ? RT_MIN(Host.uEax, UINT32_C(0x80000fff)) : 0;
6247 cGstMax = (uintptr_t)(pCurLeaf - paLeaves) < cLeaves && pCurLeaf->uLeaf == UINT32_C(0x80000000)
6248 ? RT_MIN(pCurLeaf->uEax, UINT32_C(0x80000fff)) : 0;
6249 cMax = RT_MAX(cHstMax, cGstMax);
6250 if (cMax >= UINT32_C(0x80000000))
6251 {
6252
6253 pHlp->pfnPrintf(pHlp,
6254 " Raw Extended CPUID Leaves\n"
6255 " Leaf/sub-leaf eax ebx ecx edx\n");
6256 PCCPUMCPUIDLEAF pExtLeaf = pCurLeaf;
6257 for (uLeaf = UINT32_C(0x80000000); uLeaf <= cMax; uLeaf++)
6258 {
6259 uint32_t cMaxSubLeaves = 1;
6260 if (uLeaf == UINT32_C(0x8000001d))
6261 cMaxSubLeaves = 16;
6262
6263 for (uint32_t uSubLeaf = 0; uSubLeaf < cMaxSubLeaves; uSubLeaf++)
6264 {
6265#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
6266 ASMCpuIdExSlow(uLeaf, 0, uSubLeaf, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
6267#endif
6268 if ( (uintptr_t)(pCurLeaf - paLeaves) < cLeaves
6269 && pCurLeaf->uLeaf == uLeaf
6270 && pCurLeaf->uSubLeaf == uSubLeaf)
6271 {
6272 pHlp->pfnPrintf(pHlp,
6273 "Gst: %08x/%04x %08x %08x %08x %08x\n"
6274 "Hst: %08x %08x %08x %08x\n",
6275 uLeaf, uSubLeaf, pCurLeaf->uEax, pCurLeaf->uEbx, pCurLeaf->uEcx, pCurLeaf->uEdx,
6276 Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
6277 pCurLeaf++;
6278 }
6279 else if ( uLeaf != 0xd
6280 || uSubLeaf <= 1
6281 || Host.uEbx != 0 )
6282 pHlp->pfnPrintf(pHlp,
6283 "Hst: %08x/%04x %08x %08x %08x %08x\n",
6284 uLeaf, uSubLeaf, Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
6285
6286 /* Done? */
6287 if ( ( (uintptr_t)(pCurLeaf - paLeaves) >= cLeaves
6288 || pCurLeaf->uLeaf != uLeaf)
6289 && (uLeaf == UINT32_C(0x8000001d) && ((Host.uEax & 0x000f) == 0 || (Host.uEax & 0x000f) >= 8)) )
6290 break;
6291 }
6292 }
6293 pNextLeaf = pCurLeaf;
6294
6295 /*
6296 * Understandable output
6297 */
6298 if (iVerbosity)
6299 pHlp->pfnPrintf(pHlp,
6300 "Ext Name: %.4s%.4s%.4s\n"
6301 "Ext Supports: 0x80000000-%#010x\n",
6302 &pExtLeaf->uEbx, &pExtLeaf->uEdx, &pExtLeaf->uEcx, pExtLeaf->uEax);
6303
6304 pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x80000001), 0);
6305 if (iVerbosity && pCurLeaf)
6306 {
6307 uint32_t uEAX = pCurLeaf->uEax;
6308 pHlp->pfnPrintf(pHlp,
6309 "Family: %d \tExtended: %d \tEffective: %d\n"
6310 "Model: %d \tExtended: %d \tEffective: %d\n"
6311 "Stepping: %d\n"
6312 "Brand ID: %#05x\n",
6313 (uEAX >> 8) & 0xf, (uEAX >> 20) & 0x7f, RTX86GetCpuFamily(uEAX),
6314 (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, RTX86GetCpuModel(uEAX, fIntel),
6315 RTX86GetCpuStepping(uEAX),
6316 pCurLeaf->uEbx & 0xfff);
6317
6318 if (iVerbosity == 1)
6319 {
6320 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEdx, g_aExtLeaf1EdxSubFields, "Ext Features EDX:", 34);
6321                 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEcx, g_aExtLeaf1EcxSubFields, "Ext Features ECX:", 34);
6322 }
6323 else
6324 {
6325#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
6326 ASMCpuIdExSlow(0x80000001, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
6327#endif
6328 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aExtLeaf1EdxSubFields, "Ext Features", 56);
6329 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aExtLeaf1EcxSubFields, NULL, 56);
6330 if (Host.uEcx & X86_CPUID_AMD_FEATURE_ECX_SVM)
6331 {
6332#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
6333 ASMCpuIdExSlow(0x8000000a, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
6334#endif
6335 pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x8000000a), 0);
6336 uint32_t const uGstEdx = pCurLeaf ? pCurLeaf->uEdx : 0;
6337 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, uGstEdx, Host.uEdx, g_aExtLeafAEdxSubFields,
6338 "SVM Feature Identification (leaf A)", 56);
6339 }
6340 }
6341 }
6342
6343 if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x80000002), 0)) != NULL)
6344 {
6345 char szString[4*4*3+1] = {0};
6346 uint32_t *pu32 = (uint32_t *)szString;
6347 *pu32++ = pCurLeaf->uEax;
6348 *pu32++ = pCurLeaf->uEbx;
6349 *pu32++ = pCurLeaf->uEcx;
6350 *pu32++ = pCurLeaf->uEdx;
6351 pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x80000003), 0);
6352 if (pCurLeaf)
6353 {
6354 *pu32++ = pCurLeaf->uEax;
6355 *pu32++ = pCurLeaf->uEbx;
6356 *pu32++ = pCurLeaf->uEcx;
6357 *pu32++ = pCurLeaf->uEdx;
6358 }
6359 pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x80000004), 0);
6360 if (pCurLeaf)
6361 {
6362 *pu32++ = pCurLeaf->uEax;
6363 *pu32++ = pCurLeaf->uEbx;
6364 *pu32++ = pCurLeaf->uEcx;
6365 *pu32++ = pCurLeaf->uEdx;
6366 }
6367 pHlp->pfnPrintf(pHlp, "Full Name: \"%s\"\n", szString);
6368 }
6369
6370 if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x80000005), 0)) != NULL)
6371 {
6372 uint32_t uEAX = pCurLeaf->uEax;
6373 uint32_t uEBX = pCurLeaf->uEbx;
6374 uint32_t uECX = pCurLeaf->uEcx;
6375 uint32_t uEDX = pCurLeaf->uEdx;
6376 char sz1[32];
6377 char sz2[32];
6378
6379 pHlp->pfnPrintf(pHlp,
6380 "TLB 2/4M Instr/Uni: %s %3d entries\n"
6381 "TLB 2/4M Data: %s %3d entries\n",
6382 getCacheAss((uEAX >> 8) & 0xff, sz1), (uEAX >> 0) & 0xff,
6383 getCacheAss((uEAX >> 24) & 0xff, sz2), (uEAX >> 16) & 0xff);
6384 pHlp->pfnPrintf(pHlp,
6385 "TLB 4K Instr/Uni: %s %3d entries\n"
6386 "TLB 4K Data: %s %3d entries\n",
6387 getCacheAss((uEBX >> 8) & 0xff, sz1), (uEBX >> 0) & 0xff,
6388 getCacheAss((uEBX >> 24) & 0xff, sz2), (uEBX >> 16) & 0xff);
6389 pHlp->pfnPrintf(pHlp, "L1 Instr Cache Line Size: %d bytes\n"
6390 "L1 Instr Cache Lines Per Tag: %d\n"
6391 "L1 Instr Cache Associativity: %s\n"
6392 "L1 Instr Cache Size: %d KB\n",
6393 (uEDX >> 0) & 0xff,
6394 (uEDX >> 8) & 0xff,
6395 getCacheAss((uEDX >> 16) & 0xff, sz1),
6396 (uEDX >> 24) & 0xff);
6397 pHlp->pfnPrintf(pHlp,
6398 "L1 Data Cache Line Size: %d bytes\n"
6399 "L1 Data Cache Lines Per Tag: %d\n"
6400 "L1 Data Cache Associativity: %s\n"
6401 "L1 Data Cache Size: %d KB\n",
6402 (uECX >> 0) & 0xff,
6403 (uECX >> 8) & 0xff,
6404 getCacheAss((uECX >> 16) & 0xff, sz1),
6405 (uECX >> 24) & 0xff);
6406 }
6407
6408 if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x80000006), 0)) != NULL)
6409 {
6410 uint32_t uEAX = pCurLeaf->uEax;
6411 uint32_t uEBX = pCurLeaf->uEbx;
6412 uint32_t uECX = pCurLeaf->uEcx;
6413 uint32_t uEDX = pCurLeaf->uEdx;
6414
6415 pHlp->pfnPrintf(pHlp,
6416 "L2 TLB 2/4M Instr/Uni: %s %4d entries\n"
6417 "L2 TLB 2/4M Data: %s %4d entries\n",
6418 getL23CacheAss((uEAX >> 12) & 0xf), (uEAX >> 0) & 0xfff,
6419 getL23CacheAss((uEAX >> 28) & 0xf), (uEAX >> 16) & 0xfff);
6420 pHlp->pfnPrintf(pHlp,
6421 "L2 TLB 4K Instr/Uni: %s %4d entries\n"
6422 "L2 TLB 4K Data: %s %4d entries\n",
6423 getL23CacheAss((uEBX >> 12) & 0xf), (uEBX >> 0) & 0xfff,
6424 getL23CacheAss((uEBX >> 28) & 0xf), (uEBX >> 16) & 0xfff);
6425 pHlp->pfnPrintf(pHlp,
6426 "L2 Cache Line Size: %d bytes\n"
6427 "L2 Cache Lines Per Tag: %d\n"
6428 "L2 Cache Associativity: %s\n"
6429 "L2 Cache Size: %d KB\n",
6430 (uECX >> 0) & 0xff,
6431 (uECX >> 8) & 0xf,
6432 getL23CacheAss((uECX >> 12) & 0xf),
6433 (uECX >> 16) & 0xffff);
6434 pHlp->pfnPrintf(pHlp,
6435 "L3 Cache Line Size: %d bytes\n"
6436 "L3 Cache Lines Per Tag: %d\n"
6437 "L3 Cache Associativity: %s\n"
6438 "L3 Cache Size: %d KB\n",
6439 (uEDX >> 0) & 0xff,
6440 (uEDX >> 8) & 0xf,
6441 getL23CacheAss((uEDX >> 12) & 0xf),
6442 ((uEDX >> 18) & 0x3fff) * 512);
6443 }
6444
6445 if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x80000007), 0)) != NULL)
6446 {
6447#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
6448 ASMCpuIdExSlow(UINT32_C(0x80000007), 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
6449#endif
6450 if (pCurLeaf->uEdx || (Host.uEdx && iVerbosity))
6451 {
6452 if (iVerbosity < 1)
6453 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEdx, g_aExtLeaf7EdxSubFields, "APM Features EDX:", 34);
6454 else
6455 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aExtLeaf7EdxSubFields,
6456 "APM Features EDX", 56);
6457 }
6458 }
6459
6460 pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0x80000008), 0);
6461 if (pCurLeaf != NULL)
6462 {
6463#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
6464 ASMCpuIdExSlow(UINT32_C(0x80000008), 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
6465#endif
6466 if (pCurLeaf->uEbx || (Host.uEbx && iVerbosity))
6467 {
6468 if (iVerbosity < 1)
6469 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEbx, g_aExtLeaf8EbxSubFields, "Ext Features ext IDs EBX:", 34);
6470 else
6471 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEbx, Host.uEbx, g_aExtLeaf8EbxSubFields,
6472 "Ext Features ext IDs EBX", 56);
6473 }
6474
6475 if (iVerbosity)
6476 {
6477 uint32_t const uEAX = pCurLeaf->uEax;
6478 pHlp->pfnPrintf(pHlp,
6479 "Physical Address Width: %d bits\n"
6480 "Virtual Address Width: %d bits\n",
6481 (uEAX >> 0) & 0xff,
6482 (uEAX >> 8) & 0xff);
6483 if ( ((uEAX >> 16) & 0xff) != 0
6484 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
6485 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
6486 pHlp->pfnPrintf(pHlp, "Guest Physical Address Width: %d bits%s\n",
6487 (uEAX >> 16) & 0xff ? (uEAX >> 16) & 0xff : (uEAX >> 0) & 0xff,
6488 (uEAX >> 16) & 0xff ? "" : " (0)");
6489
6490 uint32_t const uECX = pCurLeaf->uEcx;
6491 if ( ((uECX >> 0) & 0xff) != 0
6492 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
6493 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)
6494 {
6495 uint32_t const cPhysCoreCount = ((uECX >> 0) & 0xff) + 1;
6496 uint32_t const cApicIdSize = (uECX >> 12) & 0xf ? RT_BIT_32((uECX >> 12) & 0xf) : cPhysCoreCount;
6497 pHlp->pfnPrintf(pHlp,
6498 "Physical Core Count: %d\n"
6499 "APIC ID size: %u (%#x)\n"
6500 "Performance TSC size: %u bits\n",
6501 cPhysCoreCount,
6502 cApicIdSize, cApicIdSize,
6503 (((uECX >> 16) & 0x3) << 3) + 40);
6504 }
6505             uint32_t const uEDX = pCurLeaf->uEdx;
6506 if (uEDX)
6507 pHlp->pfnPrintf(pHlp,
6508 "Max page count for INVLPGB: %#x\n"
6509 "Max ECX for RDPRU: %#x\n",
6510 (uEDX & 0xffff), uEDX >> 16);
6511 }
6512 }
6513
6514 pCurLeaf = pNextLeaf;
6515 }
6516
6517
6518
6519 /*
6520 * Centaur.
6521 */
6522 pCurLeaf = cpumR3CpuIdInfoRawRange(pHlp, paLeaves, cLeaves, pCurLeaf, UINT32_C(0xbfffffff), "Unknown CPUID Leaves");
6523
6524#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
6525 ASMCpuIdExSlow(UINT32_C(0xc0000000), 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
6526#endif
6527 cHstMax = Host.uEax >= UINT32_C(0xc0000001) && Host.uEax <= UINT32_C(0xc0000fff)
6528 ? RT_MIN(Host.uEax, UINT32_C(0xc0000fff)) : 0;
6529 cGstMax = (uintptr_t)(pCurLeaf - paLeaves) < cLeaves && pCurLeaf->uLeaf == UINT32_C(0xc0000000)
6530 ? RT_MIN(pCurLeaf->uEax, UINT32_C(0xc0000fff)) : 0;
6531 cMax = RT_MAX(cHstMax, cGstMax);
6532 if (cMax >= UINT32_C(0xc0000000))
6533 {
6534 pNextLeaf = cpumR3CpuIdInfoRawRange(pHlp, paLeaves, cLeaves, pCurLeaf, cMax, "Raw Centaur CPUID Leaves");
6535
6536 /*
6537 * Understandable output
6538 */
6539 if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0xc0000000), 0)) != NULL)
6540 pHlp->pfnPrintf(pHlp,
6541 "Centaur Supports: 0xc0000000-%#010x\n",
6542 pCurLeaf->uEax);
6543
6544 if (iVerbosity && (pCurLeaf = cpumCpuIdGetLeafInt(paLeaves, cLeaves, UINT32_C(0xc0000001), 0)) != NULL)
6545 {
6546#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
6547 ASMCpuIdExSlow(0xc0000001, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
6548#endif
6549 uint32_t uEdxGst = pCurLeaf->uEdx;
6550 uint32_t uEdxHst = Host.uEdx;
6551
6552 if (iVerbosity == 1)
6553 {
6554 pHlp->pfnPrintf(pHlp, "Centaur Features EDX: ");
6555 if (uEdxGst & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " AIS");
6556 if (uEdxGst & RT_BIT(1)) pHlp->pfnPrintf(pHlp, " AIS-E");
6557 if (uEdxGst & RT_BIT(2)) pHlp->pfnPrintf(pHlp, " RNG");
6558 if (uEdxGst & RT_BIT(3)) pHlp->pfnPrintf(pHlp, " RNG-E");
6559 if (uEdxGst & RT_BIT(4)) pHlp->pfnPrintf(pHlp, " LH");
6560 if (uEdxGst & RT_BIT(5)) pHlp->pfnPrintf(pHlp, " FEMMS");
6561 if (uEdxGst & RT_BIT(6)) pHlp->pfnPrintf(pHlp, " ACE");
6562 if (uEdxGst & RT_BIT(7)) pHlp->pfnPrintf(pHlp, " ACE-E");
6563 /* possibly indicating MM/HE and MM/HE-E on older chips... */
6564 if (uEdxGst & RT_BIT(8)) pHlp->pfnPrintf(pHlp, " ACE2");
6565 if (uEdxGst & RT_BIT(9)) pHlp->pfnPrintf(pHlp, " ACE2-E");
6566 if (uEdxGst & RT_BIT(10)) pHlp->pfnPrintf(pHlp, " PHE");
6567 if (uEdxGst & RT_BIT(11)) pHlp->pfnPrintf(pHlp, " PHE-E");
6568 if (uEdxGst & RT_BIT(12)) pHlp->pfnPrintf(pHlp, " PMM");
6569 if (uEdxGst & RT_BIT(13)) pHlp->pfnPrintf(pHlp, " PMM-E");
6570 for (unsigned iBit = 14; iBit < 32; iBit++)
6571 if (uEdxGst & RT_BIT(iBit))
6572 pHlp->pfnPrintf(pHlp, " %d", iBit);
6573 pHlp->pfnPrintf(pHlp, "\n");
6574 }
6575 else
6576 {
6577 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n");
6578 pHlp->pfnPrintf(pHlp, "AIS - Alternate Instruction Set = %d (%d)\n", !!(uEdxGst & RT_BIT( 0)), !!(uEdxHst & RT_BIT( 0)));
6579 pHlp->pfnPrintf(pHlp, "AIS-E - AIS enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 1)), !!(uEdxHst & RT_BIT( 1)));
6580 pHlp->pfnPrintf(pHlp, "RNG - Random Number Generator = %d (%d)\n", !!(uEdxGst & RT_BIT( 2)), !!(uEdxHst & RT_BIT( 2)));
6581 pHlp->pfnPrintf(pHlp, "RNG-E - RNG enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 3)), !!(uEdxHst & RT_BIT( 3)));
6582 pHlp->pfnPrintf(pHlp, "LH - LongHaul MSR 0000_110Ah = %d (%d)\n", !!(uEdxGst & RT_BIT( 4)), !!(uEdxHst & RT_BIT( 4)));
6583 pHlp->pfnPrintf(pHlp, "FEMMS - FEMMS = %d (%d)\n", !!(uEdxGst & RT_BIT( 5)), !!(uEdxHst & RT_BIT( 5)));
6584 pHlp->pfnPrintf(pHlp, "ACE - Advanced Cryptography Engine = %d (%d)\n", !!(uEdxGst & RT_BIT( 6)), !!(uEdxHst & RT_BIT( 6)));
6585 pHlp->pfnPrintf(pHlp, "ACE-E - ACE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 7)), !!(uEdxHst & RT_BIT( 7)));
6586 /* possibly indicating MM/HE and MM/HE-E on older chips... */
6587 pHlp->pfnPrintf(pHlp, "ACE2 - Advanced Cryptography Engine 2 = %d (%d)\n", !!(uEdxGst & RT_BIT( 8)), !!(uEdxHst & RT_BIT( 8)));
6588 pHlp->pfnPrintf(pHlp, "ACE2-E - ACE2 enabled = %d (%d)\n", !!(uEdxGst & RT_BIT( 9)), !!(uEdxHst & RT_BIT( 9)));
6589 pHlp->pfnPrintf(pHlp, "PHE - Padlock Hash Engine = %d (%d)\n", !!(uEdxGst & RT_BIT(10)), !!(uEdxHst & RT_BIT(10)));
6590 pHlp->pfnPrintf(pHlp, "PHE-E - PHE enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(11)), !!(uEdxHst & RT_BIT(11)));
6591 pHlp->pfnPrintf(pHlp, "PMM - Montgomery Multiplier = %d (%d)\n", !!(uEdxGst & RT_BIT(12)), !!(uEdxHst & RT_BIT(12)));
6592 pHlp->pfnPrintf(pHlp, "PMM-E - PMM enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(13)), !!(uEdxHst & RT_BIT(13)));
6593 pHlp->pfnPrintf(pHlp, "14 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(14)), !!(uEdxHst & RT_BIT(14)));
6594 pHlp->pfnPrintf(pHlp, "15 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(15)), !!(uEdxHst & RT_BIT(15)));
6595 pHlp->pfnPrintf(pHlp, "Parallax = %d (%d)\n", !!(uEdxGst & RT_BIT(16)), !!(uEdxHst & RT_BIT(16)));
6596 pHlp->pfnPrintf(pHlp, "Parallax enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(17)), !!(uEdxHst & RT_BIT(17)));
6597 pHlp->pfnPrintf(pHlp, "Overstress = %d (%d)\n", !!(uEdxGst & RT_BIT(18)), !!(uEdxHst & RT_BIT(18)));
6598 pHlp->pfnPrintf(pHlp, "Overstress enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(19)), !!(uEdxHst & RT_BIT(19)));
6599 pHlp->pfnPrintf(pHlp, "TM3 - Temperature Monitoring 3 = %d (%d)\n", !!(uEdxGst & RT_BIT(20)), !!(uEdxHst & RT_BIT(20)));
6600 pHlp->pfnPrintf(pHlp, "TM3-E - TM3 enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(21)), !!(uEdxHst & RT_BIT(21)));
6601 pHlp->pfnPrintf(pHlp, "RNG2 - Random Number Generator 2 = %d (%d)\n", !!(uEdxGst & RT_BIT(22)), !!(uEdxHst & RT_BIT(22)));
6602 pHlp->pfnPrintf(pHlp, "RNG2-E - RNG2 enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(23)), !!(uEdxHst & RT_BIT(23)));
6603 pHlp->pfnPrintf(pHlp, "24 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(24)), !!(uEdxHst & RT_BIT(24)));
6604 pHlp->pfnPrintf(pHlp, "PHE2 - Padlock Hash Engine 2 = %d (%d)\n", !!(uEdxGst & RT_BIT(25)), !!(uEdxHst & RT_BIT(25)));
6605 pHlp->pfnPrintf(pHlp, "PHE2-E - PHE2 enabled = %d (%d)\n", !!(uEdxGst & RT_BIT(26)), !!(uEdxHst & RT_BIT(26)));
6606 for (unsigned iBit = 27; iBit < 32; iBit++)
6607 if ((uEdxGst | uEdxHst) & RT_BIT(iBit))
6608 pHlp->pfnPrintf(pHlp, "Bit %d = %d (%d)\n", iBit, !!(uEdxGst & RT_BIT(iBit)), !!(uEdxHst & RT_BIT(iBit)));
6609 pHlp->pfnPrintf(pHlp, "\n");
6610 }
6611 }
6612
6613 pCurLeaf = pNextLeaf;
6614 }
6615
6616 /*
6617 * The remainder.
6618 */
6619 pCurLeaf = cpumR3CpuIdInfoRawRange(pHlp, paLeaves, cLeaves, pCurLeaf, UINT32_C(0xffffffff), "Unknown CPUID Leaves");
6620}
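/*
 * Registration sketch (illustration only; the actual hookup happens during
 * CPUM initialization elsewhere in this module, and the description string
 * here is approximate): the handler above is a DBGF info callback, after
 * which it can be invoked from the VM debugger as 'info cpuid [terse|verbose]':
 * @code
 *     rc = DBGFR3InfoRegisterInternal(pVM, "cpuid",
 *                                     "Displays the guest cpuid leaves.",
 *                                     &cpumR3CpuIdInfo);
 * @endcode
 */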
6621
6622#endif /* !IN_VBOX_CPU_REPORT */
6623