Changeset 100935 in vbox

Timestamp: Aug 22, 2023 9:30:06 AM
Location:  trunk
Files:     9 edited
- include/VBox/vmm/cpum-x86-amd64.h (modified) (2 diffs)
- include/VBox/vmm/cpumctx-x86-amd64.h (modified) (2 diffs)
- include/iprt/x86.h (modified) (2 diffs)
- src/VBox/VMM/VMMAll/CPUMAllCpuId.cpp (modified) (1 diff)
- src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp (modified) (12 diffs)
- src/VBox/VMM/VMMR3/CPUM.cpp (modified) (4 diffs)
- src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp (modified) (1 diff)
- src/VBox/VMM/include/CPUMInternal.h (modified) (1 diff)
- src/VBox/VMM/include/CPUMInternal.mac (modified) (1 diff)
Legend (diffs below are shown in unified format):
- Lines starting with "+" were added.
- Lines starting with "-" were removed.
- Lines starting with a space are unchanged context.
trunk/include/VBox/vmm/cpum-x86-amd64.h
r100855 → r100935

@@ -826,4 +826,6 @@
     /** Intel SYSENTER/SYSEXIT support */
     uint32_t fSysEnter : 1;
+    /** Supports MTRR. */
+    uint32_t fMtrr : 1;
     /** First generation APIC. */
     uint32_t fApic : 1;
@@ -950,5 +952,5 @@
     /** Alignment padding / reserved for future use (96 bits total, plus 12 bytes
      * prior to the bit fields -> total of 24 bytes) */
-    uint32_t fPadding0 : 22;
+    uint32_t fPadding0 : 21;
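The new fMtrr bit is paid for by shrinking fPadding0 from 22 to 21 bits, so the feature bit-field block keeps its documented 96-bit size. A minimal standalone sketch of that invariant (illustrative names, not the real structure layout):

    #include <stdint.h>
    #include <assert.h>

    typedef struct EXAMPLEFEATURES
    {
        uint32_t fSysEnter : 1;
        uint32_t fMtrr     : 1;  /* newly added feature bit */
        uint32_t fApic     : 1;
        uint32_t fPadding0 : 29; /* shrunk from 30 so 1+1+1+29 == 32 */
    } EXAMPLEFEATURES;

    /* C11: compilation fails if a bit is added without shrinking the padding. */
    static_assert(sizeof(EXAMPLEFEATURES) == sizeof(uint32_t), "bit-field block grew");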
trunk/include/VBox/vmm/cpumctx-x86-amd64.h
r98955 → r100935

@@ -1048,4 +1048,14 @@
 /** @} */

+/** Maximum number of variable-range MTRR pairs supported.
+ *
+ * Intel documents up to 10, see IA32_MTRR_PHYS[BASE|MASK](0..9).
+ * AMD documents up to 8, see MTRR_phys[Base|Mask](0..7).
+ * Hyper-V documents up to 16, see WHvX64RegisterMsrMtrrPhys[Base|Mask](0..F).
+ *
+ * CPUs can in theory accommodate up to 39 pairs ([0x200,0x201]..[0x24e,0x24f])
+ * unless AMD/Intel decides to put something else in this range.
+ */
+#define CPUMCTX_MAX_MTRRVAR_COUNT 16
@@ -1077,4 +1087,6 @@
     uint64_t SpecCtrl;      /**< IA32_SPEC_CTRL */
     uint64_t ArchCaps;      /**< IA32_ARCH_CAPABILITIES */
+    uint64_t MtrrCap;       /**< IA32_MTRR_CAP */
+    X86MTRRVAR aMtrrVarMsrs[CPUMCTX_MAX_MTRRVAR_COUNT]; /**< IA32_MTRR_PHYSBASE, IA32_MTRR_PHYSMASK */
 } msr;
 uint64_t au64[64];
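The new array is sized and indexed to mirror the architectural MSR numbering: pair n occupies MSR 0x200 + 2n (PHYSBASE) and 0x200 + 2n + 1 (PHYSMASK), which is also where the (idMsr - MSR_IA32_MTRR_PHYSBASE0) / 2 assertions in the MSR handlers further down come from. A small standalone sketch of that mapping (illustrative names, assuming PHYSBASE0 is MSR 0x200):

    #include <stdbool.h>
    #include <stdint.h>

    #define EX_MSR_IA32_MTRR_PHYSBASE0 UINT32_C(0x200)

    /* Pair index for any variable-range MTRR MSR. */
    static inline uint32_t exMtrrPairIndex(uint32_t idMsr)
    {
        return (idMsr - EX_MSR_IA32_MTRR_PHYSBASE0) / 2;
    }

    /* Even MSR numbers are PHYSBASEn, odd ones PHYSMASKn (0x200 is even). */
    static inline bool exMtrrIsPhysMask(uint32_t idMsr)
    {
        return (idMsr & 1) != 0;
    }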
trunk/include/iprt/x86.h
r100837 → r100935

@@ -1355,4 +1355,44 @@
 /** MTRR Capabilities. */
 #define MSR_IA32_MTRR_CAP 0xFE
+/** Bits 0-7 - VCNT - Variable range registers count. */
+#define MSR_IA32_MTRR_CAP_VCNT_MASK UINT64_C(0x00000000000000ff)
+/** Bit 8 - FIX - Fixed range registers supported. */
+#define MSR_IA32_MTRR_CAP_FIX RT_BIT_64(8)
+/** Bit 10 - WC - Write-Combining memory type supported. */
+#define MSR_IA32_MTRR_CAP_WC RT_BIT_64(10)
+/** Bit 11 - SMRR - System Management Range Register supported. */
+#define MSR_IA32_MTRR_CAP_SMRR RT_BIT_64(11)
+/** Bit 12 - PRMRR - Processor Reserved Memory Range Register supported. */
+#define MSR_IA32_MTRR_CAP_PRMRR RT_BIT_64(12)
+
+/**
+ * Variable-range MTRR MSR pair.
+ */
+typedef struct X86MTRRVAR
+{
+    uint64_t MtrrPhysBase;  /**< IA32_MTRR_PHYSBASEn */
+    uint64_t MtrrPhysMask;  /**< IA32_MTRR_PHYSMASKn */
+} X86MTRRVAR;
+#ifndef VBOX_FOR_DTRACE_LIB
+AssertCompileSize(X86MTRRVAR, 16);
+#endif
+/** Pointer to a variable-range MTRR MSR pair. */
+typedef X86MTRRVAR *PX86MTRRVAR;
+/** Pointer to a const variable-range MTRR MSR pair. */
+typedef const X86MTRRVAR *PCX86MTRRVAR;
+
+/** Memory types that can be encoded in MTRRs.
+ * @{ */
+/** Uncacheable. */
+#define X86_MTRR_MT_UC 0
+/** Write Combining. */
+#define X86_MTRR_MT_WC 1
+/** Write-through. */
+#define X86_MTRR_MT_WT 4
+/** Write-protected. */
+#define X86_MTRR_MT_WP 5
+/** Writeback. */
+#define X86_MTRR_MT_WB 6
+/** @} */
@@ -1669,6 +1709,17 @@
 /** @} */

-/** MTRR Default Range. */
+/** MTRR Default Type.
+ * @{ */
 #define MSR_IA32_MTRR_DEF_TYPE 0x2FF
+#define MSR_IA32_MTRR_DEF_TYPE_DEF_MT_MASK 0xFF
+#define MSR_IA32_MTRR_DEF_TYPE_FIXED_EN    RT_BIT_64(10)
+#define MSR_IA32_MTRR_DEF_TYPE_MTRR_EN     RT_BIT_64(11)
+#define MSR_IA32_MTRR_DEF_TYPE_VALID_MASK  (  MSR_IA32_MTRR_DEF_TYPE_DEF_MT_MASK \
+                                             | MSR_IA32_MTRR_DEF_TYPE_FIXED_EN \
+                                             | MSR_IA32_MTRR_DEF_TYPE_MTRR_EN)
+/** @} */
+
+/** Variable-range MTRR physical mask valid. */
+#define MSR_IA32_MTRR_PHYSMASK_VALID RT_BIT_64(11)

 /** Global performance counter control facilities (Intel only). */
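With these constants, decoding a raw IA32_MTRR_CAP value is just masking. A worked sketch against the hypothetical raw value 0xd0a (a value commonly seen on Intel parts; not taken from this changeset):

    uint64_t const uMtrrCap   = UINT64_C(0xd0a);
    uint8_t  const cVarRanges = uMtrrCap & MSR_IA32_MTRR_CAP_VCNT_MASK;      /* 0x0a -> 10 pairs */
    bool     const fFix       = RT_BOOL(uMtrrCap & MSR_IA32_MTRR_CAP_FIX);   /* bit 8 set   -> true  */
    bool     const fWc        = RT_BOOL(uMtrrCap & MSR_IA32_MTRR_CAP_WC);    /* bit 10 set  -> true  */
    bool     const fSmrr      = RT_BOOL(uMtrrCap & MSR_IA32_MTRR_CAP_SMRR);  /* bit 11 set  -> true  */
    bool     const fPrmrr     = RT_BOOL(uMtrrCap & MSR_IA32_MTRR_CAP_PRMRR); /* bit 12 clear -> false */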
trunk/src/VBox/VMM/VMMAll/CPUMAllCpuId.cpp
r100854 → r100935

@@ -1446,4 +1446,5 @@
     pFeatures->fTsc               = RT_BOOL(pStd1Leaf->uEdx & X86_CPUID_FEATURE_EDX_TSC);
     pFeatures->fSysEnter          = RT_BOOL(pStd1Leaf->uEdx & X86_CPUID_FEATURE_EDX_SEP);
+    pFeatures->fMtrr              = RT_BOOL(pStd1Leaf->uEdx & X86_CPUID_FEATURE_EDX_MTRR);
     pFeatures->fHypervisorPresent = RT_BOOL(pStd1Leaf->uEcx & X86_CPUID_FEATURE_ECX_HVP);
     pFeatures->fMonitorMWait      = RT_BOOL(pStd1Leaf->uEcx & X86_CPUID_FEATURE_ECX_MONITOR);
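For context, the bit tested here is CPUID leaf 1, EDX bit 12 — the architectural MTRR feature flag on both Intel and AMD. A standalone sketch of the same probe outside VBox, using the GCC/clang <cpuid.h> helper:

    #include <cpuid.h>
    #include <stdbool.h>

    static bool exHostHasMtrr(void)
    {
        unsigned uEax, uEbx, uEcx, uEdx;
        if (!__get_cpuid(1, &uEax, &uEbx, &uEcx, &uEdx))
            return false;               /* leaf 1 not supported */
        return (uEdx & (1u << 12)) != 0; /* EDX bit 12 = MTRR */
    }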
trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp
r98103 → r100935

@@ -439,7 +439,8 @@
  * @param   pVCpu   The cross context per CPU structure.
  */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32MtrrCap(PCVMCPU pVCpu)
-{
-    RT_NOREF_PV(pVCpu);
+VMM_INT_DECL(uint64_t) CPUMGetGuestIa32MtrrCap(PCVMCPUCC pVCpu)
+{
+    if (pVCpu->CTX_SUFF(pVM)->cpum.s.fMtrrRead)
+        return pVCpu->cpum.s.GuestMsrs.msr.MtrrCap;

     /* This is currently a bit weird. :-) */
@@ -448,7 +449,9 @@
     bool const fFixedRangeRegisters = false;
     bool const fWriteCombiningType  = false;
+    bool const fProcRsvdRangeRegisters = false;
     return cVariableRangeRegs
-         | (fFixedRangeRegisters            ? RT_BIT_64(8)  : 0)
-         | (fWriteCombiningType             ? RT_BIT_64(10) : 0)
-         | (fSystemManagementRangeRegisters ? RT_BIT_64(11) : 0);
+         | (fFixedRangeRegisters            ? MSR_IA32_MTRR_CAP_FIX   : 0)
+         | (fWriteCombiningType             ? MSR_IA32_MTRR_CAP_WC    : 0)
+         | (fSystemManagementRangeRegisters ? MSR_IA32_MTRR_CAP_SMRR  : 0)
+         | (fProcRsvdRangeRegisters         ? MSR_IA32_MTRR_CAP_PRMRR : 0);
@@ -467,6 +470,16 @@
 {
     RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    /** @todo Implement variable MTRR storage. */
-    Assert(pRange->uValue == (idMsr - 0x200) / 2);
-    *puValue = 0;
+    Assert(pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.fMtrr);
+    Assert(pRange->uValue == (idMsr - MSR_IA32_MTRR_PHYSBASE0) / 2);
+    if (pVCpu->CTX_SUFF(pVM)->cpum.s.fMtrrRead)
+    {
+        AssertLogRelMsgReturn(pRange->uValue < RT_ELEMENTS(pVCpu->cpum.s.GuestMsrs.msr.aMtrrVarMsrs),
+                              ("MTRR MSR (%#RX32) out-of-bounds, must be <= %#RX32\n", idMsr, CPUMCTX_MAX_MTRRVAR_COUNT),
+                              VERR_CPUM_RAISE_GP_0);
+        AssertLogRelMsgReturn(!(idMsr % 2),
+                              ("MTRR MSR (%#RX32) invalid, must be at even offset\n", idMsr), VERR_CPUM_RAISE_GP_0);
+        *puValue = pVCpu->cpum.s.GuestMsrs.msr.aMtrrVarMsrs[pRange->uValue].MtrrPhysBase;
+    }
+    else
+        *puValue = 0;
     return VINF_SUCCESS;
@@ -480,6 +493,7 @@
      * Validate the value.
      */
-    Assert(pRange->uValue == (idMsr - 0x200) / 2);
-    RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(uRawValue); RT_NOREF_PV(pRange);
+    Assert(pRange->uValue == (idMsr - MSR_IA32_MTRR_PHYSBASE0) / 2);
+    RT_NOREF_PV(uRawValue);
+    Assert(pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.fMtrr);

     uint8_t uType = uValue & 0xff;
@@ -501,4 +515,14 @@
      * Store it.
      */
-    /** @todo Implement variable MTRR storage. */
+    if (pVCpu->CTX_SUFF(pVM)->cpum.s.fMtrrWrite)
+    {
+        AssertCompile(CPUMCTX_MAX_MTRRVAR_COUNT == RT_ELEMENTS(pVCpu->cpum.s.GuestMsrs.msr.aMtrrVarMsrs));
+        AssertLogRelMsgReturn(pRange->uValue < CPUMCTX_MAX_MTRRVAR_COUNT,
+                              ("MTRR MSR (%#RX32) out-of-bounds, must be <= %#RX32\n", idMsr, CPUMCTX_MAX_MTRRVAR_COUNT),
+                              VERR_CPUM_RAISE_GP_0);
+        AssertLogRelMsgReturn(!(idMsr % 2),
+                              ("MTRR MSR (%#RX32) invalid, must be at even offset\n", idMsr), VERR_CPUM_RAISE_GP_0);
+        pVCpu->cpum.s.GuestMsrs.msr.aMtrrVarMsrs[pRange->uValue].MtrrPhysBase = uValue;
+        /** @todo Act on the potential memory type change. */
+    }
     return VINF_SUCCESS;
@@ -509,7 +533,17 @@
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32MtrrPhysMaskN(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
 {
-    RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    /** @todo Implement variable MTRR storage. */
-    Assert(pRange->uValue == (idMsr - 0x200) / 2);
-    *puValue = 0;
+    RT_NOREF_PV(idMsr);
+    Assert(pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.fMtrr);
+    Assert(pRange->uValue == (idMsr - MSR_IA32_MTRR_PHYSBASE0) / 2);
+    if (pVCpu->CTX_SUFF(pVM)->cpum.s.fMtrrRead)
+    {
+        AssertLogRelMsgReturn(pRange->uValue < RT_ELEMENTS(pVCpu->cpum.s.GuestMsrs.msr.aMtrrVarMsrs),
+                              ("MTRR MSR (%#RX32) out-of-bounds, must be <= %#RX32\n", idMsr, CPUMCTX_MAX_MTRRVAR_COUNT),
+                              VERR_CPUM_RAISE_GP_0);
+        AssertLogRelMsgReturn(idMsr % 2,
+                              ("MTRR MSR (%#RX32) invalid, must be at odd offset\n", idMsr), VERR_CPUM_RAISE_GP_0);
+        *puValue = pVCpu->cpum.s.GuestMsrs.msr.aMtrrVarMsrs[pRange->uValue].MtrrPhysMask;
+    }
+    else
+        *puValue = 0;
     return VINF_SUCCESS;
@@ -523,6 +557,7 @@
      * Validate the value.
      */
-    Assert(pRange->uValue == (idMsr - 0x200) / 2);
+    Assert(pRange->uValue == (idMsr - MSR_IA32_MTRR_PHYSBASE0) / 2);
     RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(uRawValue); RT_NOREF_PV(pRange);
+    Assert(pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.fMtrr);

     uint64_t fInvPhysMask = ~(RT_BIT_64(pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.cMaxPhysAddrWidth) - 1U);
@@ -537,4 +572,13 @@
      * Store it.
      */
-    /** @todo Implement variable MTRR storage. */
+    if (pVCpu->CTX_SUFF(pVM)->cpum.s.fMtrrWrite)
+    {
+        AssertLogRelMsgReturn(pRange->uValue < RT_ELEMENTS(pVCpu->cpum.s.GuestMsrs.msr.aMtrrVarMsrs),
+                              ("MTRR MSR (%#RX32) out-of-bounds, must be <= %#RX32\n", idMsr, CPUMCTX_MAX_MTRRVAR_COUNT),
+                              VERR_CPUM_RAISE_GP_0);
+        AssertLogRelMsgReturn(idMsr % 2,
+                              ("MTRR MSR (%#RX32) invalid, must be at odd offset\n", idMsr), VERR_CPUM_RAISE_GP_0);
+        pVCpu->cpum.s.GuestMsrs.msr.aMtrrVarMsrs[pRange->uValue].MtrrPhysMask = uValue;
+        /** @todo Act on the potential memory type change. */
+    }
     return VINF_SUCCESS;
@@ -547,4 +591,5 @@
     RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
     CPUM_MSR_ASSERT_CPUMCPU_OFFSET_RETURN(pVCpu, pRange, uint64_t, puFixedMtrr);
+    Assert(pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.fMtrr);
     *puValue = *puFixedMtrr;
     return VINF_SUCCESS;
@@ -557,4 +602,5 @@
     CPUM_MSR_ASSERT_CPUMCPU_OFFSET_RETURN(pVCpu, pRange, uint64_t, puFixedMtrr);
     RT_NOREF_PV(idMsr); RT_NOREF_PV(uRawValue);
+    Assert(pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.fMtrr);

     for (uint32_t cShift = 0; cShift < 63; cShift += 8)
@@ -577,4 +623,5 @@
 {
     RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
+    Assert(pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.fMtrr);
     *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType;
     return VINF_SUCCESS;
@@ -586,6 +633,7 @@
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); RT_NOREF_PV(uRawValue);
-
-    uint8_t uType = uValue & 0xff;
+    Assert(pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.fMtrr);
+
+    uint8_t uType = uValue & MSR_IA32_MTRR_DEF_TYPE_DEF_MT_MASK;
     if ((uType >= 7) || (uType == 2) || (uType == 3))
     {
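The PHYSMASK write handler keeps its existing reserved-bit check: any mask bits at or above the guest's maximum physical address width must raise #GP(0). A worked sketch of that check under an assumed 36-bit address width (illustrative, not VBox code):

    #include <stdbool.h>
    #include <stdint.h>

    static bool exMtrrPhysMaskAcceptable(uint64_t uValue, uint8_t cMaxPhysAddrWidth)
    {
        /* Bits [cMaxPhysAddrWidth..63] are reserved and must be zero. */
        uint64_t const fInvPhysMask = ~((UINT64_C(1) << cMaxPhysAddrWidth) - 1U);
        return (uValue & fInvPhysMask) == 0; /* false -> raise #GP(0) */
    }

    /* exMtrrPhysMaskAcceptable(UINT64_C(0xFFFF00800),  36) -> true  (below 2^36)
       exMtrrPhysMaskAcceptable(UINT64_C(0x1000000000), 36) -> false (bit 36 set) */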
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
r100532 → r100935

@@ -3212,12 +3212,12 @@
  *
  * @param   pVM         The cross context VM structure.
- * @param   pCtx        The context to format.
+ * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pHlp        Output functions.
  * @param   enmType     The dump type.
  * @param   pszPrefix   Register name prefix.
  */
-static void cpumR3InfoOne(PVM pVM, PCPUMCTX pCtx, PCDBGFINFOHLP pHlp, CPUMDUMPTYPE enmType, const char *pszPrefix)
-{
-    NOREF(pVM);
+static void cpumR3InfoOne(PVM pVM, PVMCPU pVCpu, PCDBGFINFOHLP pHlp, CPUMDUMPTYPE enmType, const char *pszPrefix)
+{
+    PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;

     /*
@@ -3528,4 +3528,64 @@
             for (unsigned i = 0; i < RT_ELEMENTS(pCtx->aPaePdpes); i++)
                 pHlp->pfnPrintf(pHlp, "%sPAE PDPTE %u  =%016RX64\n", pszPrefix, i, pCtx->aPaePdpes[i]);
+
+            /*
+             * MTRRs.
+             */
+            if (pVM->cpum.s.GuestFeatures.fMtrr)
+            {
+                pHlp->pfnPrintf(pHlp,
+                                "%sMTRR_CAP         =%016RX64\n"
+                                "%sMTRR_DEF_TYPE    =%016RX64\n"
+                                "%sMtrrFix64K_00000 =%016RX64\n"
+                                "%sMtrrFix16K_80000 =%016RX64\n"
+                                "%sMtrrFix16K_A0000 =%016RX64\n"
+                                "%sMtrrFix4K_C0000  =%016RX64\n"
+                                "%sMtrrFix4K_C8000  =%016RX64\n"
+                                "%sMtrrFix4K_D0000  =%016RX64\n"
+                                "%sMtrrFix4K_D8000  =%016RX64\n"
+                                "%sMtrrFix4K_E0000  =%016RX64\n"
+                                "%sMtrrFix4K_E8000  =%016RX64\n"
+                                "%sMtrrFix4K_F0000  =%016RX64\n"
+                                "%sMtrrFix4K_F8000  =%016RX64\n",
+                                pszPrefix, pVCpu->cpum.s.GuestMsrs.msr.MtrrCap,
+                                pszPrefix, pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType,
+                                pszPrefix, pVCpu->cpum.s.GuestMsrs.msr.MtrrFix64K_00000,
+                                pszPrefix, pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_80000,
+                                pszPrefix, pVCpu->cpum.s.GuestMsrs.msr.MtrrFix16K_A0000,
+                                pszPrefix, pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C0000,
+                                pszPrefix, pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_C8000,
+                                pszPrefix, pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D0000,
+                                pszPrefix, pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_D8000,
+                                pszPrefix, pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E0000,
+                                pszPrefix, pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_E8000,
+                                pszPrefix, pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F0000,
+                                pszPrefix, pVCpu->cpum.s.GuestMsrs.msr.MtrrFix4K_F8000);
+
+                for (uint8_t iRange = 0; iRange < RT_ELEMENTS(pVCpu->cpum.s.GuestMsrs.msr.aMtrrVarMsrs); iRange++)
+                {
+                    PCX86MTRRVAR pMtrrVar = &pVCpu->cpum.s.GuestMsrs.msr.aMtrrVarMsrs[iRange];
+                    bool const fIsValid = RT_BOOL(pMtrrVar->MtrrPhysMask & MSR_IA32_MTRR_PHYSMASK_VALID);
+                    if (fIsValid)
+                    {
+                        uint64_t const fInvPhysMask = ~(RT_BIT_64(pVM->cpum.s.GuestFeatures.cMaxPhysAddrWidth) - 1U);
+                        RTGCPHYS const GCPhysMask  = pMtrrVar->MtrrPhysMask & X86_PAGE_BASE_MASK;
+                        RTGCPHYS const GCPhysFirst = pMtrrVar->MtrrPhysBase & X86_PAGE_BASE_MASK;
+                        RTGCPHYS const GCPhysLast  = (GCPhysFirst | ~GCPhysMask) & ~fInvPhysMask;
+                        Assert((GCPhysLast & GCPhysMask) == (GCPhysFirst & GCPhysMask));
+                        Assert(((GCPhysLast + 1) & GCPhysMask) != (GCPhysFirst & GCPhysMask));
+                        pHlp->pfnPrintf(pHlp,
+                                        "%sMTRR_PHYSBASE[%2u] =%016RX64 First=%016RX64\n"
+                                        "%sMTRR_PHYSMASK[%2u] =%016RX64 Last =%016RX64\n",
+                                        pszPrefix, iRange, pMtrrVar->MtrrPhysBase, GCPhysFirst,
+                                        pszPrefix, iRange, pMtrrVar->MtrrPhysMask, GCPhysLast);
+                    }
+                    else
+                        pHlp->pfnPrintf(pHlp,
+                                        "%sMTRR_PHYSBASE[%2u] =%016RX64\n"
+                                        "%sMTRR_PHYSMASK[%2u] =%016RX64\n",
+                                        pszPrefix, iRange, pMtrrVar->MtrrPhysBase,
+                                        pszPrefix, iRange, pMtrrVar->MtrrPhysMask);
+                }
+            }
             break;
         }
@@ -3610,6 +3670,5 @@
     pHlp->pfnPrintf(pHlp, "Guest CPUM (VCPU %d) state: %s\n", pVCpu->idCpu, pszComment);

-    PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
-    cpumR3InfoOne(pVM, pCtx, pHlp, enmType, "");
+    cpumR3InfoOne(pVM, pVCpu, pHlp, enmType, "");
 }
@@ -4539,4 +4598,44 @@
         }
     }
+
+    /*
+     * Initialize MTRRs.
+     */
+    if (pVM->cpum.s.fMtrrRead)
+    {
+        uint64_t cbRam;
+        CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRam, 0);
+        AssertReturn(cbRam > _1M, VERR_CPUM_IPE_1);
+        RTGCPHYS const GCPhysFirst   = _1M;
+        RTGCPHYS const GCPhysLast    = cbRam - 1;
+        RTGCPHYS const GCPhysLength  = GCPhysLast - GCPhysFirst;
+        uint64_t const fInvPhysMask  = ~(RT_BIT_64(pVM->cpum.s.GuestFeatures.cMaxPhysAddrWidth) - 1U);
+        RTGCPHYS const GCPhysMask    = (~(GCPhysLength - 1) & ~fInvPhysMask) & X86_PAGE_BASE_MASK;
+        uint64_t const uMtrrPhysMask = GCPhysMask | MSR_IA32_MTRR_PHYSMASK_VALID;
+#ifdef VBOX_STRICT
+        /* Paranoia. */
+        Assert(GCPhysLast == ((GCPhysFirst | ~GCPhysMask) & ~fInvPhysMask));
+        Assert((GCPhysLast & GCPhysMask) == (GCPhysFirst & GCPhysMask));
+        Assert(((GCPhysLast + 1) & GCPhysMask) != (GCPhysFirst & GCPhysMask));
+#endif
+        for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+        {
+            PCPUMCTXMSRS pCtxMsrs = &pVM->apCpusR3[idCpu]->cpum.s.GuestMsrs;
+            pCtxMsrs->msr.MtrrFix64K_00000 = 0x0606060606060606;
+            pCtxMsrs->msr.MtrrFix16K_80000 = 0x0606060606060606;
+            pCtxMsrs->msr.MtrrFix16K_A0000 = 0;
+            pCtxMsrs->msr.MtrrFix4K_C0000  = 0x0505050505050505;
+            pCtxMsrs->msr.MtrrFix4K_C8000  = 0x0505050505050505;
+            pCtxMsrs->msr.MtrrFix4K_D0000  = 0x0505050505050505;
+            pCtxMsrs->msr.MtrrFix4K_D8000  = 0x0505050505050505;
+            pCtxMsrs->msr.MtrrFix4K_E0000  = 0x0505050505050505;
+            pCtxMsrs->msr.MtrrFix4K_E8000  = 0x0505050505050505;
+            pCtxMsrs->msr.MtrrFix4K_F0000  = 0x0505050505050505;
+            pCtxMsrs->msr.MtrrFix4K_F8000  = 0x0505050505050505;
+            pCtxMsrs->msr.aMtrrVarMsrs[0].MtrrPhysBase = GCPhysFirst | X86_MTRR_MT_WB;
+            pCtxMsrs->msr.aMtrrVarMsrs[0].MtrrPhysMask = uMtrrPhysMask;
+        }
+        LogRel(("CPUM: Initialized MTRRs (MtrrPhysMask=%RGp GCPhysLast=%RGp)\n", uMtrrPhysMask, GCPhysLast));
+    }
     break;
 }
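The dump code above decodes each valid base/mask pair into the physical range it covers: the range starts at the page-aligned base and ends where the mask stops matching, clipped to the guest's physical address width. A standalone sketch of the same arithmetic (illustrative names, not VBox code):

    #include <stdint.h>

    #define EX_PAGE_BASE_MASK UINT64_C(0xfffffffffffff000) /* like X86_PAGE_BASE_MASK */

    static void exMtrrVarDecode(uint64_t uPhysBase, uint64_t uPhysMask, uint8_t cMaxPhysAddrWidth,
                                uint64_t *puFirst, uint64_t *puLast)
    {
        uint64_t const fInvPhysMask = ~((UINT64_C(1) << cMaxPhysAddrWidth) - 1U);
        uint64_t const uMask        = uPhysMask & EX_PAGE_BASE_MASK;
        *puFirst = uPhysBase & EX_PAGE_BASE_MASK;
        *puLast  = (*puFirst | ~uMask) & ~fInvPhysMask;
    }

    /* Example: base=0x100000 (memory type WB in bits 0-7), mask=0xFFFF00000, width=36
       -> first=0x100000, last=0x1FFFFF, i.e. one megabyte starting at 1 MiB. */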
trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
r100854 → r100935

@@ -3231,4 +3231,56 @@

     /*
+     * MTRR support.
+     * Currently we are exposing MTRRs with reasonable default values just to get Nested Hyper-V
+     * going, it isn't feature complete, see @bugref{10318} and @bugref{10498}.
+     */
+    if (pVM->cpum.s.GuestFeatures.fMtrr)
+    {
+        /* Check if MTRR read+write support is enabled. */
+        bool fEnableMtrrWrite;
+        rc = CFGMR3QueryBoolDef(pCpumCfg, "MTRRWrite", &fEnableMtrrWrite, false);
+        AssertRCReturn(rc, rc);
+        if (fEnableMtrrWrite)
+        {
+            pVM->cpum.s.fMtrrRead  = true;
+            pVM->cpum.s.fMtrrWrite = true;
+            LogRel(("CPUM: Enabled MTRR read-write support\n"));
+        }
+        else
+        {
+            /* Check if MTRR read-only reporting is enabled. */
+            rc = CFGMR3QueryBoolDef(pCpumCfg, "MTRR", &pVM->cpum.s.fMtrrRead, false);
+            AssertRCReturn(rc, rc);
+            LogRel(("CPUM: Enabled MTRR read-only support\n"));
+        }
+
+        /* Setup MTRR capability based on what the host supports. */
+        Assert(!pVM->cpum.s.fMtrrWrite || pVM->cpum.s.fMtrrRead);
+        if (pVM->cpum.s.fMtrrRead)
+        {
+            Assert(pVM->cpum.s.HostFeatures.fMtrr);
+
+            /* Lookup the number of variable-range MTRRs supported on the host. */
+            PCCPUMMSRRANGE pMtrrCapRange = cpumLookupMsrRange(pVM, MSR_IA32_MTRR_CAP);
+            AssertLogRelReturn(pMtrrCapRange, VERR_CPUM_IPE_2);
+            uint8_t const cHostVarRangeRegs = pMtrrCapRange->uValue & MSR_IA32_MTRR_CAP_VCNT_MASK;
+
+            /* Construct guest MTRR support capabilities. */
+            uint8_t const  cGuestVarRangeRegs = RT_MIN(cHostVarRangeRegs, CPUMCTX_MAX_MTRRVAR_COUNT);
+            uint64_t const uGstMtrrCap        = cGuestVarRangeRegs
+                                              | MSR_IA32_MTRR_CAP_FIX;
+            for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+            {
+                PVMCPU pVCpu = pVM->apCpusR3[idCpu];
+                pVCpu->cpum.s.GuestMsrs.msr.MtrrCap     = uGstMtrrCap;
+                pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType = MSR_IA32_MTRR_DEF_TYPE_FIXED_EN
+                                                        | MSR_IA32_MTRR_DEF_TYPE_MTRR_EN
+                                                        | X86_MTRR_MT_UC;
+            }
+            LogRel(("CPUM: Enabled fixed-range MTRRs and %u variable-range MTRRs\n", cGuestVarRangeRegs));
+        }
+    }
+
+    /*
      * Finally, initialize guest VMX MSRs.
      *
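The two CFGM keys queried above ("MTRR" and "MTRRWrite") live under the CPUM node. Assuming the usual VBoxInternal extradata mapping onto the CFGM tree (an assumption; the exact path is not documented by this changeset, and the VM name below is a placeholder), they would be set like this:

    VBoxManage setextradata "MyVM" "VBoxInternal/CPUM/MTRR" 1
    VBoxManage setextradata "MyVM" "VBoxInternal/CPUM/MTRRWrite" 1

Note that enabling write support implies read support, matching the Assert(!fMtrrWrite || fMtrrRead) invariant in the code.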
trunk/src/VBox/VMM/include/CPUMInternal.h
r99163 → r100935

@@ -366,5 +366,8 @@
      * This is used to verify load order dependencies (PGM). */
     bool fPendingRestore;
-    uint8_t abPadding0[2];
+    /** Whether MTRR reads report valid memory types for memory regions. */
+    bool fMtrrRead;
+    /** Whether the guest's writes to MTRRs are implemented. */
+    bool fMtrrWrite;

     /** XSAVE/XRTOR components we can expose to the guest mask. */
trunk/src/VBox/VMM/include/CPUMInternal.mac
r98103 → r100935

@@ -86,4 +86,6 @@
     .u8PortableCpuIdLevel   resb 1
     .fPendingRestore        resb 1
+    .fMtrrRead              resb 1
+    .fMtrrWrite             resb 1

     alignb 8