Changeset 92515 in vbox
- Timestamp: Nov 19, 2021 8:42:45 PM
- Location: trunk
- Files: 3 edited
  - include/VBox/vmm/cpum.h (modified) (1 diff)
  - src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp (modified) (2 diffs)
  - src/VBox/VMM/VMMR3/NEMR3Native-linux.cpp (modified) (7 diffs)
Legend:
- Unmodified lines have no prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
trunk/include/VBox/vmm/cpum.h
(r92018 → r92515)

     VMMR3DECL(int)                  CPUMR3CpuIdInsert(PVM pVM, PCPUMCPUIDLEAF pNewLeaf);
     VMMR3DECL(int)                  CPUMR3CpuIdGetLeaf(PVM pVM, PCPUMCPUIDLEAF pLeaf, uint32_t uLeaf, uint32_t uSubLeaf);
+    VMMR3_INT_DECL(PCCPUMCPUIDLEAF) CPUMR3CpuIdGetPtr(PVM pVM, uint32_t *pcLeaves);
     VMMR3DECL(CPUMMICROARCH)        CPUMR3CpuIdDetermineMicroarchEx(CPUMCPUVENDOR enmVendor, uint8_t bFamily,
                                                                     uint8_t bModel, uint8_t bStepping);
trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
(r92449 → r92515)

 
 /**
+ * Gets all the leaves.
+ *
+ * This only works after the CPUID leaves have been initialized.  The interface
+ * is intended for NEM and configuring CPUID leaves for the native hypervisor.
+ *
+ * @returns Pointer to the array of leaves.  NULL on failure.
+ * @param   pVM       The cross context VM structure.
+ * @param   pcLeaves  Where to return the number of leaves.
+ */
+VMMR3_INT_DECL(PCCPUMCPUIDLEAF) CPUMR3CpuIdGetPtr(PVM pVM, uint32_t *pcLeaves)
+{
+    *pcLeaves = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
+    return pVM->cpum.s.GuestInfo.paCpuIdLeavesR3;
+}
+
+
+/**
  * Inserts a CPU ID leaf, replacing any existing ones.
  *
…
     return cpumR3CpuIdInsert(pVM, NULL /* ppaLeaves */, NULL /* pcLeaves */, pNewLeaf);
 }
+
 
 /**
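For context, a minimal sketch (not part of the changeset) of how a NEM backend is expected to consume the new accessor: CPUMR3CpuIdGetPtr hands back the ring-3 leaf array, and the per-VCPU register values are then read through CPUMGetGuestCpuId, exactly as the KVM backend below does. The helper myBackendSetLeaf is a hypothetical placeholder for whatever call programs a leaf into the native hypervisor.

    /* Hypothetical consumer sketch; myBackendSetLeaf is a placeholder, not a real API. */
    static int myBackendSetLeaf(PVMCPU pVCpu, uint32_t uLeaf, uint32_t uSubLeaf,
                                uint32_t uEax, uint32_t uEbx, uint32_t uEcx, uint32_t uEdx);

    static int myBackendConfigureCpuId(PVM pVM, PVMCPU pVCpu)
    {
        uint32_t              cLeaves  = 0;
        PCCPUMCPUIDLEAF const paLeaves = CPUMR3CpuIdGetPtr(pVM, &cLeaves);
        AssertReturn(paLeaves, VERR_INTERNAL_ERROR_3);   /* the accessor returns NULL on failure */

        for (uint32_t i = 0; i < cLeaves; i++)
        {
            uint32_t uEax, uEbx, uEcx, uEdx;
            CPUMGetGuestCpuId(pVCpu, paLeaves[i].uLeaf, paLeaves[i].uSubLeaf, &uEax, &uEbx, &uEcx, &uEdx);
            int rc = myBackendSetLeaf(pVCpu, paLeaves[i].uLeaf, paLeaves[i].uSubLeaf, uEax, uEbx, uEcx, uEdx);
            if (RT_FAILURE(rc))
                return rc;
        }
        return VINF_SUCCESS;
    }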
trunk/src/VBox/VMM/VMMR3/NEMR3Native-linux.cpp
(r92508 → r92515)

 #include <VBox/vmm/vmcc.h>
 
+#include <iprt/alloca.h>
 #include <iprt/string.h>
 #include <iprt/system.h>
…
 
 
+/**
+ * Update the CPUID leaves for a VCPU.
+ *
+ * The KVM_SET_CPUID2 call replaces any previous leaves, so we have to redo
+ * everything when there really just are single bit changes.
+ */
+static int nemR3LnxUpdateCpuIdsLeaves(PVM pVM, PVMCPU pVCpu)
+{
+    uint32_t              cLeaves  = 0;
+    PCCPUMCPUIDLEAF const paLeaves = CPUMR3CpuIdGetPtr(pVM, &cLeaves);
+    struct kvm_cpuid2    *pReq = (struct kvm_cpuid2 *)alloca(RT_UOFFSETOF_DYN(struct kvm_cpuid2, entries[cLeaves + 2]));
+
+    pReq->nent    = cLeaves;
+    pReq->padding = 0;
+
+    for (uint32_t i = 0; i < cLeaves; i++)
+    {
+        CPUMGetGuestCpuId(pVCpu, paLeaves[i].uLeaf, paLeaves[i].uSubLeaf,
+                          &pReq->entries[i].eax,
+                          &pReq->entries[i].ebx,
+                          &pReq->entries[i].ecx,
+                          &pReq->entries[i].edx);
+        pReq->entries[i].function   = paLeaves[i].uLeaf;
+        pReq->entries[i].index      = paLeaves[i].uSubLeaf;
+        pReq->entries[i].flags      = !paLeaves[i].fSubLeafMask ? 0 : KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+        pReq->entries[i].padding[0] = 0;
+        pReq->entries[i].padding[1] = 0;
+        pReq->entries[i].padding[2] = 0;
+    }
+
+    int rcLnx = ioctl(pVCpu->nem.s.fdVCpu, KVM_SET_CPUID2, pReq);
+    AssertLogRelMsgReturn(rcLnx == 0, ("rcLnx=%d errno=%d cLeaves=%#x\n", rcLnx, errno, cLeaves), RTErrConvertFromErrno(errno));
+
+    return VINF_SUCCESS;
+}
+
+
 int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
 {
-    RT_NOREF(pVM, enmWhat);
+    /*
+     * Configure CPUIDs after ring-3 init has been done.
+     */
+    if (enmWhat == VMINITCOMPLETED_RING3)
+    {
+        for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+        {
+            int rc = nemR3LnxUpdateCpuIdsLeaves(pVM, pVM->apCpusR3[idCpu]);
+            AssertRCReturn(rc, rc);
+        }
+    }
+
     return VINF_SUCCESS;
 }
…
     if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE))
     {
-        /* Partial state is annoying as we have to do merging - is this possible at all? */
-        struct kvm_xsave XSave;
-        int rc = ioctl(pVCpu->nem.s.fdVCpu, KVM_GET_XSAVE, &XSave);
+        fWhat |= CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE; /* we do all or nothing at all */
+
+        AssertCompile(sizeof(pCtx->XState) >= sizeof(struct kvm_xsave));
+        int rc = ioctl(pVCpu->nem.s.fdVCpu, KVM_GET_XSAVE, &pCtx->XState);
         AssertMsgReturn(rc == 0, ("rc=%d errno=%d\n", rc, errno), VERR_NEM_IPE_3);
-
-        if (fWhat & CPUMCTX_EXTRN_X87)
-            memcpy(&pCtx->XState.x87, &XSave, sizeof(pCtx->XState.x87));
-        if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
-        {
-            /** @todo */
-        }
-        if (fWhat & CPUMCTX_EXTRN_OTHER_XSAVE)
-        {
-            /** @todo */
-        }
     }
 
…
     if (fExtrn & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE))
     {
-        if (   (fExtrn & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE))
-            != (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE))
-        {
-            /* Partial state is annoying as we have to do merging - is this possible at all? */
-            struct kvm_xsave XSave;
-            int rc = ioctl(pVCpu->nem.s.fdVCpu, KVM_GET_XSAVE, &XSave);
-            AssertMsgReturn(rc == 0, ("rc=%d errno=%d\n", rc, errno), VERR_NEM_IPE_3);
-
-            if (fExtrn & CPUMCTX_EXTRN_X87)
-                memcpy(&pCtx->XState.x87, &XSave, sizeof(pCtx->XState.x87));
-            if (fExtrn & CPUMCTX_EXTRN_SSE_AVX)
-            {
-                /** @todo */
-            }
-            if (fExtrn & CPUMCTX_EXTRN_OTHER_XSAVE)
-            {
-                /** @todo */
-            }
-        }
-
+        /** @todo could IEM just grab state partial control in some situations? */
+        Assert(   (fExtrn & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE))
+               == (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE)); /* no partial states */
+
+        AssertCompile(sizeof(pCtx->XState) >= sizeof(struct kvm_xsave));
         int rc = ioctl(pVCpu->nem.s.fdVCpu, KVM_SET_XSAVE, &pCtx->XState);
         AssertMsgReturn(rc == 0, ("rc=%d errno=%d\n", rc, errno), VERR_NEM_IPE_3);
…
 
         case KVM_EXIT_IRQ_WINDOW_OPEN:
+            EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, KVM_EXIT_IRQ_WINDOW_OPEN),
+                             pRun->s.regs.regs.rip + pRun->s.regs.sregs.cs.base, ASMReadTSC());
             STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitIrqWindowOpen);
             Log5(("IrqWinOpen/%u: %d\n", pVCpu->idCpu, pRun->request_interrupt_window));
…
 
         case KVM_EXIT_HLT:
+            EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, KVM_EXIT_HLT),
+                             pRun->s.regs.regs.rip + pRun->s.regs.sregs.cs.base, ASMReadTSC());
             STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt);
             Log5(("Halt/%u\n", pVCpu->idCpu));
…
 
         case KVM_EXIT_INTR: /* EINTR */
+            EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, KVM_EXIT_INTR),
+                             pRun->s.regs.regs.rip + pRun->s.regs.sregs.cs.base, ASMReadTSC());
             STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitIntr);
             Log5(("Intr/%u\n", pVCpu->idCpu));
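The core of the NEM change above is the KVM_SET_CPUID2 plumbing: struct kvm_cpuid2 ends in a flexible entries[] array, so the request buffer has to be sized for nent entries before the ioctl, and because KVM_SET_CPUID2 replaces the whole table, every update must resend all leaves. A stand-alone sketch of that pattern against plain <linux/kvm.h> (heap allocation instead of IPRT's alloca/RT_UOFFSETOF_DYN; the vCPU file descriptor and the entry array are assumed to come from the caller):

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Size the kvm_cpuid2 request for num_entries entries, copy them in and
     * issue KVM_SET_CPUID2 on the vCPU file descriptor.  Returns 0 or -errno. */
    static int set_vcpu_cpuid(int vcpu_fd, const struct kvm_cpuid_entry2 *entries, uint32_t num_entries)
    {
        size_t cb = sizeof(struct kvm_cpuid2) + (size_t)num_entries * sizeof(struct kvm_cpuid_entry2);
        struct kvm_cpuid2 *req = calloc(1, cb);
        if (!req)
            return -ENOMEM;

        req->nent = num_entries;
        memcpy(req->entries, entries, (size_t)num_entries * sizeof(struct kvm_cpuid_entry2));

        /* KVM_SET_CPUID2 replaces the vCPU's entire CPUID table, so each call
         * must carry the full set of leaves, not just the changed ones. */
        int rc = ioctl(vcpu_fd, KVM_SET_CPUID2, req);
        int ret = rc == 0 ? 0 : -errno;
        free(req);
        return ret;
    }

Other KVM userspaces typically obtain the entry array by filtering the result of KVM_GET_SUPPORTED_CPUID on the /dev/kvm handle; the changeset instead builds it from CPUM's own guest leaf table via CPUMR3CpuIdGetPtr and CPUMGetGuestCpuId.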