Changeset 55815 in vbox
- Timestamp: May 12, 2015 9:22:50 AM (9 years ago)
- File: 1 edited
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (modified) (11 diffs)
Legend: Unmodified | Added | Removed
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r55757 r55815 843 843 { 844 844 AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER); 845 AssertPtrReturn(ppVirt, VERR_INVALID_PARAMETER);845 AssertPtrReturn(ppVirt, VERR_INVALID_PARAMETER); 846 846 AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER); 847 847 … … 1262 1262 * auto-load/store MSR area in the VMCS. 1263 1263 * 1264 * @returns true if the MSR was added -and- its value was updated, false 1265 * otherwise. 1266 * @param pVCpu Pointer to the VMCPU. 1267 * @param uMsr The MSR. 1268 * @param uGuestMsr Value of the guest MSR. 1269 * @param fUpdateHostMsr Whether to update the value of the host MSR if 1270 * necessary. 1271 */ 1272 static bool hmR0VmxAddAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr, uint64_t uGuestMsrValue, bool fUpdateHostMsr) 1264 * @returns VBox status code. 1265 * @param pVCpu Pointer to the VMCPU. 1266 * @param uMsr The MSR. 1267 * @param uGuestMsr Value of the guest MSR. 1268 * @param fUpdateHostMsr Whether to update the value of the host MSR if 1269 * necessary. 1270 * @param pfAddedAndUpdated Where to store whether the MSR was added -and- 1271 * its value was updated. Optional, can be NULL. 1272 */ 1273 static int hmR0VmxAddAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr, uint64_t uGuestMsrValue, bool fUpdateHostMsr, 1274 bool *pfAddedAndUpdated) 1273 1275 { 1274 1276 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr; … … 1287 1289 ++cMsrs; 1288 1290 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs); 1289 Assert RC(rc);1291 AssertMsgRCReturn(rc, ("hmR0VmxAddAutoLoadStoreMsr: Insufficient space to add MSR %u\n", uMsr), rc); 1290 1292 1291 1293 /* Now that we're swapping MSRs during the world-switch, allow the guest to read/write them without causing VM-exits. 
*/ … … 1320 1322 } 1321 1323 1322 return fUpdatedMsrValue; 1324 if (pfAddedAndUpdated) 1325 *pfAddedAndUpdated = fUpdatedMsrValue; 1326 return VINF_SUCCESS; 1323 1327 } 1324 1328 … … 4776 4780 if (pVM->hm.s.fAllow64BitGuests) 4777 4781 { 4778 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR, pMixedCtx->msrLSTAR, false /* fUpdateHostMsr */); 4779 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR, pMixedCtx->msrSTAR, false /* fUpdateHostMsr */); 4780 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK, pMixedCtx->msrSFMASK, false /* fUpdateHostMsr */); 4781 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE, false /* fUpdateHostMsr */); 4782 # ifdef DEBUG 4782 int rc = VINF_SUCCESS; 4783 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR, pMixedCtx->msrLSTAR, false, NULL); 4784 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR, pMixedCtx->msrSTAR, false, NULL); 4785 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK, pMixedCtx->msrSFMASK, false, NULL); 4786 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE, false, NULL); 4787 AssertRCReturn(rc, rc); 4788 #ifdef DEBUG 4783 4789 PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr; 4784 4790 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++) … … 4832 4838 else 4833 4839 { 4834 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, pMixedCtx->msrEFER, false /* fUpdateHostMsr */); 4840 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, pMixedCtx->msrEFER, false /* fUpdateHostMsr */, 4841 NULL /* pfAddedAndUpdated */); 4842 AssertRCReturn(rc, rc); 4843 4835 4844 /* We need to intercept reads too, see @bugref{7386} comment #16. */ 4836 4845 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE); … … 6791 6800 * We assume all general purpose registers other than RSP are available. 
6792 6801 * 6793 * RIP is a must as it will be incremented or otherwise changed.6802 * RIP is a must, as it will be incremented or otherwise changed. 6794 6803 * 6795 6804 * RFLAGS are always required to figure the CPL. 6796 6805 * 6797 * RSP isn't always required, however it's a GPR so frequently required.6806 * RSP isn't always required, however it's a GPR, so frequently required. 6798 6807 * 6799 6808 * SS and CS are the only segment register needed if IEM doesn't do memory … … 6819 6828 6820 6829 /** 6821 * Ensures that we've got a complete basic context.6830 * Ensures that we've got a complete basic guest-context. 6822 6831 * 6823 6832 * This excludes the FPU, SSE, AVX, and similar extended state. The interface … … 7099 7108 /* 7100 7109 * !!! IMPORTANT !!! 7101 * If you modify code here, make sure tocheck whether hmR0VmxCallRing3Callback() needs to be updated too.7110 * If you modify code here, check whether hmR0VmxCallRing3Callback() needs to be updated too. 7102 7111 */ 7103 7112 … … 7364 7373 /* 7365 7374 * !!! IMPORTANT !!! 7366 * If you modify code here, make sure to check whether hmR0VmxLeave() and hmR0VmxLeaveSession() needs7367 * to be updated too. This is a stripped down version which gets out ASAP trying to not trigger any assertion.7375 * If you modify code here, check whether hmR0VmxLeave() and hmR0VmxLeaveSession() needs to be updated too. 7376 * This is a stripped down version which gets out ASAP, trying to not trigger any further assertions. 
7368 7377 */ 7369 7378 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER; \ … … 8787 8796 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT)) 8788 8797 { 8798 bool fMsrUpdated; 8789 8799 int rc2 = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); 8790 8800 AssertRC(rc2); 8791 8801 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS)); 8792 bool fMsrUpdated = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMR0GetGuestTscAux(pVCpu), 8793 true /* fUpdateHostMsr */); 8802 8803 rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMR0GetGuestTscAux(pVCpu), true /* fUpdateHostMsr */, 8804 &fMsrUpdated); 8805 AssertRC(rc2); 8794 8806 Assert(fMsrUpdated || pVCpu->hm.s.vmx.fUpdatedHostMsrs); 8807 8795 8808 /* Finally, mark that all host MSR values are updated so we don't redo it without leaving VT-x. See @bugref{6956}. */ 8796 8809 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
Note: See TracChangeset for help on using the changeset viewer.

