Changeset 45442 in vbox
- Timestamp: Apr 9, 2013, 5:34:38 PM
- File (1 edited):
  - trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (modified) (24 diffs)
Legend:
- Unmodified: lines prefixed with a space
- Added: lines prefixed with "+"
- Removed: lines prefixed with "-"
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
--- r45419
+++ r45442

@@ -1591 +1591 @@
     Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff));    /* Bits 11:0 MBZ. */
     rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
-    rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
+    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
+    AssertRCReturn(rc, rc);
 
     val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW;    /* CR8 reads from the Virtual-APIC page. */

@@ -1977 +1978 @@
     RTCCUINTREG uReg = ASMGetCR0();
     int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg);
-    AssertRCReturn(rc, rc);
 
 #ifdef VBOX_WITH_HYBRID_32BIT_KERNEL

@@ -1984 +1984 @@
     {
         uint64_t uReg = hmR0Get64bitCR3();
-        rc = VMXWriteVmcs64(VMX_VMCS_HOST_CR3, uReg);
+        rc |= VMXWriteVmcs64(VMX_VMCS_HOST_CR3, uReg);
     }
     else

@@ -1990 +1990 @@
     {
         uReg = ASMGetCR3();
-        rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
-    }
-    AssertRCReturn(rc, rc);
+        rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
+    }
 
     uReg = ASMGetCR4();
-    rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR4, uReg);
+    rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_CR4, uReg);
     AssertRCReturn(rc, rc);
     return rc;

@@ -2223 +2222 @@
 
     int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, idxHostMsr);
-    AssertRCReturn(rc, rc);
 
     /*
      * Host Sysenter MSRs.
      */
-    rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
+    rc |= VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
 #ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
     if (VMX_IS_64BIT_HOST_MODE())

@@ -2591 +2589 @@
     rc |= hmR0VmxLoadGuestRsp(pVM, pVCpu, pCtx);
     rc |= hmR0VmxLoadGuestRflags(pVM, pVCpu, pCtx);
-    AssertRCReturn(rc, rc);
     return rc;
 }

@@ -2725 +2722 @@
 
     /* Write VT-x's view of the guest CR0 into the VMCS and update the exception bitmap. */
-    rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR0, u64GuestCR0);
+    rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR0, u64GuestCR0);
     rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
     Log2(("VMX_VMCS_GUEST_CR0=%#RX32\n", (uint32_t)u64GuestCR0));

@@ -3002 +2999 @@
         /* Save the host and load the guest debug registers. This will make the guest debug state active. */
         rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
-        AssertRCReturn(rc, rc);
+        AssertRC(rc);
         Assert(CPUMIsGuestDebugStateActive(pVCpu));
         Assert(fInterceptMovDRx == false);

@@ -3012 +3009 @@
         /* Save the host and load the hypervisor debug registers. This will make the hyper debug state active. */
         rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
-        AssertRCReturn(rc, rc);
+        AssertRC(rc);
         Assert(CPUMIsHyperDebugStateActive(pVCpu));
         fInterceptMovDRx = true;

@@ -4787 +4784 @@
     rc |= hmR0VmxSaveGuestRsp(pVM, pVCpu, pMixedCtx);
     rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
-    AssertRCReturn(rc, rc);
     return rc;
 }

@@ -5006 +5002 @@
     /* Guest CR0. Guest FPU. */
     rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
-    AssertRCReturn(rc, rc);
 
     /* Guest CR4. */
-    rc = hmR0VmxSaveGuestCR4(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestCR4(pVM, pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);

@@ -5019 +5014 @@
     {
         rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &uVal);
-        AssertRCReturn(rc, rc);
         if (pMixedCtx->cr3 != uVal)
         {

@@ -5028 +5022 @@
 
         /* We require EFER to check PAE mode. */
-        rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVM, pVCpu, pMixedCtx);
-        AssertRCReturn(rc, rc);
+        rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVM, pVCpu, pMixedCtx);
 
         /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */
         if (CPUMIsGuestInPAEModeEx(pMixedCtx))    /* Reads CR0, CR4 and EFER MSR. */
         {
-            rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);
+            rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);
             rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);
             rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);
             rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);
-            AssertRCReturn(rc, rc);
             /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). */
             VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
         }
+        AssertRCReturn(rc, rc);
     }
     pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_CR3;

@@ -5066 +5059 @@
 {
     uint32_t u32Val = 0;
-    int rc = VMXReadVmcs32(idxSel, &u32Val);    AssertRCReturn(rc, rc);
+    int rc = VMXReadVmcs32(idxSel, &u32Val);
     pSelReg->Sel = (uint16_t)u32Val;
     pSelReg->ValidSel = (uint16_t)u32Val;
     pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
 
-    rc = VMXReadVmcs32(idxLimit, &u32Val);    AssertRCReturn(rc, rc);
+    rc |= VMXReadVmcs32(idxLimit, &u32Val);
     pSelReg->u32Limit = u32Val;
 
     RTGCUINTREG uGCVal = 0;
-    rc = VMXReadVmcsGstN(idxBase, &uGCVal);    AssertRCReturn(rc, rc);
+    rc |= VMXReadVmcsGstN(idxBase, &uGCVal);
     pSelReg->u64Base = uGCVal;
 
-    rc = VMXReadVmcs32(idxAccess, &u32Val);    AssertRCReturn(rc, rc);
+    rc |= VMXReadVmcs32(idxAccess, &u32Val);
     pSelReg->Attr.u = u32Val;
+    AssertRCReturn(rc, rc);
 
     /*

@@ -5115 +5109 @@
     {
         rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
-        AssertRCReturn(rc, rc);
-
-        rc = hmR0VmxReadSegmentReg(VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
+
+        rc |= hmR0VmxReadSegmentReg(VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
                                    VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs);
         rc |= hmR0VmxReadSegmentReg(VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,

@@ -5180 +5173 @@
     {
         rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
-        AssertRCReturn(rc, rc);
 
         /* For real-mode emulation using virtual-8086 mode we have the fake TSS (pRealModeTSS) in TR, don't sync the fake one. */
         if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
         {
-            rc = hmR0VmxReadSegmentReg(VMX_VMCS16_GUEST_FIELD_TR, VMX_VMCS32_GUEST_TR_LIMIT, VMX_VMCS_GUEST_TR_BASE,
+            rc |= hmR0VmxReadSegmentReg(VMX_VMCS16_GUEST_FIELD_TR, VMX_VMCS32_GUEST_TR_LIMIT, VMX_VMCS_GUEST_TR_BASE,
                                        VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &pMixedCtx->tr);
-            AssertRCReturn(rc, rc);
-        }
+        }
+        AssertRCReturn(rc, rc);
         pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_TR;

@@ -5347 +5339 @@
     {
         rc = PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
-        AssertRCReturn(rc, rc);
+        AssertRC(rc);
     }
 

@@ -5884 +5876 @@
  * @param   uValue    The value to push to the guest stack.
  */
-static int hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
+DECLINLINE(int) hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
 {
     /*

@@ -6003 +5995 @@
         pMixedCtx->cs.Sel = selIdtEntry;
         pMixedCtx->cs.u64Base = selIdtEntry << cbIdtEntry;
-
         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS
                                       | HM_CHANGED_GUEST_RIP
                                       | HM_CHANGED_GUEST_RFLAGS
                                       | HM_CHANGED_GUEST_RSP;
-        AssertRCReturn(rc, rc);
     }
     Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);

@@ -6035 +6025 @@
     rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
     AssertRCReturn(rc, rc);
-
     return rc;
 }

@@ -7198 +7187 @@
 {
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
-    int rc = hmR0VmxInjectXcptUD(pVM, pVCpu, pMixedCtx);
-    AssertRCReturn(rc, rc);
-    return rc;
+    return hmR0VmxInjectXcptUD(pVM, pVCpu, pMixedCtx);
 }

@@ -7972 +7959 @@
         /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
         rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
-        AssertRCReturn(rc, rc);
+        AssertRC(rc);
         Assert(CPUMIsGuestDebugStateActive(pVCpu));
 

@@ -8045 +8032 @@
     RTGCPHYS GCPhys = 0;
     rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
-    AssertRCReturn(rc, rc);
 
 #if 0

@@ -8051 +8037 @@
 #else
     /* Aggressive state sync. for now. */
-    rc = hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
     rc |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
     rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
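Most of these hunks apply one idiom: rather than asserting and returning after every VMCS read/write, the status codes of a batch of calls are OR-accumulated into rc and checked once with AssertRCReturn at the end (or relaxed to a non-returning AssertRC where execution should continue regardless). Below is a minimal, self-contained sketch of that accumulation pattern; the names WriteField, LoadBatch and VERR_VMX_ERROR are stand-ins invented for illustration, not the real IPRT/VMX APIs used in HMVMXR0.cpp.

#include <stdio.h>

/* Stand-in status codes; illustrative values only. Success must be 0 for the
   OR-accumulation to work, failures are non-zero. */
#define VINF_SUCCESS     0
#define VERR_VMX_ERROR  (-4000)   /* hypothetical failure code */

/* Hypothetical VMCS-style write: returns 0 on success, negative on failure. */
static int WriteField(unsigned idxField, unsigned uValue)
{
    (void)uValue;
    return (idxField == 0xdeadu) ? VERR_VMX_ERROR : VINF_SUCCESS;
}

/* Batched pattern from the changeset: accumulate the status codes and check
   once. The final rc is 0 if and only if every call succeeded; when several
   calls fail, the OR of their codes is not a meaningful status value, which
   is acceptable because it only drives the single assert/return. */
static int LoadBatch(void)
{
    int rc;
    rc  = WriteField(0x0001u, 42u);
    rc |= WriteField(0x0002u, 7u);
    rc |= WriteField(0x0003u, 0u);
    if (rc != VINF_SUCCESS)   /* the real code uses AssertRCReturn(rc, rc) here */
        return rc;
    return VINF_SUCCESS;
}

int main(void)
{
    printf("LoadBatch -> %d\n", LoadBatch());
    return 0;
}

The trade-off is fewer branches and less boilerplate on the hot path at the cost of not knowing which individual call failed; since these accessors fail only in abnormal situations, one check per batch is sufficient for the assertion-style handling used here.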

