Changeset 76040 in vbox
Timestamp: Dec 7, 2018 7:01:21 AM
File: 1 edited
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
Diff r76002 → r76040:

 /** Gets the guest-physical address of the shadows VMCS for the given VCPU. */
-# define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu)((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs)
+# define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs)

 /** Whether a shadow VMCS is present for the given VCPU. */
-# define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu)RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)
+# define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)

 /** Gets the VMXON region pointer. */
-# define IEM_VMX_GET_VMXON_PTR(a_pVCpu)((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
+# define IEM_VMX_GET_VMXON_PTR(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)

 /** Gets the guest-physical address of the current VMCS for the given VCPU. */
-# define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu)((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)
+# define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)

 /** Whether a current VMCS is present for the given VCPU. */
-# define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu)RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)
+# define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)

 /** Assigns the guest-physical address of the current VMCS for the given VCPU. */
-# define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
+# define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
     do \
     { \
...
 /** Clears any current VMCS for the given VCPU. */
-# define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
+# define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
     do \
     { \
...
 /** Check for VMX instructions requiring to be in VMX operation.
  * @note Any changes here, check if IEMOP_HLP_IN_VMX_OPERATION needs updating. */
-# define IEM_VMX_IN_VMX_OPERATION(a_pVCpu, a_szInstr, a_InsDiagPrefix) \
+# define IEM_VMX_IN_VMX_OPERATION(a_pVCpu, a_szInstr, a_InsDiagPrefix) \
     do \
     { \
...
 /** Marks a VM-entry failure with a diagnostic reason, logs and returns. */
-# define IEM_VMX_VMENTRY_FAILED_RET(a_pVCpu, a_pszInstr, a_pszFailure, a_VmxDiag) \
+# define IEM_VMX_VMENTRY_FAILED_RET(a_pVCpu, a_pszInstr, a_pszFailure, a_VmxDiag) \
     do \
     { \
...
 /** Marks a VM-exit failure with a diagnostic reason, logs and returns. */
-# define IEM_VMX_VMEXIT_FAILED_RET(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag) \
+# define IEM_VMX_VMEXIT_FAILED_RET(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag) \
     do \
     { \
...
         return VERR_VMX_VMEXIT_FAILED; \
     } while (0)
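All of the helper macros touched above wrap their bodies in do { ... } while (0) so that a multi-statement macro behaves as one statement at the call site. As a quick illustration of why that wrapper matters, here is a minimal standalone sketch; the Ctx type and LOG_AND_CLEAR macro are hypothetical, not part of this changeset:

    #include <stdio.h>

    struct Ctx { int val; };

    /* Wrapped in do { ... } while (0), a multi-statement macro expands to a
       single statement and still demands a trailing semicolon, so it can sit
       under a braceless 'if' without breaking an attached 'else'. */
    #define LOG_AND_CLEAR(a_pCtx) \
        do \
        { \
            printf("clearing\n"); \
            (a_pCtx)->val = 0; \
        } while (0)

    int main(void)
    {
        struct Ctx ctx = { 42 };
        if (ctx.val)
            LOG_AND_CLEAR(&ctx);   /* one statement: the if/else pairing stays intact */
        else
            printf("already clear\n");
        return ctx.val;
    }

The changeset then adds a new pair of ring-3-only execution-policy macros: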
+/** Enables/disables IEM-only EM execution policy in and from ring-3. */
+# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
+#  define IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET(a_pVCpu, a_pszLogPrefix) \
+    do { \
+        Log(("%s: Enabling IEM-only EM execution policy!\n", (a_pszLogPrefix))); \
+        return EMR3SetExecutionPolicy((a_pVCpu)->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true); \
+    } while (0)
+
+#  define IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE(a_pVCpu, a_pszLogPrefix) \
+    do { \
+        Log(("%s: Disabling IEM-only EM execution policy!\n", (a_pszLogPrefix))); \
+        EMR3SetExecutionPolicy((a_pVCpu)->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false); \
+    } while (0)
+# else
+#  define IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET(a_pVCpu, a_pszLogPrefix)  do { return VINF_SUCCESS; } while (0)
+#  define IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE(a_pVCpu, a_pszLogPrefix)     do { } while (0)
+# endif
...
 IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPU pVCpu, uint32_t uExitReason)
 {
+# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
+    RT_NOREF2(pVCpu, uExitReason);
+    return VINF_EM_RAW_EMULATE_INSTR;
+# else
     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     Assert(pVmcs);
...
     { /* likely */ }
     else
+    {
+        IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE(pVCpu, "VMX-Abort");
         return iemVmxAbort(pVCpu, VMXABORT_SAVE_GUEST_MSRS);
+    }
 }
 else
...
     Assert(rcStrict == VINF_SUCCESS);
+    IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE(pVCpu, "VM-exit");
     return VINF_VMX_VMEXIT;
+# endif
 }
...
 IEM_STATIC VBOXSTRICTRC iemVmxVmlaunchVmresume(PVMCPU pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId, PCVMXVEXITINFO pExitInfo)
 {
+# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
+    RT_NOREF4(pVCpu, cbInstr, uInstrId, pExitInfo);
+    return VINF_EM_RAW_EMULATE_INSTR;
+# else
     Assert(   uInstrId == VMXINSTRID_VMLAUNCH
            || uInstrId == VMXINSTRID_VMRESUME);
...
     iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_CTLS);
     iemRegAddToRipAndClearRF(pVCpu, cbInstr);
-    return VINF_SUCCESS;
+    IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET(pVCpu, pszInstr);
+# endif
 }
...
         rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPtrVmcs + RT_UOFFSETOF(VMXVVMCS, fVmcsState),
                                             (const void *)&fVmcsStateClear, sizeof(fVmcsStateClear));
+        if (RT_FAILURE(rcStrict))
+            return rcStrict;
     }

     iemVmxVmSucceed(pVCpu);
     iemRegAddToRipAndClearRF(pVCpu, cbInstr);
-    return rcStrict;
+    return VINF_SUCCESS;
 }
...
                           PCVMXVEXITINFO pExitInfo)
 {
-#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
-    RT_NOREF5(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
-    return VINF_EM_RAW_EMULATE_INSTR;
-#else
     if (!IEM_VMX_IS_ROOT_MODE(pVCpu))
     {
...
         iemVmxVmSucceed(pVCpu);
         iemRegAddToRipAndClearRF(pVCpu, cbInstr);
-# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
-        return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
-# else
         return VINF_SUCCESS;
-# endif
     }
     else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
...
     iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     return VINF_SUCCESS;
-#endif
 }
...
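Besides the execution-policy rework, the VMCLEAR hunk above fixes a status-code bug: the old code fell through to iemVmxVmSucceed() and the RIP advance even when PGMPhysSimpleWriteGCPhys had failed, and then returned the possibly-failed rcStrict after signalling success. The new code bails out on failure first and returns an explicit VINF_SUCCESS otherwise. A distilled, compilable sketch of the corrected pattern; every name here is a simplified stand-in for the VBox types and helpers, not the real definitions:

    #include <stdio.h>

    /* Simplified stand-ins for the VBox status-code machinery. */
    typedef int VBOXSTRICTRC;
    #define VINF_SUCCESS       0
    #define VERR_WRITE_ERROR (-1)
    #define RT_FAILURE(rc)   ((rc) < 0)

    static VBOXSTRICTRC writeBackVmcsState(int fFail)  /* models PGMPhysSimpleWriteGCPhys */
    {
        return fFail ? VERR_WRITE_ERROR : VINF_SUCCESS;
    }

    static VBOXSTRICTRC emulateVmclear(int fFailWrite)
    {
        VBOXSTRICTRC rcStrict = writeBackVmcsState(fFailWrite);
        if (RT_FAILURE(rcStrict))   /* new in r76040: bail out before signalling success */
            return rcStrict;

        /* ... iemVmxVmSucceed() and the RIP advance happen only on success ... */
        return VINF_SUCCESS;        /* explicit success instead of a reused rcStrict */
    }

    int main(void)
    {
        printf("success path: %d\n", emulateVmclear(0));
        printf("failure path: %d\n", emulateVmclear(1));
        return 0;
    }

The final hunk removes the matching special-casing from the VMXOFF emulation: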
 IEM_CIMPL_DEF_0(iemCImpl_vmxoff)
 {
-# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
-    RT_NOREF2(pVCpu, cbInstr);
-    return VINF_EM_RAW_EMULATE_INSTR;
-# else
     /* Nested-guest intercept. */
     if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
...
     iemVmxVmSucceed(pVCpu);
     iemRegAddToRipAndClearRF(pVCpu, cbInstr);
-# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
-    return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
-# else
     return VINF_SUCCESS;
-# endif
-# endif
 }
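Taken together, the hunks move the EMEXECPOLICY_IEM_ALL toggle one level down: before this changeset VMXON switched EM to IEM-only execution and VMXOFF switched it back, while afterwards the policy is enabled only when VMLAUNCH/VMRESUME actually enters the nested guest (IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET) and disabled again on every VM-exit or VMX-abort. The sketch below restates the compile-time gating pattern the new macros use; the My*/MY_* names are hypothetical stand-ins, while EMR3SetExecutionPolicy and EMEXECPOLICY_IEM_ALL are the real API seen in the diff:

    #include <stdio.h>

    #define VINF_SUCCESS 0

    static int mySetExecutionPolicy(int fEnable)   /* models EMR3SetExecutionPolicy */
    {
        printf("execution policy: IEM-only = %d\n", fEnable);
        return VINF_SUCCESS;
    }

    #if defined(MY_NESTED_HWVIRT_ONLY_IN_IEM) && defined(MY_IN_RING3)
    /* Ring-3 build: really flip the policy (the enable variant also returns). */
    # define MY_EXECPOLICY_ENABLE_RET(a_pszWho) \
        do { \
            printf("%s: enabling IEM-only execution\n", (a_pszWho)); \
            return mySetExecutionPolicy(1); \
        } while (0)
    # define MY_EXECPOLICY_DISABLE(a_pszWho) \
        do { \
            printf("%s: disabling IEM-only execution\n", (a_pszWho)); \
            mySetExecutionPolicy(0); \
        } while (0)
    #else
    /* Other contexts (or feature off): succeed / do nothing. */
    # define MY_EXECPOLICY_ENABLE_RET(a_pszWho)  do { return VINF_SUCCESS; } while (0)
    # define MY_EXECPOLICY_DISABLE(a_pszWho)     do { } while (0)
    #endif

    static int myVmlaunch(void)
    {
        /* ... VM-entry checks and the world switch would happen here ... */
        MY_EXECPOLICY_ENABLE_RET("VMLAUNCH");  /* tail position: the macro returns */
    }

    static int myVmexit(void)
    {
        MY_EXECPOLICY_DISABLE("VM-exit");      /* plain statement on the exit path */
        return VINF_SUCCESS;
    }

    int main(void)
    {
        return myVmlaunch() | myVmexit();
    }

With this shape, VMXOFF no longer needs any build-specific code at all, which is exactly what the last hunk deletes.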

