/* $Id: HMSVMAll.cpp 103194 2024-02-05 07:23:40Z vboxsync $ */
/** @file
 * HM SVM (AMD-V) - All contexts.
 */

/*
 * Copyright (C) 2017-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#define VMCPU_INCL_CPUM_GST_CTX
#include "HMInternal.h"
#include <VBox/vmm/apic.h>
#include <VBox/vmm/gim.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/vmcc.h>

#include <VBox/err.h>
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
# include <iprt/asm-amd64-x86.h> /* ASMCpuId */
#endif


/**
 * Emulates a simple MOV TPR (CR8) instruction.
 *
 * Used for TPR patching on 32-bit guests. This simply looks up the patch record
 * at EIP and performs the required emulation.
 *
 * This VMMCALL is used as a fallback mechanism when the mov to/from cr8 isn't
 * exactly like how we want it to be (e.g. not followed by shr 4 as is usually
 * done for TPR). See hmR3ReplaceTprInstr() for the details.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS if the access was handled successfully, RIP + RFLAGS updated.
 * @retval  VERR_NOT_FOUND if no patch record for this RIP could be found.
 * @retval  VERR_SVM_UNEXPECTED_PATCH_TYPE if the found patch type is invalid.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(int) hmEmulateSvmMovTpr(PVMCC pVM, PVMCPUCC pVCpu)
{
    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    Log4(("Emulated VMMCall TPR access replacement at RIP=%RGv\n", pCtx->rip));

    AssertCompile(DISGREG_EAX  == X86_GREG_xAX);
    AssertCompile(DISGREG_ECX  == X86_GREG_xCX);
    AssertCompile(DISGREG_EDX  == X86_GREG_xDX);
    AssertCompile(DISGREG_EBX  == X86_GREG_xBX);
    AssertCompile(DISGREG_ESP  == X86_GREG_xSP);
    AssertCompile(DISGREG_EBP  == X86_GREG_xBP);
    AssertCompile(DISGREG_ESI  == X86_GREG_xSI);
    AssertCompile(DISGREG_EDI  == X86_GREG_xDI);
    AssertCompile(DISGREG_R8D  == X86_GREG_x8);
    AssertCompile(DISGREG_R9D  == X86_GREG_x9);
    AssertCompile(DISGREG_R10D == X86_GREG_x10);
    AssertCompile(DISGREG_R11D == X86_GREG_x11);
    AssertCompile(DISGREG_R12D == X86_GREG_x12);
    AssertCompile(DISGREG_R13D == X86_GREG_x13);
    AssertCompile(DISGREG_R14D == X86_GREG_x14);
    AssertCompile(DISGREG_R15D == X86_GREG_x15);

    /*
     * We do this in a loop as we increment the RIP after a successful emulation
     * and the new RIP may be a patched instruction which needs emulation as well.
     */
    bool fPatchFound = false;
    for (;;)
    {
        PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
        if (!pPatch)
            break;
        fPatchFound = true;

        uint8_t u8Tpr;
        switch (pPatch->enmType)
        {
            case HMTPRINSTR_READ:
            {
                bool fPending;
                int  rc = APICGetTpr(pVCpu, &u8Tpr, &fPending, NULL /* pu8PendingIrq */);
                AssertRC(rc);

                uint8_t idxReg = pPatch->uDstOperand;
                AssertStmt(idxReg < RT_ELEMENTS(pCtx->aGRegs), idxReg = RT_ELEMENTS(pCtx->aGRegs) - 1);
                pCtx->aGRegs[idxReg].u64 = u8Tpr;
                pCtx->rip += pPatch->cbOp;
                pCtx->eflags.Bits.u1RF = 0;
                break;
            }

            case HMTPRINSTR_WRITE_REG:
            case HMTPRINSTR_WRITE_IMM:
            {
                if (pPatch->enmType == HMTPRINSTR_WRITE_REG)
                {
                    uint8_t idxReg = pPatch->uSrcOperand;
                    AssertStmt(idxReg < RT_ELEMENTS(pCtx->aGRegs), idxReg = RT_ELEMENTS(pCtx->aGRegs) - 1);
                    u8Tpr = pCtx->aGRegs[idxReg].u8;
                }
                else
                    u8Tpr = (uint8_t)pPatch->uSrcOperand;

                int rc2 = APICSetTpr(pVCpu, u8Tpr);
                AssertRC(rc2);
                pCtx->rip += pPatch->cbOp;
                pCtx->eflags.Bits.u1RF = 0;
                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR
                                                         | HM_CHANGED_GUEST_RIP
                                                         | HM_CHANGED_GUEST_RFLAGS);
                break;
            }

            default:
            {
                AssertMsgFailed(("Unexpected patch type %d\n", pPatch->enmType));
                pVCpu->hm.s.u32HMError = pPatch->enmType;
                return VERR_SVM_UNEXPECTED_PATCH_TYPE;
            }
        }
    }

    return fPatchFound ? VINF_SUCCESS : VERR_NOT_FOUND;
}

#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
/**
 * Notification callback for when a \#VMEXIT happens outside SVM R0 code (e.g.
 * in IEM).
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest-CPU context.
 *
 * @sa      hmR0SvmVmRunCacheVmcb.
 */
VMM_INT_DECL(void) HMNotifySvmNstGstVmexit(PVMCPUCC pVCpu, PCPUMCTX pCtx)
{
    PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    if (pVmcbNstGstCache->fCacheValid)
    {
        /*
         * Restore fields as our own code might look at the VMCB controls as part
         * of the #VMEXIT handling in IEM. Otherwise, strictly speaking we don't need to
         * restore these fields because currently none of them are written back to memory
         * by a physical CPU on #VMEXIT.
         */
        PSVMVMCBCTRL pVmcbNstGstCtrl = &pCtx->hwvirt.svm.Vmcb.ctrl;
        pVmcbNstGstCtrl->u16InterceptRdCRx                 = pVmcbNstGstCache->u16InterceptRdCRx;
        pVmcbNstGstCtrl->u16InterceptWrCRx                 = pVmcbNstGstCache->u16InterceptWrCRx;
        pVmcbNstGstCtrl->u16InterceptRdDRx                 = pVmcbNstGstCache->u16InterceptRdDRx;
        pVmcbNstGstCtrl->u16InterceptWrDRx                 = pVmcbNstGstCache->u16InterceptWrDRx;
        pVmcbNstGstCtrl->u16PauseFilterThreshold           = pVmcbNstGstCache->u16PauseFilterThreshold;
        pVmcbNstGstCtrl->u16PauseFilterCount               = pVmcbNstGstCache->u16PauseFilterCount;
        pVmcbNstGstCtrl->u32InterceptXcpt                  = pVmcbNstGstCache->u32InterceptXcpt;
        pVmcbNstGstCtrl->u64InterceptCtrl                  = pVmcbNstGstCache->u64InterceptCtrl;
        pVmcbNstGstCtrl->u64TSCOffset                      = pVmcbNstGstCache->u64TSCOffset;
        pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking          = pVmcbNstGstCache->fVIntrMasking;
        pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging = pVmcbNstGstCache->fNestedPaging;
        pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt               = pVmcbNstGstCache->fLbrVirt;
        pVmcbNstGstCache->fCacheValid = false;
    }

    /*
     * Transitions to ring-3 flag a full CPU-state change except if we transition to ring-3
     * in response to a physical CPU interrupt as no changes to the guest-CPU state are
     * expected (see VINF_EM_RAW_INTERRUPT handling in hmR0SvmExitToRing3).
     *
     * However, with nested-guests, the state -can- change on trips to ring-3 because we
     * might try to inject a nested-guest physical interrupt and cause a SVM_EXIT_INTR
     * #VMEXIT for the nested-guest from ring-3. Import the complete state here as we will
     * be swapping to the guest VMCB after the #VMEXIT.
     */
    CPUMImportGuestStateOnDemand(pVCpu, CPUMCTX_EXTRN_ALL);
    CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_ALL);
    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
}
#endif

/**
 * Checks if the Virtual GIF (Global Interrupt Flag) feature is supported and
 * enabled for the VM.
 *
 * @returns @c true if VGIF is enabled, @c false otherwise.
 * @param   pVM     The cross context VM structure.
 *
 * @remarks The value returned by this function is expected by the callers not
 *          to change throughout the lifetime of the VM.
 */
VMM_INT_DECL(bool) HMIsSvmVGifActive(PCVMCC pVM)
{
#ifdef IN_RING0
    bool const fVGif = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_VGIF);
#else
    bool const fVGif = RT_BOOL(pVM->hm.s.ForR3.svm.fFeatures & X86_CPUID_SVM_FEATURE_EDX_VGIF);
#endif
    return fVGif && pVM->hm.s.svm.fVGif;
}
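/*
 * Illustrative only: callers are expected to sample HMIsSvmVGifActive() once
 * and key all virtual-GIF handling off that stable value. A minimal sketch,
 * assuming the VMCB int-ctrl bit-field name below:
 *
 *     bool const fVGif = HMIsSvmVGifActive(pVM);
 *     if (fVGif)
 *         pVmcbCtrl->IntCtrl.n.u1VGifEnable = 1;   // let hardware track GIF
 *     // else GIF has to be tracked in software.
 */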


/**
 * Interface used by IEM to handle patched TPR accesses.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS if the hypercall was handled, RIP + RFLAGS all dealt with.
 * @retval  VERR_NOT_FOUND if the hypercall was _not_ handled.
 * @retval  VERR_SVM_UNEXPECTED_PATCH_TYPE on IPE.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(int) HMHCMaybeMovTprSvmHypercall(PVMCC pVM, PVMCPUCC pVCpu)
{
    if (pVM->hm.s.fTprPatchingAllowed)
    {
        int rc = hmEmulateSvmMovTpr(pVM, pVCpu);
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;
        return rc;
    }
    return VERR_NOT_FOUND;
}
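/*
 * Illustrative only: a sketch of how a VMMCALL handler might fall back to this
 * function when no GIM hypercall provider claims the call. The dispatch logic
 * below is an assumption for the example, not the actual IEM code.
 *
 *     int rc = VERR_NOT_FOUND;
 *     if (!GIMIsEnabled(pVM))                          // no GIM provider active
 *         rc = HMHCMaybeMovTprSvmHypercall(pVM, pVCpu);
 *     if (rc == VERR_NOT_FOUND)
 *         // ... raise #UD or otherwise reject the VMMCALL ...
 */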


#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
/**
 * Checks if the current AMD CPU is subject to erratum 170 "In SVM mode,
 * incorrect code bytes may be fetched after a world-switch".
 *
 * @returns @c true if the erratum applies, @c false otherwise.
 * @param   pu32Family      Where to store the CPU family (can be NULL).
 * @param   pu32Model       Where to store the CPU model (can be NULL).
 * @param   pu32Stepping    Where to store the CPU stepping (can be NULL).
 */
VMM_INT_DECL(bool) HMIsSubjectToSvmErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping)
{
    /*
     * Erratum 170 which requires a forced TLB flush for each world switch:
     * See AMD spec. "Revision Guide for AMD NPT Family 0Fh Processors".
     *
     * All BH-G1/2 and DH-G1/2 models include a fix:
     * Athlon X2:   0x6b 1/2
     *              0x68 1/2
     * Athlon 64:   0x7f 1
     *              0x6f 2
     * Sempron:     0x7f 1/2
     *              0x6f 2
     *              0x6c 2
     *              0x7c 2
     * Turion 64:   0x68 2
     */
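    /*
     * CPUID leaf 1 EAX layout: stepping in bits 3:0, base model in bits 7:4,
     * base family in bits 11:8, extended model in bits 19:16 and extended
     * family starting at bit 20. The extended fields are only folded in when
     * the base family is 0xf, which covers all CPUs subject to this erratum.
     */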
    uint32_t u32Dummy;
    uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
    ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
    u32BaseFamily = (u32Version >> 8) & 0xf;
    u32Family     = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
    u32Model      = ((u32Version >> 4) & 0xf);
    u32Model      = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
    u32Stepping   = u32Version & 0xf;

    bool fErratumApplies = false;
    if (   u32Family == 0xf
        && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
        && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
        fErratumApplies = true;

    if (pu32Family)
        *pu32Family   = u32Family;
    if (pu32Model)
        *pu32Model    = u32Model;
    if (pu32Stepping)
        *pu32Stepping = u32Stepping;

    return fErratumApplies;
}
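/*
 * Illustrative only: a sketch of how ring-0 init code might consult the check
 * above to force a TLB flush on every world switch. The flag name and log
 * format are assumptions for the example, not the actual HM code.
 *
 *     uint32_t u32Family, u32Model, u32Stepping;
 *     if (HMIsSubjectToSvmErratum170(&u32Family, &u32Model, &u32Stepping))
 *     {
 *         LogRel(("HM: AMD erratum 170 applies to fam %#x mod %#x step %#x\n",
 *                 u32Family, u32Model, u32Stepping));
 *         g_fAlwaysFlushTlb = true;   // hypothetical flag
 *     }
 */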
#endif


/**
 * Converts an SVM event type to a TRPM event type.
 *
 * @returns The TRPM event type.
 * @retval  TRPM_32BIT_HACK if the specified type of event isn't among the set
 *          of recognized trap types.
 *
 * @param   pEvent      Pointer to the SVM event.
 * @param   uVector     The vector associated with the event.
 */
VMM_INT_DECL(TRPMEVENT) HMSvmEventToTrpmEventType(PCSVMEVENT pEvent, uint8_t uVector)
{
    uint8_t const uType = pEvent->n.u3Type;
    switch (uType)
    {
        case SVM_EVENT_EXTERNAL_IRQ:    return TRPM_HARDWARE_INT;
        case SVM_EVENT_SOFTWARE_INT:    return TRPM_SOFTWARE_INT;
        case SVM_EVENT_NMI:             return TRPM_NMI;
        case SVM_EVENT_EXCEPTION:
        {
            if (   uVector == X86_XCPT_BP
                || uVector == X86_XCPT_OF)
                return TRPM_SOFTWARE_INT;
            return TRPM_TRAP;
        }
        default:
            break;
    }
    AssertMsgFailed(("HMSvmEventToTrpmEventType: Invalid pending-event type %#x\n", uType));
    return TRPM_32BIT_HACK;
}
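/*
 * Illustrative only: a sketch of forwarding a pending SVM event to TRPM using
 * the conversion above. Taking the event from the exit information and the
 * surrounding error handling are assumptions for the example.
 *
 *     uint8_t const   uVector = pEvent->n.u8Vector;
 *     TRPMEVENT const enmType = HMSvmEventToTrpmEventType(pEvent, uVector);
 *     if (enmType != TRPM_32BIT_HACK)
 *         TRPMAssertTrap(pVCpu, uVector, enmType);
 */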


/**
 * Gets the SVM nested-guest control intercepts if cached by HM.
 *
 * @returns @c true on success, @c false otherwise.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          EMT.
 * @param   pu64Intercepts  Where to store the control intercepts. Only updated
 *                          when @c true is returned.
 */
VMM_INT_DECL(bool) HMGetGuestSvmCtrlIntercepts(PCVMCPU pVCpu, uint64_t *pu64Intercepts)
{
    Assert(pu64Intercepts);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    if (pVmcbNstGstCache->fCacheValid)
    {
        *pu64Intercepts = pVmcbNstGstCache->u64InterceptCtrl;
        return true;
    }
    return false;
}
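/*
 * Illustrative only: the caller pattern shared by this getter and the
 * CRx/DRx/exception-intercept getters below, roughly as IEM's nested-guest
 * intercept checks might use it. Falling back to the VMCB in guest memory is
 * an assumption for the example.
 *
 *     uint64_t fCtrl;
 *     if (!HMGetGuestSvmCtrlIntercepts(pVCpu, &fCtrl))           // not cached by HM?
 *         fCtrl = pCtx->hwvirt.svm.Vmcb.ctrl.u64InterceptCtrl;   // read guest VMCB copy
 *     if (fCtrl & SVM_CTRL_INTERCEPT_RDTSC)
 *         // ... trigger a nested #VMEXIT with exit code SVM_EXIT_RDTSC ...
 */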


/**
 * Gets the SVM nested-guest CRx-read intercepts if cached by HM.
 *
 * @returns @c true on success, @c false otherwise.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          EMT.
 * @param   pu16Intercepts  Where to store the CRx-read intercepts. Only updated
 *                          when @c true is returned.
 */
VMM_INT_DECL(bool) HMGetGuestSvmReadCRxIntercepts(PCVMCPU pVCpu, uint16_t *pu16Intercepts)
{
    Assert(pu16Intercepts);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    if (pVmcbNstGstCache->fCacheValid)
    {
        *pu16Intercepts = pVmcbNstGstCache->u16InterceptRdCRx;
        return true;
    }
    return false;
}


/**
 * Gets the SVM nested-guest CRx-write intercepts if cached by HM.
 *
 * @returns @c true on success, @c false otherwise.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          EMT.
 * @param   pu16Intercepts  Where to store the CRx-write intercepts. Only updated
 *                          when @c true is returned.
 */
VMM_INT_DECL(bool) HMGetGuestSvmWriteCRxIntercepts(PCVMCPU pVCpu, uint16_t *pu16Intercepts)
{
    Assert(pu16Intercepts);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    if (pVmcbNstGstCache->fCacheValid)
    {
        *pu16Intercepts = pVmcbNstGstCache->u16InterceptWrCRx;
        return true;
    }
    return false;
}


/**
 * Gets the SVM nested-guest DRx-read intercepts if cached by HM.
 *
 * @returns @c true on success, @c false otherwise.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          EMT.
 * @param   pu16Intercepts  Where to store the DRx-read intercepts. Only updated
 *                          when @c true is returned.
 */
VMM_INT_DECL(bool) HMGetGuestSvmReadDRxIntercepts(PCVMCPU pVCpu, uint16_t *pu16Intercepts)
{
    Assert(pu16Intercepts);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    if (pVmcbNstGstCache->fCacheValid)
    {
        *pu16Intercepts = pVmcbNstGstCache->u16InterceptRdDRx;
        return true;
    }
    return false;
}


/**
 * Gets the SVM nested-guest DRx-write intercepts if cached by HM.
 *
 * @returns @c true on success, @c false otherwise.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          EMT.
 * @param   pu16Intercepts  Where to store the DRx-write intercepts. Only updated
 *                          when @c true is returned.
 */
VMM_INT_DECL(bool) HMGetGuestSvmWriteDRxIntercepts(PCVMCPU pVCpu, uint16_t *pu16Intercepts)
{
    Assert(pu16Intercepts);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    if (pVmcbNstGstCache->fCacheValid)
    {
        *pu16Intercepts = pVmcbNstGstCache->u16InterceptWrDRx;
        return true;
    }
    return false;
}


/**
 * Gets the SVM nested-guest exception intercepts if cached by HM.
 *
 * @returns @c true on success, @c false otherwise.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          EMT.
 * @param   pu32Intercepts  Where to store the exception intercepts. Only updated
 *                          when @c true is returned.
 */
VMM_INT_DECL(bool) HMGetGuestSvmXcptIntercepts(PCVMCPU pVCpu, uint32_t *pu32Intercepts)
{
    Assert(pu32Intercepts);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    if (pVmcbNstGstCache->fCacheValid)
    {
        *pu32Intercepts = pVmcbNstGstCache->u32InterceptXcpt;
        return true;
    }
    return false;
}


/**
 * Gets the SVM nested-guest virtual-interrupt masking bit if cached by HM.
 *
 * @returns @c true on success, @c false otherwise.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          EMT.
 * @param   pfVIntrMasking  Where to store the virtual-interrupt masking bit.
 *                          Updated only when @c true is returned.
 */
VMM_INT_DECL(bool) HMGetGuestSvmVirtIntrMasking(PCVMCPU pVCpu, bool *pfVIntrMasking)
{
    Assert(pfVIntrMasking);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    if (pVmcbNstGstCache->fCacheValid)
    {
        *pfVIntrMasking = pVmcbNstGstCache->fVIntrMasking;
        return true;
    }
    return false;
}


/**
 * Gets the SVM nested-guest nested-paging bit if cached by HM.
 *
 * @returns @c true on success, @c false otherwise.
 * @param   pVCpu           The cross context virtual CPU structure of the
 *                          calling EMT.
 * @param   pfNestedPaging  Where to store the nested-paging bit. Updated only
 *                          when @c true is returned.
 */
VMM_INT_DECL(bool) HMGetGuestSvmNestedPaging(PCVMCPU pVCpu, bool *pfNestedPaging)
{
    Assert(pfNestedPaging);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    if (pVmcbNstGstCache->fCacheValid)
    {
        *pfNestedPaging = pVmcbNstGstCache->fNestedPaging;
        return true;
    }
    return false;
}


/**
 * Gets the SVM nested-guest pause-filter count if cached by HM.
 *
 * @returns @c true on success, @c false otherwise.
 * @param   pVCpu                   The cross context virtual CPU structure of
 *                                  the calling EMT.
 * @param   pu16PauseFilterCount    Where to store the pause-filter count. Only
 *                                  updated when @c true is returned.
 */
VMM_INT_DECL(bool) HMGetGuestSvmPauseFilterCount(PCVMCPU pVCpu, uint16_t *pu16PauseFilterCount)
{
    Assert(pu16PauseFilterCount);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    if (pVmcbNstGstCache->fCacheValid)
    {
        *pu16PauseFilterCount = pVmcbNstGstCache->u16PauseFilterCount;
        return true;
    }
    return false;
}


/**
 * Gets the SVM nested-guest TSC offset if cached by HM.
 *
 * @returns @c true on success, @c false otherwise.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          EMT.
 * @param   pu64TscOffset   Where to store the TSC offset. Only updated when
 *                          @c true is returned.
 */
VMM_INT_DECL(bool) HMGetGuestSvmTscOffset(PCVMCPU pVCpu, uint64_t *pu64TscOffset)
{
    Assert(pu64TscOffset);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    if (pVmcbNstGstCache->fCacheValid)
    {
        *pu64TscOffset = pVmcbNstGstCache->u64TSCOffset;
        return true;
    }
    return false;
}
