VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp@ 72065

Last change on this file since 72065 was 72065, checked in by vboxsync, 7 years ago

VMM/SVM: Interrupt injection fixes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 21.4 KB
/* $Id: HMSVMAll.cpp 72065 2018-04-30 06:27:34Z vboxsync $ */
/** @file
 * HM SVM (AMD-V) - All contexts.
 */

/*
 * Copyright (C) 2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#define VMCPU_INCL_CPUM_GST_CTX
#include "HMInternal.h"
#include <VBox/vmm/apic.h>
#include <VBox/vmm/gim.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/vm.h>
#include <VBox/vmm/hm_svm.h>


#ifndef IN_RC
/**
 * Emulates a simple MOV TPR (CR8) instruction.
 *
 * Used for TPR patching on 32-bit guests. This simply looks up the patch record
 * at EIP and performs the required emulation.
 *
 * This VMMCALL is used as a fallback mechanism when the mov to/from cr8
 * instruction isn't exactly in the form we expect (e.g. not followed by a
 * shr 4 as is usually done for TPR). See hmR3ReplaceTprInstr() for the details.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS if the access was handled successfully.
 * @retval  VERR_NOT_FOUND if no patch record for this RIP could be found.
 * @retval  VERR_SVM_UNEXPECTED_PATCH_TYPE if the found patch type is invalid.
 *
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   pCtx                Pointer to the guest-CPU context.
 * @param   pfUpdateRipAndRF    Where to store whether the guest RIP/EIP has
 *                              been updated as part of the TPR patch operation.
 */
static int hmSvmEmulateMovTpr(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfUpdateRipAndRF)
{
    Log4(("Emulated VMMCall TPR access replacement at RIP=%RGv\n", pCtx->rip));

    /*
     * We do this in a loop as we increment the RIP after a successful emulation
     * and the new RIP may be a patched instruction which needs emulation as well.
     */
    bool fUpdateRipAndRF = false;
    bool fPatchFound     = false;
    PVM  pVM = pVCpu->CTX_SUFF(pVM);
    for (;;)
    {
        bool    fPending;
        uint8_t u8Tpr;

        PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
        if (!pPatch)
            break;

        fPatchFound = true;
        switch (pPatch->enmType)
        {
            case HMTPRINSTR_READ:
            {
                int rc = APICGetTpr(pVCpu, &u8Tpr, &fPending, NULL /* pu8PendingIrq */);
                AssertRC(rc);

                rc = DISWriteReg32(CPUMCTX2CORE(pCtx), pPatch->uDstOperand, u8Tpr);
                AssertRC(rc);
                pCtx->rip += pPatch->cbOp;
                pCtx->eflags.Bits.u1RF = 0;
                fUpdateRipAndRF = true;
                break;
            }

            case HMTPRINSTR_WRITE_REG:
            case HMTPRINSTR_WRITE_IMM:
            {
                if (pPatch->enmType == HMTPRINSTR_WRITE_REG)
                {
                    uint32_t u32Val;
                    int rc = DISFetchReg32(CPUMCTX2CORE(pCtx), pPatch->uSrcOperand, &u32Val);
                    AssertRC(rc);
                    u8Tpr = u32Val;
                }
                else
                    u8Tpr = (uint8_t)pPatch->uSrcOperand;

                int rc2 = APICSetTpr(pVCpu, u8Tpr);
                AssertRC(rc2);
                HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);

                pCtx->rip += pPatch->cbOp;
                pCtx->eflags.Bits.u1RF = 0;
                fUpdateRipAndRF = true;
                break;
            }

            default:
            {
                AssertMsgFailed(("Unexpected patch type %d\n", pPatch->enmType));
                pVCpu->hm.s.u32HMError = pPatch->enmType;
                *pfUpdateRipAndRF = fUpdateRipAndRF;
                return VERR_SVM_UNEXPECTED_PATCH_TYPE;
            }
        }
    }

    *pfUpdateRipAndRF = fUpdateRipAndRF;
    if (fPatchFound)
        return VINF_SUCCESS;
    return VERR_NOT_FOUND;
}


/**
 * Notification callback for when a \#VMEXIT happens outside SVM R0 code (e.g.
 * in IEM).
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest-CPU context.
 *
 * @sa      hmR0SvmVmRunCacheVmcb.
 */
VMM_INT_DECL(void) HMSvmNstGstVmExitNotify(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    if (pCtx->hwvirt.svm.fHMCachedVmcb)
    {
        PSVMVMCBCTRL        pVmcbNstGstCtrl  = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
        PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;

        /*
         * Restore fields as our own code might look at the VMCB controls as part
         * of the #VMEXIT handling in IEM. Otherwise, strictly speaking we don't need to
         * restore these fields because currently none of them are written back to memory
         * by a physical CPU on #VMEXIT.
         */
        pVmcbNstGstCtrl->u16InterceptRdCRx                 = pVmcbNstGstCache->u16InterceptRdCRx;
        pVmcbNstGstCtrl->u16InterceptWrCRx                 = pVmcbNstGstCache->u16InterceptWrCRx;
        pVmcbNstGstCtrl->u16InterceptRdDRx                 = pVmcbNstGstCache->u16InterceptRdDRx;
        pVmcbNstGstCtrl->u16InterceptWrDRx                 = pVmcbNstGstCache->u16InterceptWrDRx;
        pVmcbNstGstCtrl->u16PauseFilterThreshold           = pVmcbNstGstCache->u16PauseFilterThreshold;
        pVmcbNstGstCtrl->u16PauseFilterCount               = pVmcbNstGstCache->u16PauseFilterCount;
        pVmcbNstGstCtrl->u32InterceptXcpt                  = pVmcbNstGstCache->u32InterceptXcpt;
        pVmcbNstGstCtrl->u64InterceptCtrl                  = pVmcbNstGstCache->u64InterceptCtrl;
        pVmcbNstGstCtrl->u64TSCOffset                      = pVmcbNstGstCache->u64TSCOffset;
        pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking          = pVmcbNstGstCache->fVIntrMasking;
        pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging = pVmcbNstGstCache->fNestedPaging;
        pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt               = pVmcbNstGstCache->fLbrVirt;
        pCtx->hwvirt.svm.fHMCachedVmcb = false;
    }

    /*
     * Currently, VMRUN and #VMEXIT transitions involve trips to ring-3 that would flag a full
     * CPU state change. However, if we exit to ring-3 in response to receiving a physical
     * interrupt, we skip signaling any CPU state change as normally no change is done to the
     * execution state (see VINF_EM_RAW_INTERRUPT handling in hmR0SvmExitToRing3).
     *
     * With nested-guests, the state can change on the trip to ring-3; for example, we might
     * perform an SVM_EXIT_INTR #VMEXIT for the nested-guest in ring-3. Hence we signal a full
     * CPU state change here.
     */
    HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
}


/**
 * Checks if the Virtual GIF (Global Interrupt Flag) feature is supported and
 * enabled for the VM.
 *
 * @returns @c true if VGIF is enabled, @c false otherwise.
 * @param   pVM     The cross context VM structure.
 *
 * @remarks The value returned by this function is expected by the callers not
 *          to change throughout the lifetime of the VM.
 */
VMM_INT_DECL(bool) HMSvmIsVGifActive(PVM pVM)
{
    bool const fVGif    = RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VGIF);
    bool const fUseVGif = fVGif && pVM->hm.s.svm.fVGif;

    return HMIsEnabled(pVM) && fVGif && fUseVGif;
}


/**
 * Applies the TSC offset of an SVM nested-guest, if any, and returns the new
 * TSC value for the nested-guest.
 *
 * @returns The TSC value after applying any nested-guest TSC offset.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   uTicks  The guest TSC.
 *
 * @remarks This function looks at the VMCB cache rather than directly at the
 *          nested-guest VMCB. The latter may have been modified for execution
 *          using hardware-assisted SVM.
 *
 * @sa      CPUMApplyNestedGuestTscOffset.
 */
VMM_INT_DECL(uint64_t) HMSvmNstGstApplyTscOffset(PVMCPU pVCpu, uint64_t uTicks)
{
    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
    Assert(pCtx->hwvirt.svm.fHMCachedVmcb);
    NOREF(pCtx);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return uTicks + pVmcbNstGstCache->u64TSCOffset;
}
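/*
 * Illustrative note (not part of the original source): the cached u64TSCOffset
 * is an unsigned 64-bit field, so the addition above also covers "negative"
 * offsets via two's-complement wrap-around, which is the common case when the
 * nested-guest TSC is meant to lag behind. A worked example, assuming a cached
 * offset of -0x1000:
 *
 *     uTicks (guest TSC)   = 0x0000000100000000
 *     u64TSCOffset         = 0xFFFFFFFFFFFFF000   (-0x1000)
 *     nested-guest TSC     = 0x00000000FFFFF000
 */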
#endif /* !IN_RC */


/**
 * Performs the operations necessary as part of VMMCALL instruction execution in
 * the guest.
 *
 * @returns Strict VBox status code (i.e. informational status codes too).
 * @retval  VINF_SUCCESS on successful handling; no \#UD needs to be thrown,
 *          update RIP and eflags.RF depending on @a pfUpdatedRipAndRF and
 *          continue guest execution.
 * @retval  VINF_GIM_HYPERCALL_CONTINUING continue hypercall without updating
 *          RIP.
 * @retval  VINF_GIM_R3_HYPERCALL re-start the hypercall from ring-3.
 *
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   pCtx                Pointer to the guest-CPU context.
 * @param   pfUpdatedRipAndRF   Where to store whether the guest RIP/EIP has
 *                              been updated as part of handling the VMMCALL
 *                              operation.
 */
VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmmcall(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfUpdatedRipAndRF)
{
#ifndef IN_RC
    /*
     * TPR patched instruction emulation for 32-bit guests.
     */
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (pVM->hm.s.fTprPatchingAllowed)
    {
        int rc = hmSvmEmulateMovTpr(pVCpu, pCtx, pfUpdatedRipAndRF);
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;

        if (rc != VERR_NOT_FOUND)
        {
            Log(("hmSvmExitVmmCall: hmSvmEmulateMovTpr returns %Rrc\n", rc));
            return rc;
        }
    }
#endif

    /*
     * Paravirtualized hypercalls.
     */
    *pfUpdatedRipAndRF = false;
    if (pVCpu->hm.s.fHypercallsEnabled)
        return GIMHypercall(pVCpu, pCtx);

    return VERR_NOT_AVAILABLE;
}


/**
 * Converts an SVM event type to a TRPM event type.
 *
 * @returns The TRPM event type.
 * @retval  TRPM_32BIT_HACK if the specified type of event isn't among the set
 *          of recognized trap types.
 *
 * @param   pEvent  Pointer to the SVM event.
 */
VMM_INT_DECL(TRPMEVENT) HMSvmEventToTrpmEventType(PCSVMEVENT pEvent)
{
    uint8_t const uType = pEvent->n.u3Type;
    switch (uType)
    {
        case SVM_EVENT_EXTERNAL_IRQ:    return TRPM_HARDWARE_INT;
        case SVM_EVENT_SOFTWARE_INT:    return TRPM_SOFTWARE_INT;
        case SVM_EVENT_EXCEPTION:
        case SVM_EVENT_NMI:             return TRPM_TRAP;
        default:
            break;
    }
    AssertMsgFailed(("HMSvmEventToTrpmEvent: Invalid pending-event type %#x\n", uType));
    return TRPM_32BIT_HACK;
}


/**
 * Gets the MSR permission bitmap byte and bit offset for the specified MSR.
 *
 * @returns VBox status code.
 * @param   idMsr       The MSR being requested.
 * @param   pbOffMsrpm  Where to store the byte offset in the MSR permission
 *                      bitmap for @a idMsr.
 * @param   puMsrpmBit  Where to store the bit offset starting at the byte
 *                      returned in @a pbOffMsrpm.
 */
VMM_INT_DECL(int) HMSvmGetMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit)
{
    Assert(pbOffMsrpm);
    Assert(puMsrpmBit);

    /*
     * MSRPM Layout:
     * Byte offset          MSR range
     * 0x000  - 0x7ff       0x00000000 - 0x00001fff
     * 0x800  - 0xfff       0xc0000000 - 0xc0001fff
     * 0x1000 - 0x17ff      0xc0010000 - 0xc0011fff
     * 0x1800 - 0x1fff      Reserved
     *
     * Each MSR is represented by 2 permission bits (read and write).
     */
    if (idMsr <= 0x00001fff)
    {
        /* Pentium-compatible MSRs. */
        uint32_t const bitoffMsr = idMsr << 1;
        *pbOffMsrpm = bitoffMsr >> 3;
        *puMsrpmBit = bitoffMsr & 7;
        return VINF_SUCCESS;
    }

    if (   idMsr >= 0xc0000000
        && idMsr <= 0xc0001fff)
    {
        /* AMD Sixth Generation x86 Processor MSRs. */
        uint32_t const bitoffMsr = (idMsr - 0xc0000000) << 1;
        *pbOffMsrpm = 0x800 + (bitoffMsr >> 3);
        *puMsrpmBit = bitoffMsr & 7;
        return VINF_SUCCESS;
    }

    if (   idMsr >= 0xc0010000
        && idMsr <= 0xc0011fff)
    {
        /* AMD Seventh and Eighth Generation Processor MSRs. */
        uint32_t const bitoffMsr = (idMsr - 0xc0010000) << 1;
        *pbOffMsrpm = 0x1000 + (bitoffMsr >> 3);
        *puMsrpmBit = bitoffMsr & 7;
        return VINF_SUCCESS;
    }

    *pbOffMsrpm = 0;
    *puMsrpmBit = 0;
    return VERR_OUT_OF_RANGE;
}
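/*
 * Illustrative example (not part of the original source) of how the layout
 * above works out for a concrete MSR. Taking MSR 0xc0000080 (EFER), which
 * falls in the 0xc0000000 - 0xc0001fff range:
 *
 *     bitoffMsr   = (0xc0000080 - 0xc0000000) << 1 = 0x100
 *     *pbOffMsrpm = 0x800 + (0x100 >> 3)           = 0x820
 *     *puMsrpmBit = 0x100 & 7                      = 0
 *
 * A hypothetical caller holding a pointer to the 8 KB MSRPM (pvMsrBitmap below
 * is an assumption, not a name from this file) could then test the read bit
 * and the adjacent write bit of the 2-bit pair like this:
 *
 *     uint16_t offMsrpm;
 *     uint8_t  uMsrpmBit;
 *     if (RT_SUCCESS(HMSvmGetMsrpmOffsetAndBit(0xc0000080, &offMsrpm, &uMsrpmBit)))
 *     {
 *         uint8_t const bMsrpm       = ((uint8_t const *)pvMsrBitmap)[offMsrpm];
 *         bool const fInterceptRead  = RT_BOOL(bMsrpm & RT_BIT(uMsrpmBit));
 *         bool const fInterceptWrite = RT_BOOL(bMsrpm & RT_BIT(uMsrpmBit + 1));
 *     }
 *
 * Since bitoffMsr is always even, the read/write pair never straddles a byte
 * boundary, so a single byte load suffices here.
 */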


/**
 * Determines whether an IOIO intercept is active for the nested-guest.
 *
 * @returns @c true if the IO access should be intercepted, @c false otherwise.
 * @param   pvIoBitmap      Pointer to the nested-guest IO bitmap.
 * @param   u16Port         The IO port being accessed.
 * @param   enmIoType       The type of IO access.
 * @param   cbReg           The IO operand size in bytes.
 * @param   cAddrSizeBits   The address size bits (16, 32 or 64).
 * @param   iEffSeg         The effective segment number.
 * @param   fRep            Whether this is a repeating IO instruction (REP prefix).
 * @param   fStrIo          Whether this is a string IO instruction.
 * @param   pIoExitInfo     Pointer to the SVMIOIOEXITINFO struct to be filled.
 *                          Optional, can be NULL.
 */
VMM_INT_DECL(bool) HMSvmIsIOInterceptActive(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
                                            uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
                                            PSVMIOIOEXITINFO pIoExitInfo)
{
    Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
    Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);

    /*
     * The IOPM layout:
     * Each bit represents one 8-bit port, giving 65536 bits (for ports 0..65535),
     * i.e. two 4K pages.
     *
     * For IO instructions that access more than a single byte, the permission bits
     * for all bytes are checked; if any bit is set to 1, the IO access is intercepted.
     *
     * Since it's possible to do a 32-bit IO access at port 65534 (accessing 4 bytes),
     * we need 3 extra bits beyond the second 4K page.
     */
    static const uint16_t s_auSizeMasks[] = { 0, 1, 3, 0, 0xf, 0, 0, 0 };

    uint16_t const offIopm   = u16Port >> 3;
    uint16_t const fSizeMask = s_auSizeMasks[(cAddrSizeBits >> SVM_IOIO_OP_SIZE_SHIFT) & 7];
    uint8_t  const cShift    = u16Port - (offIopm << 3);
    uint16_t const fIopmMask = (1 << cShift) | (fSizeMask << cShift);

    uint8_t const *pbIopm = (uint8_t *)pvIoBitmap;
    Assert(pbIopm);
    pbIopm += offIopm;
    uint16_t const u16Iopm = *(uint16_t *)pbIopm;
    if (u16Iopm & fIopmMask)
    {
        if (pIoExitInfo)
        {
            static const uint32_t s_auIoOpSize[] =
            { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };

            static const uint32_t s_auIoAddrSize[] =
            { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };

            pIoExitInfo->u         = s_auIoOpSize[cbReg & 7];
            pIoExitInfo->u        |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
            pIoExitInfo->n.u1Str   = fStrIo;
            pIoExitInfo->n.u1Rep   = fRep;
            pIoExitInfo->n.u3Seg   = iEffSeg & 7;
            pIoExitInfo->n.u1Type  = enmIoType;
            pIoExitInfo->n.u16Port = u16Port;
        }
        return true;
    }

    /** @todo remove later (for debugging as VirtualBox always traps all IO
     *        intercepts). */
    AssertMsgFailed(("iemSvmHandleIOIntercept: We expect an IO intercept here!\n"));
    return false;
}
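/*
 * Illustrative example (not part of the original source): for an access at
 * port 0x170 the lookup above computes
 *
 *     offIopm = 0x170 >> 3          = 0x2e   (byte offset into the IOPM)
 *     cShift  = 0x170 - (0x2e << 3) = 0      (bit position of port 0x170 in that byte)
 *
 * fIopmMask then covers the bit for port 0x170 itself plus, via fSizeMask, the
 * bits for any further port bytes touched by a multi-byte access (e.g. a mask
 * of 0xf << cShift for an access spanning four port bytes). Reading u16Iopm as
 * a 16-bit value is what makes the check work even when that mask straddles a
 * byte boundary, e.g. a 4-byte access at port 0x176 where cShift is 6.
 */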


/**
 * Checks if the nested-guest VMCB has the specified ctrl/instruction intercept
 * active.
 *
 * @returns @c true if the intercept is set, @c false otherwise.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pCtx        Pointer to the context.
 * @param   fIntercept  The SVM control/instruction intercept, see
 *                      SVM_CTRL_INTERCEPT_*.
 */
VMM_INT_DECL(bool) HMIsGuestSvmCtrlInterceptSet(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fIntercept)
{
    Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u64InterceptCtrl & fIntercept);
}


/**
 * Checks if the nested-guest VMCB has the specified CR read intercept active.
 *
 * @returns @c true if the intercept is set, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pCtx    Pointer to the context.
 * @param   uCr     The CR register number (0 to 15).
 */
VMM_INT_DECL(bool) HMIsGuestSvmReadCRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr)
{
    Assert(uCr < 16);
    Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u16InterceptRdCRx & (1 << uCr));
}


/**
 * Checks if the nested-guest VMCB has the specified CR write intercept active.
 *
 * @returns @c true if the intercept is set, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pCtx    Pointer to the context.
 * @param   uCr     The CR register number (0 to 15).
 */
VMM_INT_DECL(bool) HMIsGuestSvmWriteCRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr)
{
    Assert(uCr < 16);
    Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u16InterceptWrCRx & (1 << uCr));
}


/**
 * Checks if the nested-guest VMCB has the specified DR read intercept active.
 *
 * @returns @c true if the intercept is set, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pCtx    Pointer to the context.
 * @param   uDr     The DR register number (0 to 15).
 */
VMM_INT_DECL(bool) HMIsGuestSvmReadDRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr)
{
    Assert(uDr < 16);
    Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u16InterceptRdDRx & (1 << uDr));
}


/**
 * Checks if the nested-guest VMCB has the specified DR write intercept active.
 *
 * @returns @c true if the intercept is set, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pCtx    Pointer to the context.
 * @param   uDr     The DR register number (0 to 15).
 */
VMM_INT_DECL(bool) HMIsGuestSvmWriteDRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr)
{
    Assert(uDr < 16);
    Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u16InterceptWrDRx & (1 << uDr));
}


/**
 * Checks if the nested-guest VMCB has the specified exception intercept active.
 *
 * @returns @c true if the intercept is active, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pCtx    Pointer to the context.
 * @param   uVector The exception / interrupt vector.
 */
VMM_INT_DECL(bool) HMIsGuestSvmXcptInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uVector)
{
    Assert(uVector < 32);
    Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u32InterceptXcpt & (1 << uVector));
}


/**
 * Checks if the nested-guest VMCB has virtual-interrupt masking enabled.
 *
 * @returns @c true if virtual-interrupts are masked, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pCtx    Pointer to the context.
 */
VMM_INT_DECL(bool) HMIsGuestSvmVirtIntrMasking(PVMCPU pVCpu, PCCPUMCTX pCtx)
{
    Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return pVmcbNstGstCache->fVIntrMasking;
}


/**
 * Checks if the nested-guest VMCB has nested-paging enabled.
 *
 * @returns @c true if nested-paging is enabled, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pCtx    Pointer to the context.
 */
VMM_INT_DECL(bool) HMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu, PCCPUMCTX pCtx)
{
    Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return pVmcbNstGstCache->fNestedPaging;
}


/**
 * Returns the nested-guest VMCB pause-filter count.
 *
 * @returns The pause-filter count.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pCtx    Pointer to the context.
 */
VMM_INT_DECL(uint16_t) HMGetGuestSvmPauseFilterCount(PVMCPU pVCpu, PCCPUMCTX pCtx)
{
    Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return pVmcbNstGstCache->u16PauseFilterCount;
}