Changeset 73111 in vbox
- Timestamp:
- Jul 13, 2018 7:44:00 AM (6 years ago)
- File:
- 1 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (modified) (30 diffs)
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r73097 r73111 175 175 } while (0) 176 176 #endif 177 178 /** Macro which updates interrupt shadow for the current RIP. */179 #define HMSVM_UPDATE_INTR_SHADOW(a_pVCpu) \180 do { \181 /* Update interrupt shadow. */ \182 if ( VMCPU_FF_IS_PENDING((a_pVCpu), VMCPU_FF_INHIBIT_INTERRUPTS) \183 && (a_pVCpu)->cpum.GstCtx.rip != EMGetInhibitInterruptsPC((a_pVCpu))) \184 VMCPU_FF_CLEAR((a_pVCpu), VMCPU_FF_INHIBIT_INTERRUPTS); \185 } while (0)186 177 187 178 /** Macro for upgrading a @a a_rc to VINF_EM_DBG_STEPPED after emulating an … … 6034 6025 6035 6026 /** 6036 * Advances the guest RIP making use of the CPU's NRIP_SAVE feature if 6037 * supported, otherwise advances the RIP by the number of bytes specified in 6038 * @a cb. 6039 * 6040 * @param pVCpu The cross context virtual CPU structure. 6041 * @param cb RIP increment value in bytes when the CPU doesn't support 6042 * NRIP_SAVE. 6043 */ 6044 DECLINLINE(void) hmR0SvmAdvanceRipHwAssist(PVMCPU pVCpu, uint32_t cb) 6045 { 6046 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 6047 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu); 6048 if (fSupportsNextRipSave) 6049 { 6050 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 6051 Assert(pVmcb); 6052 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP)); 6053 Assert(pVmcb->ctrl.u64NextRIP - pCtx->rip == cb); 6054 pCtx->rip = pVmcb->ctrl.u64NextRIP; 6055 } 6056 else 6057 pCtx->rip += cb; 6058 6059 HMSVM_UPDATE_INTR_SHADOW(pVCpu); 6060 } 6061 6062 6063 /** 6064 * Gets the length of the current instruction when the CPU supports the NRIP_SAVE 6065 * feature. 6066 * 6067 * @returns The current instruction length in bytes. 6068 * @param pVCpu The cross context virtual CPU structure. 6069 * 6070 * @remarks Requires the NRIP_SAVE feature to be supported by the CPU. 
6071 */ 6072 DECLINLINE(uint8_t) hmR0SvmGetInstrLength(PVMCPU pVCpu) 6073 { 6074 Assert(hmR0SvmSupportsNextRipSave(pVCpu)); 6075 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 6076 return pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip; 6077 } 6078 6079 6080 /** 6081 * Advances the guest RIP by the number of bytes specified in @a cb. This does 6082 * not make use of any hardware features to determine the instruction length. 6027 * Advances the guest RIP by the number of bytes specified in @a cb. 6083 6028 * 6084 6029 * @param pVCpu The cross context virtual CPU structure. 6085 6030 * @param cb RIP increment value in bytes. 6086 6031 */ 6087 DECLINLINE(void) hmR0SvmAdvanceRipDumb(PVMCPU pVCpu, uint32_t cb) 6088 { 6089 pVCpu->cpum.GstCtx.rip += cb; 6090 HMSVM_UPDATE_INTR_SHADOW(pVCpu); 6091 } 6092 #undef HMSVM_UPDATE_INTR_SHADOW 6032 DECLINLINE(void) hmR0SvmAdvanceRip(PVMCPU pVCpu, uint32_t cb) 6033 { 6034 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 6035 pCtx->rip += cb; 6036 6037 /* Update interrupt shadow. 
*/ 6038 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) 6039 && pCtx->rip != EMGetInhibitInterruptsPC(pVCpu)) 6040 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 6041 } 6093 6042 6094 6043 … … 6139 6088 { 6140 6089 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK); 6141 uint8_t const cbInstr = hmR0SvmGetInstrLength(pVCpu); 6090 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 6091 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip; 6142 6092 rcStrict = IEMExecDecodedWbinvd(pVCpu, cbInstr); 6143 6093 } … … 6170 6120 { 6171 6121 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK); 6172 uint8_t const cbInstr = hmR0SvmGetInstrLength(pVCpu); 6122 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 6123 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip; 6173 6124 rcStrict = IEMExecDecodedInvd(pVCpu, cbInstr); 6174 6125 } … … 6206 6157 if (fSupportsNextRipSave) 6207 6158 { 6208 uint8_t const cbInstr = hmR0SvmGetInstrLength(pVCpu); 6159 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 6160 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip; 6209 6161 rcStrict = IEMExecDecodedCpuid(pVCpu, cbInstr); 6210 6162 } … … 6254 6206 { 6255 6207 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4); 6256 uint8_t const cbInstr = hmR0SvmGetInstrLength(pVCpu); 6208 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 6209 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip; 6257 6210 rcStrict = IEMExecDecodedRdtsc(pVCpu, cbInstr); 6258 6211 } … … 6286 6239 if (fSupportsNextRipSave) 6287 6240 { 6288 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 6289 | CPUMCTX_EXTRN_TSC_AUX);6290 uint8_t const cbInstr = hmR0SvmGetInstrLength(pVCpu);6241 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | 
CPUMCTX_EXTRN_TSC_AUX); 6242 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 6243 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip; 6291 6244 rcStrict = IEMExecDecodedRdtscp(pVCpu, cbInstr); 6292 6245 } … … 6321 6274 { 6322 6275 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4); 6323 uint8_t const cbInstr = hmR0SvmGetInstrLength(pVCpu); 6276 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 6277 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip; 6324 6278 rcStrict = IEMExecDecodedRdpmc(pVCpu, cbInstr); 6325 6279 } … … 6355 6309 { 6356 6310 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK); 6357 PCSVMVMCB pVmcb= hmR0SvmGetCurrentVmcb(pVCpu);6311 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 6358 6312 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip; 6359 6313 RTGCPTR const GCPtrPage = pVmcb->ctrl.u64ExitInfo1; … … 6388 6342 { 6389 6343 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK); 6390 uint8_t const cbInstr = hmR0SvmGetInstrLength(pVCpu); 6344 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 6345 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip; 6391 6346 rcStrict = IEMExecDecodedHlt(pVCpu, cbInstr); 6392 6347 } … … 6408 6363 } 6409 6364 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict); 6365 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt); 6410 6366 if (rcStrict != VINF_SUCCESS) 6411 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHltToR3);6367 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHltToR3); 6412 6368 return VBOXSTRICTRC_VAL(rcStrict);; 6413 6369 } … … 6422 6378 6423 6379 /* 6424 * SVM unfortunately does not provide us with any segment override prefix information. 
6425 * 6426 * If the instruction length supplied by the CPU is 3 bytes, we can be certain that no 6380 * If the instruction length is supplied by the CPU is 3 bytes, we can be certain that no 6427 6381 * segment override prefix is present (and thus use the default segment DS). Otherwise, a 6428 6382 * segment override prefix or other prefixes might be used, in which case we fallback to 6429 * IEMExecOne() to handle it.6383 * IEMExecOne() to figure out. 6430 6384 */ 6431 6385 VBOXSTRICTRC rcStrict; 6432 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);6433 uint8_t const cbInstr = fSupportsNextRipSave ? hmR0SvmGetInstrLength(pVCpu): 0;6434 if (cbInstr == 3)6386 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 6387 uint8_t const cbInstr = hmR0SvmSupportsNextRipSave(pVCpu) ? pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip : 0; 6388 if (cbInstr) 6435 6389 { 6436 6390 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS); … … 6466 6420 { 6467 6421 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK); 6468 uint8_t const cbInstr = hmR0SvmGetInstrLength(pVCpu); 6422 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 6423 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip; 6469 6424 rcStrict = IEMExecDecodedMwait(pVCpu, cbInstr); 6470 6425 } … … 6545 6500 if (fMovCRx) 6546 6501 { 6547 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR 3 | CPUMCTX_EXTRN_CR46502 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR_MASK 6548 6503 | CPUMCTX_EXTRN_APIC_TPR); 6549 6504 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip; … … 6689 6644 * can ask for what it needs instead of using CPUMCTX_EXTRN_ALL_MSRS. 
*/ 6690 6645 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS); 6691 rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmcb->ctrl.u64NextRIP - pCtx->rip); 6646 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip; 6647 rcStrict = IEMExecDecodedRdmsr(pVCpu, cbInstr); 6692 6648 } 6693 6649 else … … 6731 6687 * We utilitize the LSTAR MSR for patching. 6732 6688 */ 6689 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu); 6733 6690 if ( pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive 6734 6691 && idMsr == MSR_K8_LSTAR) 6735 6692 { 6693 unsigned cbInstr; 6694 if (fSupportsNextRipSave) 6695 cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip; 6696 else 6697 { 6698 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState; 6699 int rc = EMInterpretDisasCurrent(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, &cbInstr); 6700 if ( rc == VINF_SUCCESS 6701 && pDis->pCurInstr->uOpcode == OP_WRMSR) 6702 Assert(cbInstr > 0); 6703 else 6704 cbInstr = 0; 6705 } 6706 6707 /* Our patch code uses LSTAR for TPR caching for 32-bit guests. */ 6736 6708 if ((pCtx->eax & 0xff) != pSvmTransient->u8GuestTpr) 6737 6709 { 6738 /* Our patch code uses LSTAR for TPR caching for 32-bit guests. */ 6739 int rc2 = APICSetTpr(pVCpu, pCtx->eax & 0xff); 6740 AssertRC(rc2); 6710 int rc = APICSetTpr(pVCpu, pCtx->eax & 0xff); 6711 AssertRCReturn(rc, rc); 6741 6712 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR); 6742 6713 } 6743 6714 6744 6715 int rc = VINF_SUCCESS; 6745 hmR0SvmAdvanceRip HwAssist(pVCpu, 2);6716 hmR0SvmAdvanceRip(pVCpu, cbInstr); 6746 6717 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); 6747 6718 return rc; … … 6752 6723 */ 6753 6724 VBOXSTRICTRC rcStrict; 6754 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);6755 6725 if (fSupportsNextRipSave) 6756 6726 { … … 6759 6729 * clear the applicable extern flags. 
*/ 6760 6730 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS); 6761 rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmcb->ctrl.u64NextRIP - pCtx->rip); 6731 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip; 6732 rcStrict = IEMExecDecodedWrmsr(pVCpu, cbInstr); 6762 6733 } 6763 6734 else … … 7385 7356 if (EMAreHypercallInstructionsEnabled(pVCpu)) 7386 7357 { 7358 unsigned cbInstr; 7359 if (hmR0SvmSupportsNextRipSave(pVCpu)) 7360 { 7361 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 7362 cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip; 7363 } 7364 else 7365 { 7366 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState; 7367 int rc = EMInterpretDisasCurrent(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, &cbInstr); 7368 if ( rc == VINF_SUCCESS 7369 && pDis->pCurInstr->uOpcode == OP_VMMCALL) 7370 Assert(cbInstr > 0); 7371 else 7372 cbInstr = 0; 7373 } 7374 7387 7375 VBOXSTRICTRC rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx); 7388 7376 if (RT_SUCCESS(rcStrict)) … … 7391 7379 of say VINF_GIM_R3_HYPERCALL. 
*/ 7392 7380 if (rcStrict == VINF_SUCCESS) 7393 hmR0SvmAdvanceRip HwAssist(pVCpu, 3 /* cbInstr */);7381 hmR0SvmAdvanceRip(pVCpu, cbInstr); 7394 7382 7395 7383 return VBOXSTRICTRC_VAL(rcStrict); … … 7410 7398 { 7411 7399 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 7412 hmR0SvmAdvanceRipHwAssist(pVCpu, 2); 7400 7401 VBOXSTRICTRC rcStrict; 7402 unsigned cbInstr; 7403 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu); 7404 if (fSupportsNextRipSave) 7405 { 7406 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 7407 cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip; 7408 } 7409 else 7410 { 7411 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState; 7412 int rc = EMInterpretDisasCurrent(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, &cbInstr); 7413 if ( rc == VINF_SUCCESS 7414 && pDis->pCurInstr->uOpcode == OP_PAUSE) 7415 Assert(cbInstr > 0); 7416 else 7417 cbInstr = 0; 7418 } 7419 7413 7420 /** @todo The guest has likely hit a contended spinlock. We might want to 7414 7421 * poke a schedule different guest VCPU. */ 7422 hmR0SvmAdvanceRip(pVCpu, cbInstr); 7415 7423 return VINF_EM_RAW_INTERRUPT; 7416 7424 } … … 7615 7623 { 7616 7624 /* #UD #VMEXIT does not have valid NRIP information, manually advance RIP. See @bugref{7270#c170}. 
*/ 7617 hmR0SvmAdvanceRip Dumb(pVCpu, cbInstr);7625 hmR0SvmAdvanceRip(pVCpu, cbInstr); 7618 7626 rc = VINF_SUCCESS; 7619 7627 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); … … 7660 7668 PVM pVM = pVCpu->CTX_SUFF(pVM); 7661 7669 PDISSTATE pDis = &pVCpu->hm.s.DisState; 7662 unsigned cb Op;7663 int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cb Op);7670 unsigned cbInstr; 7671 int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbInstr); 7664 7672 if (RT_SUCCESS(rc)) 7665 7673 { … … 7667 7675 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */); 7668 7676 if (RT_SUCCESS(rc)) 7669 pCtx->rip += cbOp;7677 hmR0SvmAdvanceRip(pVCpu, cbInstr); 7670 7678 } 7671 7679 else … … 7846 7854 if (fSupportsNextRipSave) 7847 7855 { 7848 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | fImport);7849 uint8_t const cbInstr = hmR0SvmGetInstrLength(pVCpu);7856 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 7857 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip; 7850 7858 rcStrict = IEMExecDecodedClgi(pVCpu, cbInstr); 7851 7859 } … … 7888 7896 if (fSupportsNextRipSave) 7889 7897 { 7890 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | fImport); 7891 uint8_t const cbInstr = hmR0SvmGetInstrLength(pVCpu); 7898 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip; 7892 7899 rcStrict = IEMExecDecodedStgi(pVCpu, cbInstr); 7893 7900 } … … 7931 7938 if (fSupportsNextRipSave) 7932 7939 { 7933 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | fImport); 7934 uint8_t const cbInstr = hmR0SvmGetInstrLength(pVCpu); 7940 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip; 7935 7941 rcStrict = IEMExecDecodedVmload(pVCpu, cbInstr); 7936 7942 } … … 7975 7981 if (fSupportsNextRipSave) 7976 7982 { 7977 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK); 7978 uint8_t const cbInstr = 
hmR0SvmGetInstrLength(pVCpu); 7983 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip; 7979 7984 rcStrict = IEMExecDecodedVmsave(pVCpu, cbInstr); 7980 7985 } … … 8006 8011 if (fSupportsNextRipSave) 8007 8012 { 8008 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);8009 uint8_t const cbInstr = hmR0SvmGetInstrLength(pVCpu);8013 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 8014 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip; 8010 8015 rcStrict = IEMExecDecodedInvlpga(pVCpu, cbInstr); 8011 8016 } … … 8041 8046 if (fSupportsNextRipSave) 8042 8047 { 8043 uint8_t const cbInstr = hmR0SvmGetInstrLength(pVCpu); 8048 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 8049 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip; 8044 8050 rcStrict = IEMExecDecodedVmrun(pVCpu, cbInstr); 8045 8051 }
Note: See TracChangeset for help on using the changeset viewer.

