Changeset 52041 in vbox
Timestamp:
    Jul 15, 2014 3:43:30 PM
Location:
    trunk
Files:
    7 edited
    include/VBox/vmm/hm_vmx.h (modified) (2 diffs)
    include/VBox/vmm/vm.h (modified) (1 diff)
    src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h (modified) (1 diff)
    src/VBox/VMM/VMMR0/HMSVMR0.cpp (modified) (21 diffs)
    src/VBox/VMM/VMMR0/HMVMXR0.cpp (modified) (14 diffs)
    src/VBox/VMM/VMMR3/HM.cpp (modified) (1 diff)
    src/recompiler/VBoxRecompiler.c (modified) (2 diffs)
trunk/include/VBox/vmm/hm_vmx.h
r51946 → r52041

  /** Use TPR shadow. */
  #define VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW                RT_BIT(21)
- /** VM-exit when virtual nmi blocking is disabled. */
+ /** VM-exit when virtual NMI blocking is disabled. */
  #define VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT               RT_BIT(22)
  /** VM-exit when executing a MOV DRx instruction. */
…
  #define VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID           RT_BIT(11)
  #define VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(a)     RT_BOOL((a) & VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID)
- #define VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK(a)             ((a) & RT_BIT(12))
+ #define VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK_IRET(a)        ((a) & RT_BIT(12))
  #define VMX_EXIT_INTERRUPTION_INFO_VALID                      RT_BIT(31)
  #define VMX_EXIT_INTERRUPTION_INFO_IS_VALID(a)                RT_BOOL((a) & RT_BIT(31))
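For illustration (not part of the changeset): the renamed bit-12 accessor matches the Intel SDM's "NMI unblocking due to IRET" semantics, i.e. the bit is set in the exit interruption-information field when the VM-exit occurred while an IRET was in the middle of removing NMI blocking. A minimal, self-contained sketch of how a caller might test it, using stand-in definitions rather than the real hm_vmx.h:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the IPRT/VBox definitions above; illustrative only. */
    #define RT_BIT(b)                                        (1U << (b))
    #define VMX_EXIT_INTERRUPTION_INFO_VALID                 RT_BIT(31)
    #define VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK_IRET(a)   ((a) & RT_BIT(12))

    int main(void)
    {
        /* Pretend exit-interruption info with bit 12 set by the CPU. */
        uint32_t uExitIntInfo = VMX_EXIT_INTERRUPTION_INFO_VALID | RT_BIT(12);
        if (VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK_IRET(uExitIntInfo))
            printf("VM-exit happened while IRET was unblocking NMIs\n");
        return 0;
    }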
trunk/include/VBox/vmm/vm.h
r51934 → r52041

  /** Inhibit interrupts pending. See EMGetInhibitInterruptsPC(). */
  #define VMCPU_FF_INHIBIT_INTERRUPTS         RT_BIT_32(24)
+ /** Inhibit non-maskable interrupts. */
+ #define VMCPU_FF_INHIBIT_NMIS               RT_BIT_32(25)
  #ifdef VBOX_WITH_RAW_MODE
  /** CSAM needs to scan the page that's being executed */
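For illustration (not part of the changeset): the new flag follows the usual one-bit-per-condition force-flag pattern. A toy model of the set/test/clear discipline; the real VMCPU_FF_* macros operate atomically on the VMCPU's forced-action mask, and these are simplified, non-atomic stand-ins:

    #include <stdint.h>
    #include <stdio.h>

    #define RT_BIT_32(b)                  (UINT32_C(1) << (b))
    #define VMCPU_FF_INHIBIT_INTERRUPTS   RT_BIT_32(24)
    #define VMCPU_FF_INHIBIT_NMIS         RT_BIT_32(25)

    int main(void)
    {
        uint32_t fLocalForcedActions = 0;

        fLocalForcedActions |= VMCPU_FF_INHIBIT_NMIS;        /* VMCPU_FF_SET */
        if (fLocalForcedActions & VMCPU_FF_INHIBIT_NMIS)     /* VMCPU_FF_IS_SET / _IS_PENDING */
            printf("NMI delivery inhibited until the guest executes an IRET\n");
        fLocalForcedActions &= ~VMCPU_FF_INHIBIT_NMIS;       /* VMCPU_FF_CLEAR */
        return 0;
    }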
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
r51434 → r52041

  IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
  {
+     /*
+      * First, clear NMI inhibition before causing any exceptions.
+      */
+     PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
+     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_NMIS);
+
      /*
       * Call a mode specific worker.
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r52009 → r52041

  typedef enum SVMMSREXITREAD
  {
-     /** Reading this MSR causes a VM-exit. */
+     /** Reading this MSR causes a #VMEXIT. */
      SVMMSREXIT_INTERCEPT_READ = 0xb,
-     /** Reading this MSR does not cause a VM-exit. */
+     /** Reading this MSR does not cause a #VMEXIT. */
      SVMMSREXIT_PASSTHRU_READ
  } SVMMSREXITREAD;
…
  typedef enum SVMMSREXITWRITE
  {
-     /** Writing to this MSR causes a VM-exit. */
+     /** Writing to this MSR causes a #VMEXIT. */
      SVMMSREXIT_INTERCEPT_WRITE = 0xd,
-     /** Writing to this MSR does not cause a VM-exit. */
+     /** Writing to this MSR does not cause a #VMEXIT. */
      SVMMSREXIT_PASSTHRU_WRITE
  } SVMMSREXITWRITE;

  /**
-  * SVM VM-exit handler.
+  * SVM #VMEXIT handler.
   *
   * @returns VBox status code.
…
  static void hmR0SvmLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);

- /** @name VM-exit handlers.
+ /** @name #VMEXIT handlers.
   * @{
   */
…
  static FNSVMEXITHANDLER hmR0SvmExitTaskSwitch;
  static FNSVMEXITHANDLER hmR0SvmExitVmmCall;
+ static FNSVMEXITHANDLER hmR0SvmExitIret;
  static FNSVMEXITHANDLER hmR0SvmExitXcptPF;
  static FNSVMEXITHANDLER hmR0SvmExitXcptNM;
…
      /* Set up unconditional intercepts and conditions. */
-     pVmcb->ctrl.u32InterceptCtrl1 = SVM_CTRL1_INTERCEPT_INTR          /* External interrupt causes a VM-exit. */
-                                   | SVM_CTRL1_INTERCEPT_NMI           /* Non-Maskable Interrupts causes a VM-exit. */
-                                   | SVM_CTRL1_INTERCEPT_INIT          /* INIT signal causes a VM-exit. */
-                                   | SVM_CTRL1_INTERCEPT_RDPMC         /* RDPMC causes a VM-exit. */
-                                   | SVM_CTRL1_INTERCEPT_CPUID         /* CPUID causes a VM-exit. */
-                                   | SVM_CTRL1_INTERCEPT_RSM           /* RSM causes a VM-exit. */
-                                   | SVM_CTRL1_INTERCEPT_HLT           /* HLT causes a VM-exit. */
-                                   | SVM_CTRL1_INTERCEPT_INOUT_BITMAP  /* Use the IOPM to cause IOIO VM-exits. */
-                                   | SVM_CTRL1_INTERCEPT_MSR_SHADOW    /* MSR access not covered by MSRPM causes a VM-exit.*/
-                                   | SVM_CTRL1_INTERCEPT_INVLPGA       /* INVLPGA causes a VM-exit. */
-                                   | SVM_CTRL1_INTERCEPT_SHUTDOWN      /* Shutdown events causes a VM-exit. */
+     pVmcb->ctrl.u32InterceptCtrl1 = SVM_CTRL1_INTERCEPT_INTR          /* External interrupt causes a #VMEXIT. */
+                                   | SVM_CTRL1_INTERCEPT_NMI           /* Non-maskable interrupts causes a #VMEXIT. */
+                                   | SVM_CTRL1_INTERCEPT_INIT          /* INIT signal causes a #VMEXIT. */
+                                   | SVM_CTRL1_INTERCEPT_RDPMC         /* RDPMC causes a #VMEXIT. */
+                                   | SVM_CTRL1_INTERCEPT_CPUID         /* CPUID causes a #VMEXIT. */
+                                   | SVM_CTRL1_INTERCEPT_RSM           /* RSM causes a #VMEXIT. */
+                                   | SVM_CTRL1_INTERCEPT_HLT           /* HLT causes a #VMEXIT. */
+                                   | SVM_CTRL1_INTERCEPT_INOUT_BITMAP  /* Use the IOPM to cause IOIO #VMEXITs. */
+                                   | SVM_CTRL1_INTERCEPT_MSR_SHADOW    /* MSR access not covered by MSRPM causes a #VMEXIT.*/
+                                   | SVM_CTRL1_INTERCEPT_INVLPGA       /* INVLPGA causes a #VMEXIT. */
+                                   | SVM_CTRL1_INTERCEPT_SHUTDOWN      /* Shutdown events causes a #VMEXIT. */
                                    | SVM_CTRL1_INTERCEPT_FERR_FREEZE;  /* Intercept "freezing" during legacy FPU handling. */

-     pVmcb->ctrl.u32InterceptCtrl2 = SVM_CTRL2_INTERCEPT_VMRUN         /* VMRUN causes a VM-exit. */
-                                   | SVM_CTRL2_INTERCEPT_VMMCALL       /* VMMCALL causes a VM-exit. */
-                                   | SVM_CTRL2_INTERCEPT_VMLOAD        /* VMLOAD causes a VM-exit. */
-                                   | SVM_CTRL2_INTERCEPT_VMSAVE        /* VMSAVE causes a VM-exit. */
-                                   | SVM_CTRL2_INTERCEPT_STGI          /* STGI causes a VM-exit. */
-                                   | SVM_CTRL2_INTERCEPT_CLGI          /* CLGI causes a VM-exit. */
-                                   | SVM_CTRL2_INTERCEPT_SKINIT        /* SKINIT causes a VM-exit. */
-                                   | SVM_CTRL2_INTERCEPT_WBINVD        /* WBINVD causes a VM-exit. */
-                                   | SVM_CTRL2_INTERCEPT_MONITOR       /* MONITOR causes a VM-exit. */
-                                   | SVM_CTRL2_INTERCEPT_MWAIT;        /* MWAIT causes a VM-exit. */
+     pVmcb->ctrl.u32InterceptCtrl2 = SVM_CTRL2_INTERCEPT_VMRUN         /* VMRUN causes a #VMEXIT. */
+                                   | SVM_CTRL2_INTERCEPT_VMMCALL       /* VMMCALL causes a #VMEXIT. */
+                                   | SVM_CTRL2_INTERCEPT_VMLOAD        /* VMLOAD causes a #VMEXIT. */
+                                   | SVM_CTRL2_INTERCEPT_VMSAVE        /* VMSAVE causes a #VMEXIT. */
+                                   | SVM_CTRL2_INTERCEPT_STGI          /* STGI causes a #VMEXIT. */
+                                   | SVM_CTRL2_INTERCEPT_CLGI          /* CLGI causes a #VMEXIT. */
+                                   | SVM_CTRL2_INTERCEPT_SKINIT        /* SKINIT causes a #VMEXIT. */
+                                   | SVM_CTRL2_INTERCEPT_WBINVD        /* WBINVD causes a #VMEXIT. */
+                                   | SVM_CTRL2_INTERCEPT_MONITOR       /* MONITOR causes a #VMEXIT. */
+                                   | SVM_CTRL2_INTERCEPT_MWAIT;        /* MWAIT causes a #VMEXIT. */

      /* CR0, CR4 reads must be intercepted, our shadow values are not necessarily the same as the guest's. */
…
      {
          u64GuestCR0 |= X86_CR0_PG;  /* When Nested Paging is not available, use shadow page tables. */
-         u64GuestCR0 |= X86_CR0_WP;  /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
+         u64GuestCR0 |= X86_CR0_WP;  /* Guest CPL 0 writes to its read-only pages should cause a #PF #VMEXIT. */
      }

…
      else
      {
-         fInterceptNM = true;        /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
+         fInterceptNM = true;        /* Guest FPU inactive, #VMEXIT on #NM for lazy FPU loading. */
          u64GuestCR0 |= X86_CR0_TS   /* Guest can task switch quickly and do lazy FPU syncing. */
                      | X86_CR0_MP;   /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
…
      if (pVmcb->ctrl.u64IntShadow & SVM_INTERRUPT_SHADOW_ACTIVE)
          EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
-     else
+     else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
          VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);

…

  /**
+  * Sets the IRET intercept control in the VMCB which instructs AMD-V to cause a
+  * #VMEXIT as soon as a guest starts executing an IRET. This is used to unblock
+  * virtual NMIs.
+  *
+  * @param   pVmcb       Pointer to the VM control block.
+  */
+ DECLINLINE(void) hmR0SvmSetIretIntercept(PSVMVMCB pVmcb)
+ {
+     if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_IRET))
+     {
+         pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_IRET;
+         pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);
+
+         Log4(("Setting IRET intercept\n"));
+     }
+ }
+
+
+ /**
+  * Clears the IRET intercept control in the VMCB.
+  *
+  * @param   pVmcb       Pointer to the VM control block.
+  */
+ DECLINLINE(void) hmR0SvmClearIretIntercept(PSVMVMCB pVmcb)
+ {
+     if (pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_IRET)
+     {
+         pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_IRET;
+         pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);
+
+         Log4(("Clearing IRET intercept\n"));
+     }
+ }
+
+
+ /**
   * Evaluates the event to be delivered to the guest and sets it as the pending
   * event.
…
      const bool fIntShadow = RT_BOOL(hmR0SvmGetGuestIntrShadow(pVCpu, pCtx));
      const bool fBlockInt  = !(pCtx->eflags.u32 & X86_EFL_IF);
+     const bool fBlockNmi  = RT_BOOL(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_NMIS));
      PSVMVMCB pVmcb        = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;

…
      if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI))     /* NMI. NMIs take priority over regular interrupts. */
      {
-         if (!fIntShadow)
+         if (fBlockNmi)
+             hmR0SvmSetIretIntercept(pVmcb);
+         else if (fIntShadow)
+             hmR0SvmSetVirtIntrIntercept(pVmcb);
+         else
          {
              Log4(("Pending NMI\n"));
…
              VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
          }
-         else
-             hmR0SvmSetVirtIntrIntercept(pVmcb);
      }
      else if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
…
      }

+     /*
+      * If we are injecting an NMI, we must set VMCPU_FF_INHIBIT_NMIS only when we are going to execute
+      * guest code for certain (no exits to ring-3). Otherwise, we could re-read the flag on re-entry into
+      * AMD-V and conclude that NMI inhibition is active when we have not even delivered the NMI.
+      *
+      * With VT-x, this is handled by the Guest interruptibility information VMCS field which will set the
+      * VMCS field after actually delivering the NMI which we read on VM-exit to determine the state.
+      */
+     if (pVCpu->hm.s.Event.fPending)
+     {
+         SVMEVENT Event;
+         Event.u = pVCpu->hm.s.Event.u64IntInfo;
+         if (   Event.n.u1Valid
+             && Event.n.u3Type == SVM_EVENT_NMI
+             && Event.n.u8Vector == X86_XCPT_NMI)
+         {
+             VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_NMIS);
+         }
+     }
+
      return VINF_SUCCESS;
  }
…
      /*
-      * The ordering of the case labels is based on most-frequently-occurring VM-exits for most guests under
+      * The ordering of the case labels is based on most-frequently-occurring #VMEXITs for most guests under
       * normal workloads (for some definition of "normal").
       */
…
              case SVM_EXIT_VMMCALL:
                  return hmR0SvmExitVmmCall(pVCpu, pCtx, pSvmTransient);
+
+             case SVM_EXIT_IRET:
+                 return hmR0SvmExitIret(pVCpu, pCtx, pSvmTransient);

              case SVM_EXIT_SHUTDOWN:
…
   *
   * @returns VBox status code (informational error codes included).
-  * @retval VINF_SUCCESS if we should continue handling the VM-exit.
+  * @retval VINF_SUCCESS if we should continue handling the #VMEXIT.
   * @retval VINF_HM_DOUBLE_FAULT if a #DF condition was detected and we ought to
   *         continue execution of the guest which will delivery the #DF.
…
          /*
           * If event delivery caused an #VMEXIT that is not an exception (e.g. #NPF) then reflect the original
-          * exception to the guest after handling the VM-exit.
+          * exception to the guest after handling the #VMEXIT.
           */
          enmReflect = SVMREFLECTXCPT_XCPT;
…
  /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */

- /** @name VM-exit handlers.
+ /** @name #VMEXIT handlers.
   * @{
   */
…
      STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);

-     /* We should -not- get this VM-exit if the guest's debug registers were active. */
+     /* We should -not- get this #VMEXIT if the guest's debug registers were active. */
      if (pSvmTransient->fWasGuestDebugStateActive)
      {
…
      pVmcb->ctrl.IntCtrl.n.u8VIrqVector = 0;

-     /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts, it is now ready. */
+     /* Clear NMI inhibition, if it's active. */
+     if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_NMIS))
+     {
+         hmR0SvmClearIretIntercept(pVmcb);
+         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_NMIS);
+     }
+
+     /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive interrupts/NMIs, it is now ready. */
      pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_VINTR;
      pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);

-     /* Deliver the pending interrupt via hmR0SvmPreRunGuest()->hmR0SvmInjectEventVmcb() and resume guest execution. */
+     /* Deliver the pending interrupt/NMI via hmR0SvmEvaluatePendingEvent() and resume guest execution. */
      STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
      return VINF_SUCCESS;
…
      if (rc != VINF_SUCCESS)
          hmR0SvmSetPendingXcptUD(pVCpu);
+     return VINF_SUCCESS;
+ }
+
+
+ /**
+  * #VMEXIT handler for IRET (SVM_EXIT_IRET). Conditional #VMEXIT.
+  */
+ HMSVM_EXIT_DECL hmR0SvmExitIret(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+ {
+     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+
+     /* Clear NMI inhibition. */
+     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_NMIS);
+
+     /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs, it is now ready. */
+     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+     hmR0SvmClearIretIntercept(pVmcb);
+
+     /* Deliver the pending NMI via hmR0SvmEvaluatePendingEvent() and resume guest execution. */
      return VINF_SUCCESS;
  }
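For illustration (not part of the changeset): taken together, the AMD-V changes form a small state machine. While NMIs are inhibited, a pending NMI arms the IRET intercept instead of being injected; the resulting SVM_EXIT_IRET clears the inhibition so the NMI goes in on the next entry. A self-contained toy model of that flow in plain C (own types, not the VBox API):

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of the IRET-intercept dance added to HMSVMR0.cpp above. */
    typedef struct
    {
        bool fNmiPending;    /* models VMCPU_FF_INTERRUPT_NMI   */
        bool fNmiInhibited;  /* models VMCPU_FF_INHIBIT_NMIS    */
        bool fIretIntercept; /* models SVM_CTRL1_INTERCEPT_IRET */
    } VCPUSTATE;

    static void evaluatePendingEvent(VCPUSTATE *p)
    {
        if (p->fNmiPending)
        {
            if (p->fNmiInhibited)
                p->fIretIntercept = true;   /* like hmR0SvmSetIretIntercept() */
            else
            {
                printf("injecting NMI\n");
                p->fNmiPending   = false;
                p->fNmiInhibited = true;    /* blocked until the guest IRETs */
            }
        }
    }

    static void onIretVmexit(VCPUSTATE *p)  /* like hmR0SvmExitIret() */
    {
        p->fNmiInhibited  = false;
        p->fIretIntercept = false;          /* like hmR0SvmClearIretIntercept() */
    }

    int main(void)
    {
        VCPUSTATE s = { true, true, false }; /* NMI pending while one is in service */
        evaluatePendingEvent(&s);            /* arms the IRET intercept, no injection */
        onIretVmexit(&s);                    /* guest IRET -> #VMEXIT -> unblock */
        evaluatePendingEvent(&s);            /* now the NMI is injected */
        return 0;
    }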
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r52009 → r52041

      val |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT        /* External interrupts causes a VM-exit. */
-         | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT;            /* Non-maskable interrupts causes a VM-exit. */
-     Assert(!(val & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI));
+         | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT;            /* Non-maskable interrupts (NMIs) causes a VM-exit. */
+
+     if (pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
+         val |= VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI;    /* Use virtual NMIs and virtual-NMI blocking features. */

      /* Enable the VMX preemption timer. */
…
      }
  #endif
+     }
+
+     /* If we're using virtual NMIs, we need the NMI-window exiting feature. */
+     if (   (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
+         && (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
+     {
+         val |= VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT;
      }
…
          uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
      }
+
+     /*
+      * NMIs to the guest are inhibited until the guest executes an IRET. We only
+      * bother with virtual-NMI blocking when we have support for virtual NMIs in the
+      * CPU, otherwise setting this would block host-NMIs and IRET will not clear the
+      * blocking.
+      *
+      * See Intel spec. 26.6.1 "Interruptibility state". See @bugref{7445}.
+      */
+     if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_NMIS)
+         && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI))
+     {
+         uIntrState |= VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI;
+     }
+
      return uIntrState;
  }
…
      if (!uIntrState)
-         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
+     {
+         if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
+         if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_NMIS))
+             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_NMIS);
+     }
      else
      {
-         Assert(   uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
-                || uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
-         rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
-         AssertRC(rc);
-         rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);    /* for hmR0VmxGetGuestIntrState(). */
-         AssertRC(rc);
-
-         EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
-         Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
+         if (   (uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS)
+             || (uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI))
+         {
+             rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
+             AssertRC(rc);
+             rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);    /* for hmR0VmxGetGuestIntrState(). */
+             AssertRC(rc);
+
+             EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
+             Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
+         }
+
+         if (uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI)
+             VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_NMIS);
+         else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_NMIS))
+             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_NMIS);
      }
  }
…

  /**
+  * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
+  * cause a VM-exit as soon as the guest is in a state to receive NMIs.
+  *
+  * @param   pVCpu       Pointer to the VMCPU.
+  */
+ DECLINLINE(void) hmR0VmxSetNmiWindowExitVmcs(PVMCPU pVCpu)
+ {
+     if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
+     {
+         if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
+         {
+             pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT;
+             int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
+             AssertRC(rc);
+             Log4(("Setup NMI-window exiting\n"));
+         }
+     } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
+ }
+
+
+ /**
+  * Clears the NMI-window exiting control in the VMCS.
+  *
+  * @param   pVCpu       Pointer to the VMCPU.
+  */
+ DECLINLINE(void) hmR0VmxClearNmiWindowExitVmcs(PVMCPU pVCpu)
+ {
+     Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT);
+     pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT;
+     int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
+     AssertRC(rc);
+     Log4(("Cleared NMI-window exiting\n"));
+ }
+
+
+ /**
   * Evaluates the event to be delivered to the guest and sets it as the pending
   * event.
…
      bool fBlockMovSS = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
      bool fBlockSti   = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
+     bool fBlockNmi   = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);

      Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
-     Assert(   !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI)      /* We don't support block-by-NMI and SMI yet.*/
-            && !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));
+     Assert(!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));       /* We don't support block-by-SMI yet.*/
      Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF);  /* Cannot set block-by-STI when interrupts are disabled. */
      Assert(!TRPMHasTrap(pVCpu));
…
      if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI))     /* NMI. NMIs take priority over regular interrupts. */
      {
-         /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
-         if (   !fBlockMovSS
-             && !fBlockSti)
+         if (   fBlockNmi
+             || fBlockSti
+             || fBlockMovSS)
          {
+             /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
+             hmR0VmxSetNmiWindowExitVmcs(pVCpu);
+         }
+         else
+         {
              Log4(("Pending NMI vcpu[%RU32]\n", pVCpu->idCpu));
              uint32_t u32IntInfo = X86_XCPT_NMI | VMX_EXIT_INTERRUPTION_INFO_VALID;
…
              VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
          }
-         else
-             hmR0VmxSetIntWindowExitVmcs(pVCpu);
      }
      /*
…
      Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
-     Assert(   !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI)      /* We don't support block-by-NMI and SMI yet.*/
-            && !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));
+     Assert(!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));       /* We don't support block-by-SMI yet.*/
      Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF);  /* Cannot set block-by-STI when interrupts are disabled. */
      Assert(!TRPMHasTrap(pVCpu));
…
          uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
          if (   (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT)
-             && (   uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT
-                 || uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI))
+             && uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
          {
              Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
              hmR0VmxClearIntWindowExitVmcs(pVCpu);
          }
- #if 1 /* defined(VBOX_STRICT) */ /* Temporarily for debugging. */
+
+ #ifdef VBOX_STRICT
          if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
          {
…
              Assert(!fBlockSti);
              Assert(!fBlockMovSS);
-             Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT));
          }
          else if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
          {
+             bool fBlockNmi = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);
              Assert(!fBlockSti);
              Assert(!fBlockMovSS);
-             Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT));
+             Assert(!fBlockNmi);
          }
  #endif
…
      /* Validate. */
-     Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(u32IntInfo));             /* Bit 31 (Valid bit) must be set by caller. */
-     Assert(!VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK(u32IntInfo));         /* Bit 12 MBZ. */
-     Assert(!(u32IntInfo & 0x7ffff000));                                  /* Bits 30:12 MBZ. */
+     Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(u32IntInfo));             /* Bit 31 (Valid bit) must be set by caller. */
+     Assert(!VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK_IRET(u32IntInfo));    /* Bit 12 MBZ. */
+     Assert(!(u32IntInfo & 0x7ffff000));                                  /* Bits 30:12 MBZ. */

      /* Inject. */
…
      hmR0VmxClearIntWindowExitVmcs(pVCpu);

-     /* Deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectEvent() and resume guest execution. */
+     /* Deliver the pending interrupts via hmR0VmxEvaluatePendingEvent() and resume guest execution. */
      STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
      return VINF_SUCCESS;
…
  {
      HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
-     AssertMsgFailed(("Unexpected NMI-window exit.\n"));
-     HMVMX_RETURN_UNEXPECTED_EXIT();
+     if (RT_UNLIKELY(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT)))
+     {
+         AssertMsgFailed(("Unexpected NMI-window exit.\n"));
+         HMVMX_RETURN_UNEXPECTED_EXIT();
+     }
+
+     Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_NMIS));
+
+     /*
+      * Clear block-by-STI if it's active. The force-flag couldn't have been set by block-by-Mov SS in
+      * hmR0VmxSaveGuestIntrState() when this VM-exit happens as Intel CPUs are consistent with
+      * block-by-Mov SS and NMIs. See @bugref{7445}.
+      */
+     if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
+
+     /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs, it is now ready. */
+     hmR0VmxClearNmiWindowExitVmcs(pVCpu);
+
+     /* Deliver the pending NMI via hmR0VmxEvaluatePendingEvent() and resume guest execution. */
+     return VINF_SUCCESS;
  }
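For illustration (not part of the changeset): both new VT-x controls are gated on the corresponding allowed-1 capability bits, the standard pattern of never setting a control the CPU does not advertise. A stand-alone sketch of that gating; the constant matches RT_BIT(22) from the hm_vmx.h hunk above, while the capability value is made up rather than read from the real MSR:

    #include <stdint.h>
    #include <stdio.h>

    #define VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT   (UINT32_C(1) << 22)

    int main(void)
    {
        uint32_t fAllowed1   = VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT; /* pretend the CPU supports it */
        uint32_t u32ProcCtls = 0;

        if (fAllowed1 & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT)
        {
            u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT;     /* then write it to the VMCS */
            printf("NMI-window exiting armed\n");
        }
        else
            printf("ancient CPU: deliver the NMI at the next VM-exit instead\n");
        return 0;
    }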
trunk/src/VBox/VMM/VMMR3/HM.cpp
r52005 → r52041

  EXIT_REASON(VMX_EXIT_SMI_IRQ         ,  6, "Other SMI."),
  EXIT_REASON(VMX_EXIT_INT_WINDOW      ,  7, "Interrupt window."),
- EXIT_REASON_NIL(),
+ EXIT_REASON(VMX_EXIT_NMI_WINDOW      ,  8, "NMI window."),
  EXIT_REASON(VMX_EXIT_TASK_SWITCH     ,  9, "Task switch."),
  EXIT_REASON(VMX_EXIT_CPUID           , 10, "Guest attempted to execute CPUID."),
trunk/src/recompiler/VBoxRecompiler.c
r51326 → r52041

  }

+ /* Update the inhibit NMI mask. */
+ pVM->rem.s.Env.hflags2 &= ~HF2_NMI_MASK;
+ if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_NMIS))
+     pVM->rem.s.Env.hflags2 |= HF2_NMI_MASK;
+
  /*
   * Sync the A20 gate.
…
      Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#2)\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVCpu)));
      VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
+ }
+
+ /* Inhibit NMI flag. */
+ if (pVM->rem.s.Env.hflags2 & HF2_NMI_MASK)
+ {
+     Log(("Settings VMCPU_FF_INHIBIT_NMIS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
+     VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_NMIS);
+ }
+ else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_NMIS))
+ {
+     Log(("Clearing VMCPU_FF_INHIBIT_NMIS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
+     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_NMIS);
  }
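For illustration (not part of the changeset): the recompiler keeps its own copy of the NMI mask in Env.hflags2, so the force flag is copied in on entry and synced back out afterwards, picking up any IRET the recompiler executed in between. A toy sketch of that round trip; HF2_NMI_MASK's value here is illustrative, not taken from the recompiler headers:

    #include <stdbool.h>
    #include <stdint.h>

    #define HF2_NMI_MASK  (1 << 2)   /* illustrative bit position */

    /* Entry: carry the VMCPU_FF_INHIBIT_NMIS state into the emulated CPU flags. */
    static void remFlushToEnv(uint32_t *phflags2, bool fInhibitNmis)
    {
        *phflags2 &= ~HF2_NMI_MASK;
        if (fInhibitNmis)
            *phflags2 |= HF2_NMI_MASK;
    }

    /* Exit: read the (possibly IRET-cleared) mask back into the force flag. */
    static bool remSyncBack(uint32_t hflags2)
    {
        return (hflags2 & HF2_NMI_MASK) != 0;
    }

    int main(void)
    {
        uint32_t hflags2 = 0;
        remFlushToEnv(&hflags2, true);        /* NMI inhibition carried into REM */
        hflags2 &= ~HF2_NMI_MASK;             /* recompiler executed the guest IRET */
        bool fInhibit = remSyncBack(hflags2); /* cleared on the way out */
        return fInhibit ? 1 : 0;
    }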