Changeset 68346 in vbox
- Timestamp:
- Aug 9, 2017 5:38:42 AM (7 years ago)
- File:
- 1 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (modified) (1 diff)
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
Diff r68311 → r68346, hunk at old lines 3132–3196 / new lines 3132–3209. The changeset
viewer's tabular line-number columns have been removed and the hunk reconstructed as a
conventional unified diff for readability (content unchanged):

     bool const fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF);
     bool const fBlockNmi = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);
+#ifdef VBOX_WITH_NESTED_HWVIRT
+    bool const fGlobalIF = pCtx->hwvirt.svm.fGif
+#else
+    bool const fGlobalIF = true;
+#endif
     PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;

     SVMEVENT Event;
     Event.u = 0;
-    /** @todo SMI. SMIs take priority over NMIs. */
-    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI))   /* NMI. NMIs take priority over regular interrupts. */
-    {
-        if (fBlockNmi)
-            hmR0SvmSetIretIntercept(pVmcb);
-        else if (fIntShadow)
-            hmR0SvmSetVirtIntrIntercept(pVmcb);
-        else
-        {
-            Log4(("Pending NMI\n"));
-
-            Event.n.u1Valid  = 1;
-            Event.n.u8Vector = X86_XCPT_NMI;
-            Event.n.u3Type   = SVM_EVENT_NMI;
-
-            hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
-            hmR0SvmSetIretIntercept(pVmcb);
-            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
-            return;
-        }
-    }
-    else if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
-             && !pVCpu->hm.s.fSingleInstruction)
-    {
-        /*
-         * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt() returns
-         * a valid interrupt we -must- deliver the interrupt. We can no longer re-request it from the APIC.
-         */
-        if (   !fBlockInt
-            && !fIntShadow)
-        {
-            uint8_t u8Interrupt;
-            int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
-            if (RT_SUCCESS(rc))
-            {
-                Log4(("Injecting external interrupt u8Interrupt=%#x\n", u8Interrupt));
-
-                Event.n.u1Valid  = 1;
-                Event.n.u8Vector = u8Interrupt;
-                Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
-
-                hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
-            }
-            else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
-            {
-                /*
-                 * AMD-V has no TPR thresholding feature. We just avoid posting the interrupt.
-                 * We just avoid delivering the TPR-masked interrupt here. TPR will be updated
-                 * always via hmR0SvmLoadGuestState() -> hmR0SvmLoadGuestApicState().
-                 */
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
-            }
-            else
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
-        }
-        else
-            hmR0SvmSetVirtIntrIntercept(pVmcb);
-    }
+
+    /*
+     * If the global interrupt flag (GIF) isn't set, even NMIs are blocked.
+     * Only relevant when SVM capability is exposed to the guest.
+     */
+    if (fGlobalIF)
+    {
+        /** @todo SMI. SMIs take priority over NMIs. */
+        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI))   /* NMI. NMIs take priority over regular interrupts. */
+        {
+            if (fBlockNmi)
+                hmR0SvmSetIretIntercept(pVmcb);
+            else if (fIntShadow)
+                hmR0SvmSetVirtIntrIntercept(pVmcb);
+            else
+            {
+                Log4(("Pending NMI\n"));
+
+                Event.n.u1Valid  = 1;
+                Event.n.u8Vector = X86_XCPT_NMI;
+                Event.n.u3Type   = SVM_EVENT_NMI;
+
+                hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
+                hmR0SvmSetIretIntercept(pVmcb);
+                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
+                return;
+            }
+        }
+        else if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
+                 && !pVCpu->hm.s.fSingleInstruction)
+        {
+            /*
+             * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt() returns
+             * a valid interrupt we -must- deliver the interrupt. We can no longer re-request it from the APIC.
+             */
+            if (   !fBlockInt
+                && !fIntShadow)
+            {
+                uint8_t u8Interrupt;
+                int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
+                if (RT_SUCCESS(rc))
+                {
+                    Log4(("Injecting external interrupt u8Interrupt=%#x\n", u8Interrupt));
+
+                    Event.n.u1Valid  = 1;
+                    Event.n.u8Vector = u8Interrupt;
+                    Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
+
+                    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
+                }
+                else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
+                {
+                    /*
+                     * AMD-V has no TPR thresholding feature. We just avoid posting the interrupt.
+                     * We just avoid delivering the TPR-masked interrupt here. TPR will be updated
+                     * always via hmR0SvmLoadGuestState() -> hmR0SvmLoadGuestApicState().
+                     */
+                    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
+                }
+                else
+                    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
+            }
+            else
+                hmR0SvmSetVirtIntrIntercept(pVmcb);
+        }
+    }

NOTE(review): as rendered, the added line `bool const fGlobalIF = pCtx->hwvirt.svm.fGif` has
no terminating semicolon (unlike the sibling `= true;` line) — this is either an extraction
artifact or a build break corrected in a follow-up revision; verify against the repository.
Note:
See TracChangeset
for help on using the changeset viewer.

