Changeset 46657 in vbox
Timestamp: Jun 19, 2013, 1:18:19 PM
File: 1 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (modified) (21 diffs)
Legend: lines prefixed with '+' were added, lines prefixed with '-' were removed, unprefixed lines are unmodified context, and '…' marks elided unchanged lines.
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
(diff: r46603 → r46657)

 #define HMSVM_EXIT_DECL                 static int
 
 
+/** @name Segment attribute conversion between CPU and AMD-V VMCB format.
  *
…
 #define HMSVM_VMCB_2_CPU_SEG_ATTR(a)    (a & 0xff) | ((a & 0x0f00) << 4)
 /** @} */
+
 
 /** @name Macros for loading, storing segment registers to/from the VMCB.
…
 } while (0)
 /** @} */
+
+
+/** @name Macro for checking and returning from the function using it, for
+ *        #VMEXIT intercepts that may be caused during delivery of another
+ *        event in the guest. */
+#define HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY() \
+    do \
+    { \
+        int rc = hmR0SvmCheckExitDueToEventDelivery(pVCpu, pCtx, pSvmTransient); \
+        if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT)) \
+            return VINF_SUCCESS; \
+        else if (RT_UNLIKELY(rc == VINF_EM_RESET)) \
+            return rc; \
+    } while (0)
+/** @} */
+
+
+/**
+ * @name Exception bitmap mask for all contributory exceptions.
+ *
+ * Page fault is deliberately excluded here as it's conditional whether it's
+ * contributory or benign. It's handled separately.
+ */
+#define HMSVM_CONTRIBUTORY_XCPT_MASK    (  RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
+                                         | RT_BIT(X86_XCPT_DE))
+/** @} */
+
 
 /** @name VMCB Clean Bits.
…
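A quick worked check of the conversion macro above (the test scaffolding is illustrative, not part of the changeset): the VMCB packs the AVL/L/D/G attribute bits at bits 8-11, while the CPU's hidden segment-register format keeps them at bits 12-15. Note the macro body is not fully parenthesised, so it is safest in a plain assignment, as here.

    #include <assert.h>
    #include <stdint.h>

    #define HMSVM_VMCB_2_CPU_SEG_ATTR(a)    (a & 0xff) | ((a & 0x0f00) << 4)

    int main(void)
    {
        /* Typical ring-0 32-bit code segment: type=0xB, S=1, DPL=0, P=1 gives the
           low byte 0x9B; D/B=1 and G=1 sit at VMCB bits 10 and 11 (0xC00). */
        uint16_t uVmcbAttr = 0xC9B;
        uint32_t uCpuAttr  = HMSVM_VMCB_2_CPU_SEG_ATTR(uVmcbAttr); /* plain assignment: precedence-safe */
        assert(uCpuAttr == 0xC09B);   /* D/B and G moved up to CPU bits 14 and 15. */
        return 0;
    }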
     /** The guest's TPR value used for TPR shadowing. */
     uint8_t         u8GuestTpr;
+
+    /** Whether the #VMEXIT was caused by a page-fault during delivery of a
+     *  contributory exception or a page-fault. */
+    bool            fVectoringPF;
 } SVMTRANSIENT, *PSVMTRANSIENT;
 /** @} */
…
     }
 
-    pSvmTransient->u64ExitCode = pVmcb->ctrl.u64ExitCode;
+    pSvmTransient->u64ExitCode  = pVmcb->ctrl.u64ExitCode;      /* Save the #VMEXIT reason. */
+    pSvmTransient->fVectoringPF = false;                        /* Vectoring page-fault needs to be determined later. */
     hmR0SvmSaveGuestState(pVCpu, pMixedCtx);                    /* Save the guest state from the VMCB to the guest-CPU context. */
 
-    if (RT_LIKELY(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID))
+    if (RT_LIKELY(pSvmTransient->u64ExitCode != (uint64_t)SVM_EXIT_INVALID))
     {
         if (pVCpu->hm.s.svm.fSyncVTpr)
…
          */
         hmR0SvmPostRunGuest(pVM, pVCpu, pCtx, &SvmTransient, rc);
-        if (RT_UNLIKELY(   rc != VINF_SUCCESS                               /* Check for errors with running the VM (VMRUN). */
-                        || SvmTransient.u64ExitCode == SVM_EXIT_INVALID))   /* Check for errors due to invalid guest state. */
+        if (RT_UNLIKELY(   rc != VINF_SUCCESS                                         /* Check for VMRUN errors. */
+                        || SvmTransient.u64ExitCode == (uint64_t)SVM_EXIT_INVALID))   /* Check for invalid guest-state errors. */
         {
             if (rc == VINF_SUCCESS)
…
 
         /* Handle the #VMEXIT. */
-        AssertMsg(SvmTransient.u64ExitCode != SVM_EXIT_INVALID, ("%#x\n", SvmTransient.u64ExitCode));
+        AssertMsg(SvmTransient.u64ExitCode != (uint64_t)SVM_EXIT_INVALID, ("%#x\n", SvmTransient.u64ExitCode));
         HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
         rc = hmR0SvmHandleExit(pVCpu, pCtx, &SvmTransient);
…
                 case SVM_EXIT_EXCEPTION_C:   /* X86_XCPT_SS */
                 case SVM_EXIT_EXCEPTION_D:   /* X86_XCPT_GP */
-                    return
+                {
+                    SVMEVENT Event;
+                    Event.u          = 0;
+                    Event.n.u3Type   = SVM_EVENT_EXCEPTION;
+                    Event.n.u1Valid  = 1;
+                    Event.n.u8Vector = pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0;
+
+                    switch (Event.n.u8Vector)
+                    {
+                        case X86_XCPT_GP:
+                            Event.n.u1ErrorCodeValid = 1;
+                            Event.n.u32ErrorCode     = pVmcb->ctrl.u64ExitInfo1;
+                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
+                            break;
+                        case X86_XCPT_BP:
+                            /** Saves the wrong EIP on the stack (pointing to the int3) instead of the
+                             *  next instruction. */
+                            /** @todo Investigate this later. */
+                            break;
+                        case X86_XCPT_DE:
+                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
+                            break;
+                        case X86_XCPT_UD:
+                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
+                            break;
+                        case X86_XCPT_SS:
+                            Event.n.u1ErrorCodeValid = 1;
+                            Event.n.u32ErrorCode     = pVmcb->ctrl.u64ExitInfo1;
+                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
+                            break;
+                        case X86_XCPT_NP:
+                            Event.n.u1ErrorCodeValid = 1;
+                            Event.n.u32ErrorCode     = pVmcb->ctrl.u64ExitInfo1;
+                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
+                            break;
+                    }
+                    Log4(("#Xcpt: Vector=%#x at CS:RIP=%04x:%RGv\n", Event.n.u8Vector, pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
+                    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
+                    return VINF_SUCCESS;
+                }
 #endif
…
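In the new case block the vector is recovered by plain arithmetic, since AMD-V assigns exceptions a contiguous exit-code range starting at SVM_EXIT_EXCEPTION_0, and only the vectors that architecturally push an error code (#GP, #SS and #NP among those handled here) copy EXITINFO1 into the event. A small standalone sketch of that mapping; the 0x40 base matches the AMD-defined exception exit codes, and the helper names are invented for illustration:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define SVM_EXIT_EXCEPTION_0    0x40    /* AMD-V: exit codes 0x40..0x5f are exceptions 0..31. */

    static uint8_t vectorFromExitCode(uint64_t uExitCode)
    {
        return (uint8_t)(uExitCode - SVM_EXIT_EXCEPTION_0);
    }

    /* Which of the vectors handled above carry a hardware error code in EXITINFO1. */
    static bool hasErrorCode(uint8_t uVector)
    {
        return uVector == 13 /* #GP */ || uVector == 12 /* #SS */ || uVector == 11 /* #NP */;
    }

    int main(void)
    {
        assert(vectorFromExitCode(0x4D) == 13 && hasErrorCode(13));   /* SVM_EXIT_EXCEPTION_D -> #GP */
        assert(vectorFromExitCode(0x43) == 3  && !hasErrorCode(3));   /* SVM_EXIT_EXCEPTION_3 -> #BP */
        return 0;
    }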
     do { \
         AssertPtr(pVCpu); \
-        AssertPtr(pMixedCtx); \
+        AssertPtr(pCtx); \
         AssertPtr(pSvmTransient); \
         Assert(ASMIntAreEnabled()); \
         Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
         HMSVM_ASSERT_PREEMPT_CPUID_VAR(); \
-        Log4Func(("vcpu[%u] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (uint32_t)pVCpu->idCpu)); \
+        Log4Func(("vcpu[%u] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (uint32_t)pVCpu->idCpu)); \
         Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
         if (VMMR0IsLogFlushDisabled(pVCpu)) \
…
 
     hmR0SvmSetPendingEvent(pVCpu, &Event);
-    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
 }
…
     Event.n.u3Type   = SVM_EVENT_EXCEPTION;
     Event.n.u8Vector = X86_XCPT_MF;
+    hmR0SvmSetPendingEvent(pVCpu, &Event);
+}
+
+
+/**
+ * Sets a double fault (#DF) exception as pending-for-injection into the VM.
+ *
+ * @param pVCpu         Pointer to the VMCPU.
+ */
+DECLINLINE(void) hmR0SvmSetPendingXcptDF(PVMCPU pVCpu)
+{
+    SVMEVENT Event;
+    Event.u                  = 0;
+    Event.n.u1Valid          = 1;
+    Event.n.u3Type           = SVM_EVENT_EXCEPTION;
+    Event.n.u8Vector         = X86_XCPT_DF;
+    Event.n.u1ErrorCodeValid = 1;
+    Event.n.u32ErrorCode     = 0;
     hmR0SvmSetPendingEvent(pVCpu, &Event);
 }
…
 }
 
+/**
+ * Determines if an exception is a contributory exception. Contributory
+ * exceptions are those which can cause double-faults. Page-fault is
+ * intentionally not included here as it is only conditionally contributory.
+ *
+ * @returns true if the exception is contributory, false otherwise.
+ * @param   uVector     The exception vector.
+ */
+DECLINLINE(bool) hmR0SvmIsContributoryXcpt(const uint32_t uVector)
+{
+    switch (uVector)
+    {
+        case X86_XCPT_GP:
+        case X86_XCPT_SS:
+        case X86_XCPT_NP:
+        case X86_XCPT_TS:
+        case X86_XCPT_DE:
+            return true;
+        default:
+            break;
+    }
+    return false;
+}
+
+
+/**
+ * Handles a condition that occurred while delivering an event through the
+ * guest IDT.
+ *
+ * @returns VBox status code (informational error codes included).
+ * @retval VINF_SUCCESS if we should continue handling the #VMEXIT.
+ * @retval VINF_HM_DOUBLE_FAULT if a #DF condition was detected and we ought to
+ *         continue execution of the guest, which will deliver the #DF.
+ * @retval VINF_EM_RESET if we detected a triple-fault condition.
+ *
+ * @param pVCpu         Pointer to the VMCPU.
+ * @param pCtx          Pointer to the guest-CPU context.
+ * @param pSvmTransient Pointer to the SVM transient structure.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+static int hmR0SvmCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+{
+    int rc = VINF_SUCCESS;
+    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+
+    /* See AMD spec. 15.7.3 "EXITINFO Pseudo-Code". EXITINTINFO (if valid) contains the prior exception (IDT vector)
+       that was being delivered to the guest when the intercepted #VMEXIT (Exit vector) occurred. */
+    if (pVmcb->ctrl.ExitIntInfo.n.u1Valid)
+    {
+        if (pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_EXCEPTION)
+        {
+            typedef enum
+            {
+                SVMREFLECTXCPT_XCPT,    /* Reflect the exception to the guest or for further evaluation by the VMM. */
+                SVMREFLECTXCPT_DF,      /* Reflect the exception as a double-fault to the guest. */
+                SVMREFLECTXCPT_TF,      /* Indicate a triple-faulted state to the VMM. */
+                SVMREFLECTXCPT_NONE     /* Nothing to reflect. */
+            } SVMREFLECTXCPT;
+
+            SVMREFLECTXCPT enmReflect = SVMREFLECTXCPT_NONE;
+
+            if (pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0 <= SVM_EXIT_EXCEPTION_1F)
+            {
+                uint8_t uExitVector = (uint8_t)(pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0);
+                uint8_t uIdtVector  = pVmcb->ctrl.ExitIntInfo.n.u8Vector;
+
+                if (   uExitVector == X86_XCPT_PF
+                    && uIdtVector  == X86_XCPT_PF)
+                {
+                    pSvmTransient->fVectoringPF = true;
+                    Log4(("IDT: Vectoring #PF uCR2=%#RX64\n", pCtx->cr2));
+                }
+                else if (   (pVmcb->ctrl.u32InterceptException & HMSVM_CONTRIBUTORY_XCPT_MASK)
+                         && hmR0SvmIsContributoryXcpt(uExitVector)
+                         && (   hmR0SvmIsContributoryXcpt(uIdtVector)
+                             || uIdtVector == X86_XCPT_PF))
+                {
+                    enmReflect = SVMREFLECTXCPT_DF;
+                }
+                else if (uIdtVector == X86_XCPT_DF)
+                    enmReflect = SVMREFLECTXCPT_TF;
+                else
+                    enmReflect = SVMREFLECTXCPT_XCPT;
+            }
+            else
+            {
+                /*
+                 * If event delivery caused a #VMEXIT that is not an exception (e.g. #NPF) then reflect the original
+                 * exception to the guest after handling the #VMEXIT.
+                 */
+                enmReflect = SVMREFLECTXCPT_XCPT;
+            }
+        }
+        else if (pVmcb->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT)
+        {
+            /* Ignore software interrupts (INT n) as they reoccur when restarting the instruction. */
+            enmReflect = SVMREFLECTXCPT_XCPT;
+        }
+
+        switch (enmReflect)
+        {
+            case SVMREFLECTXCPT_XCPT:
+            {
+                pVCpu->hm.s.Event.u64IntrInfo = pVmcb->ctrl.ExitIntInfo.u;
+                pVCpu->hm.s.Event.fPending    = true;
+
+                /* If uExitVector is #PF, CR2 value will be updated from the VMCB if it's a guest #PF. See hmR0SvmExitXcptPF(). */
+                Log4(("IDT: Pending vectoring event %#RX64 ErrValid=%RTbool Err=%#RX32\n", pVmcb->ctrl.ExitIntInfo.u,
+                      !!pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid, pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode));
+                break;
+            }
+
+            case SVMREFLECTXCPT_DF:
+            {
+                hmR0SvmSetPendingXcptDF(pVCpu);
+                rc = VINF_HM_DOUBLE_FAULT;
+                Log4(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntrInfo,
+                      uIdtVector, uExitVector));
+                break;
+            }
+
+            case SVMREFLECTXCPT_TF:
+            {
+                rc = VINF_EM_RESET;
+                Log4(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector));
+                break;
+            }
+
+            default:
+                Assert(rc == VINF_SUCCESS);
+                break;
+        }
+    }
+    Assert(rc == VINF_SUCCESS || rc == VINF_HM_DOUBLE_FAULT || rc == VINF_EM_RESET);
+    return rc;
+}
+
 
 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
…
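The reflection rules above follow AMD spec. 15.7.3 and the architectural double-fault rules: a contributory exception raised while delivering a contributory exception or a page-fault escalates to #DF; any exception raised while delivering a #DF is a triple fault; a #PF raised while delivering a #PF is merely flagged (fVectoringPF) for the #PF intercept to turn into a #DF; every other combination is benign and the original event is re-delivered. The standalone model below restates that decision chain with assertions. It is illustrative only: it omits the u32InterceptException mask check, and none of these identifiers exist in VirtualBox.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    enum { XCPT_DE = 0, XCPT_BP = 3, XCPT_DF = 8, XCPT_TS = 10, XCPT_NP = 11,
           XCPT_SS = 12, XCPT_GP = 13, XCPT_PF = 14 };
    typedef enum { REFLECT_XCPT, REFLECT_DF, REFLECT_TF, REFLECT_NONE } REFLECT;

    static bool isContributory(uint8_t uVector)
    {
        return uVector == XCPT_GP || uVector == XCPT_SS || uVector == XCPT_NP
            || uVector == XCPT_TS || uVector == XCPT_DE;
    }

    /* Mirrors the exception-during-exception paths of hmR0SvmCheckExitDueToEventDelivery(). */
    static REFLECT classify(uint8_t uIdtVector, uint8_t uExitVector, bool *pfVectoringPF)
    {
        *pfVectoringPF = false;
        if (uIdtVector == XCPT_PF && uExitVector == XCPT_PF)
        {
            *pfVectoringPF = true;  /* Left to the #PF intercept, which injects #DF. */
            return REFLECT_NONE;
        }
        if (isContributory(uExitVector) && (isContributory(uIdtVector) || uIdtVector == XCPT_PF))
            return REFLECT_DF;      /* Escalate to a double fault. */
        if (uIdtVector == XCPT_DF)
            return REFLECT_TF;      /* Fault while delivering #DF: triple fault, guest reset. */
        return REFLECT_XCPT;        /* Benign combination: re-deliver the original event. */
    }

    int main(void)
    {
        bool fVectoringPF;
        assert(classify(XCPT_GP, XCPT_NP, &fVectoringPF) == REFLECT_DF);
        assert(classify(XCPT_PF, XCPT_GP, &fVectoringPF) == REFLECT_DF);
        assert(classify(XCPT_PF, XCPT_PF, &fVectoringPF) == REFLECT_NONE && fVectoringPF);
        assert(classify(XCPT_DF, XCPT_GP, &fVectoringPF) == REFLECT_TF);
        assert(classify(XCPT_DE, XCPT_BP, &fVectoringPF) == REFLECT_XCPT);
        return 0;
    }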
 
     if (   rc == VINF_EM_HALT
-        && EMShouldContinueAfterHalt(pVCpu, pMixedCtx))
+        && EMShouldContinueAfterHalt(pVCpu, pCtx))
     {
         rc = VINF_SUCCESS;
…
 
     /* Paranoia. */
-    pMixedCtx->dr[7] &= 0xffffffff;                                             /* Upper 32 bits MBZ. */
-    pMixedCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15));   /* MBZ. */
-    pMixedCtx->dr[7] |= 0x400;                                                  /* MB1. */
+    pCtx->dr[7] &= 0xffffffff;                                                  /* Upper 32 bits MBZ. */
+    pCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15));        /* MBZ. */
+    pCtx->dr[7] |= 0x400;                                                       /* MB1. */
 
     pVmcb->guest.u64DR7 = pCtx->dr[7];
…
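For reference, the DR7 sanitising above first clears the upper half (must be zero), then the reserved MBZ bits 11, 12, 14 and 15, and finally sets the must-be-one bit 10. A worked example with an arbitrary dirty value (illustrative only; BIT() stands in for VBox's RT_BIT()):

    #include <assert.h>
    #include <stdint.h>

    #define BIT(n)  (1u << (n))     /* stand-in for VBox's RT_BIT() */

    int main(void)
    {
        uint64_t dr7 = UINT64_C(0xdeadbeef0000d9ff);                   /* deliberately polluted */
        dr7 &= 0xffffffff;                                             /* Upper 32 bits MBZ. */
        dr7 &= ~(uint64_t)(BIT(11) | BIT(12) | BIT(14) | BIT(15));     /* Reserved MBZ bits. */
        dr7 |= 0x400;                                                  /* Bit 10 MB1 (always reads 1). */
        assert(dr7 == 0x05ff);
        return 0;
    }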
     PVM pVM = pVCpu->CTX_SUFF(pVM);
     Assert(pVM->hm.s.fNestedPaging);
+
+    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
 
     /* See AMD spec. 15.25.6 "Nested versus Guest Page Faults, Fault Ordering" for VMCB details for #NPF. */
…
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
 
-    /* -XXX- @todo Vectoring pagefaults!! */
+    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
 
     /* See AMD spec. 15.12.15 "#PF (Page Fault)". */
…
     RTGCUINTPTR uFaultAddress = pVmcb->ctrl.u64ExitInfo2;
 
-#if defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) || defined(HMVMX_ALWAYS_TRAP_PF)
+#if defined(HMSVM_ALWAYS_TRAP_ALL_XCPTS) || defined(HMSVM_ALWAYS_TRAP_PF)
     if (pVM->hm.s.fNestedPaging)
     {
-        /* A genuine guest #PF, reflect it to the guest. */
-        Log4(("#PF: Guest page fault at %04X:%RGv FaultAddr=%RGv ErrCode=%#x\n", pCtx->cs, (RTGCPTR)pCtx->rip, uFaultAddress,
-              u32ErrCode));
-        hmR0SvmSetPendingXcptPF(pVCpu, pCtx, u32ErrCode, uFaultAddress);
+        pVCpu->hm.s.Event.fPending = false;     /* In case it's a contributory or vectoring #PF. */
+        if (!pSvmTransient->fVectoringPF)
+        {
+            /* A genuine guest #PF, reflect it to the guest. */
+            hmR0SvmSetPendingXcptPF(pVCpu, pCtx, u32ErrCode, uFaultAddress);
+            Log4(("#PF: Guest page fault at %04X:%RGv FaultAddr=%RGv ErrCode=%#x\n", pCtx->cs, (RTGCPTR)pCtx->rip, uFaultAddress,
+                  u32ErrCode));
+        }
+        else
+        {
+            /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
+            hmR0SvmSetPendingXcptDF(pVCpu);
+            Log4(("Pending #DF due to vectoring #PF. NP\n"));
+        }
+        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
         return VINF_SUCCESS;
     }
…
 
     TRPMAssertXcptPF(pVCpu, uFaultAddress, u32ErrCode);
-    int rc = PGMTrap0eHandler(pVCpu, errCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);
+    rc = PGMTrap0eHandler(pVCpu, errCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);
 
     Log2(("#PF rc=%Rrc\n", rc));
…
     else if (rc == VINF_EM_RAW_GUEST_TRAP)
     {
-        /* It's a guest page fault and needs to be reflected to the guest. */
-
-        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
-        u32ErrCode = TRPMGetErrorCode(pVCpu);       /* The error code might have been changed. */
-        TRPMResetTrap(pVCpu);
-
-        hmR0SvmSetPendingXcptPF(pVCpu, pCtx, u32ErrCode, uFaultAddress);
+        if (!pSvmTransient->fVectoringPF)
+        {
+            /* It's a guest page fault and needs to be reflected to the guest. */
+            u32ErrCode = TRPMGetErrorCode(pVCpu);   /* The error code might have been changed. */
+            TRPMResetTrap(pVCpu);
+
+            hmR0SvmSetPendingXcptPF(pVCpu, pCtx, u32ErrCode, uFaultAddress);
+        }
+        else
+        {
+            /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
+            TRPMResetTrap(pVCpu);
+            pVCpu->hm.s.Event.fPending = false;     /* Clear pending #PF to replace it with #DF. */
+            hmR0SvmSetPendingXcptDF(pVCpu);
+            Log4(("#PF: Pending #DF due to vectoring #PF\n"));
+        }
+
         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
         return VINF_SUCCESS;
…
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
 
+    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
+
-#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
+#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
     Assert(!CPUMIsGuestFPUStateActive(pVCpu));
 #endif
…
 HMSVM_EXIT_DECL hmR0SvmExitXcptMF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
 {
-    int rc;
-    if (!(pMixedCtx->cr0 & X86_CR0_NE))
+    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+
+    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
+
+    if (!(pCtx->cr0 & X86_CR0_NE))
     {
         /* Old-style FPU error reporting needs some extra work. */
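Taken together, the changeset converges the intercept handlers on a single pattern: validate the parameters, then run the event-delivery check before any handler-specific work. A skeleton of that pattern follows (the handler name is hypothetical, and this only compiles inside HMSVMR0.cpp since it relies on the module's own types and macros):

    /* Hypothetical handler following the pattern established above (sketch only). */
    HMSVM_EXIT_DECL hmR0SvmExitXcptExample(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
    {
        HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();

        /* May queue the interrupted event for re-injection, upgrade it to a pending #DF
           (returning VINF_SUCCESS so the guest resumes and delivers the #DF), or bail
           out with VINF_EM_RESET on a triple fault. */
        HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();

        /* Handler-specific work; pSvmTransient->fVectoringPF tells a #PF handler that
           this fault occurred while another #PF was being delivered. */
        return VINF_SUCCESS;
    }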