Index: /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp	(revision 74331)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp	(revision 74332)
@@ -410,40 +410,4 @@
 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
 /**
- * Check the common SVM instruction preconditions.
- */
-# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
-    do { \
-        if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
-        { \
-            Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
-            return iemRaiseUndefinedOpcode(a_pVCpu); \
-        } \
-        if (IEM_IS_REAL_OR_V86_MODE(a_pVCpu)) \
-        { \
-            Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
-            return iemRaiseUndefinedOpcode(a_pVCpu); \
-        } \
-        if ((a_pVCpu)->iem.s.uCpl != 0) \
-        { \
-            Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
-            return iemRaiseGeneralProtectionFault0(a_pVCpu); \
-        } \
-    } while (0)
-
-/**
- * Updates the NextRIP (NRI) field in the nested-guest VMCB.
- */
-# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
-    do { \
-        if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
-            CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
-    } while (0)
-
-/**
- * Check if SVM is enabled.
- */
-# define IEM_IS_SVM_ENABLED(a_pVCpu)                         (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
-
-/**
  * Check if an SVM control/instruction intercept is set.
  */
@@ -476,12 +440,7 @@
 
 /**
- * Get the SVM pause-filter count.
- */
-# define IEM_GET_SVM_PAUSE_FILTER_COUNT(a_pVCpu)             (CPUMGetGuestSvmPauseFilterCount(a_pVCpu, IEM_GET_CTX(a_pVCpu)))
-
-/**
  * Invokes the SVM \#VMEXIT handler for the nested-guest.
  */
-# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
+# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
     do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
 
@@ -490,5 +449,5 @@
  * corresponding decode assist information.
  */
-# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
+# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
     do \
     { \
@@ -499,11 +458,43 @@
         else \
             uExitInfo1 = 0; \
-        IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
+        IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
     } while (0)
 
+/** Checks and handles SVM nested-guest instruction intercept and updates
+ *  NRIP if needed.
+ */
+# define IEM_CHECK_SVM_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
+    do \
+    { \
+        if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
+        { \
+            IEM_UPDATE_SVM_NRIP(a_pVCpu); \
+            IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
+        } \
+    } while (0)
+
+/** Checks and handles SVM nested-guest CR0 read intercept. */
+# define IEM_CHECK_SVM_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
+    do \
+    { \
+        if (!IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
+        { /* probably likely */ } \
+        else \
+        { \
+            IEM_UPDATE_SVM_NRIP(a_pVCpu); \
+            IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
+        } \
+    } while (0)
+
+/**
+ * Updates the NextRIP (NRI) field in the nested-guest VMCB.
+ */
+# define IEM_UPDATE_SVM_NRIP(a_pVCpu) \
+    do { \
+        if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
+            CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
+    } while (0)
+
 #else
-# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr)                                    do { } while (0)
-# define IEM_SVM_UPDATE_NRIP(a_pVCpu)                                                     do { } while (0)
-# define IEM_IS_SVM_ENABLED(a_pVCpu)                                                      (false)
 # define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)                              (false)
 # define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)                                 (false)
@@ -512,7 +503,9 @@
 # define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr)                                (false)
 # define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector)                                (false)
-# define IEM_GET_SVM_PAUSE_FILTER_COUNT(a_pVCpu)                                          (0)
-# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2)          do { return VERR_SVM_IPE_1; } while (0)
-# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg)         do { return VERR_SVM_IPE_1; } while (0)
+# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2)             do { return VERR_SVM_IPE_1; } while (0)
+# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg)            do { return VERR_SVM_IPE_1; } while (0)
+# define IEM_CHECK_SVM_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2)   do { } while (0)
+# define IEM_CHECK_SVM_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2)                          do { } while (0)
+# define IEM_UPDATE_SVM_NRIP(a_pVCpu)                                                     do { } while (0)
 
 #endif
@@ -3418,5 +3411,5 @@
     {
         Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 
@@ -4004,5 +3997,5 @@
 
         Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
         RT_NOREF2(uExitInfo1, uExitInfo2);
     }
@@ -5505,5 +5498,5 @@
             /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
             if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
-                IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+                IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
         }
         else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h	(revision 74331)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h	(revision 74332)
@@ -551,6 +551,6 @@
     {
         Log2(("pushf: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_PUSHF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_PUSHF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 
@@ -618,6 +618,6 @@
     {
         Log2(("popf: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_POPF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_POPF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 
@@ -3856,6 +3856,6 @@
     {
         Log(("iret: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_IRET, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IRET, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 
@@ -4635,6 +4635,6 @@
     {
         Log(("lgdt: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_GDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_GDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 
@@ -4680,6 +4680,6 @@
     {
         Log(("sgdt: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_GDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_GDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 
@@ -4708,6 +4708,6 @@
     {
         Log(("lidt: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_IDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 
@@ -4752,6 +4752,6 @@
     {
         Log(("sidt: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 
@@ -4799,6 +4799,6 @@
         {
             Log(("lldt: Guest intercept -> #VMEXIT\n"));
-            IEM_SVM_UPDATE_NRIP(pVCpu);
-            IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+            IEM_UPDATE_SVM_NRIP(pVCpu);
+            IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
         }
 
@@ -4875,6 +4875,6 @@
     {
         Log(("lldt: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 
@@ -4903,5 +4903,5 @@
 IEM_CIMPL_DEF_2(iemCImpl_sldt_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
 {
-    IEMCIMPL_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0);
+    IEM_CHECK_SVM_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0);
 
     IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);
@@ -4927,5 +4927,5 @@
 IEM_CIMPL_DEF_2(iemCImpl_sldt_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
 {
-    IEMCIMPL_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0);
+    IEM_CHECK_SVM_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0);
 
     IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);
@@ -4970,6 +4970,6 @@
     {
         Log(("ltr: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_TR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 
@@ -5068,5 +5068,5 @@
 IEM_CIMPL_DEF_2(iemCImpl_str_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
 {
-    IEMCIMPL_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0);
+    IEM_CHECK_SVM_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0);
 
     IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);
@@ -5092,5 +5092,5 @@
 IEM_CIMPL_DEF_2(iemCImpl_str_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
 {
-    IEMCIMPL_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0);
+    IEM_CHECK_SVM_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0);
 
     IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);
@@ -5117,6 +5117,6 @@
     {
         Log(("iemCImpl_mov_Rd_Cd: Guest intercept CR%u -> #VMEXIT\n", iCrReg));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_READ_CR0 + iCrReg, IEMACCESSCRX_MOV_CRX, iGReg);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_READ_CR0 + iCrReg, IEMACCESSCRX_MOV_CRX, iGReg);
     }
 
@@ -5187,5 +5187,5 @@
 IEM_CIMPL_DEF_2(iemCImpl_smsw_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
 {
-    IEMCIMPL_HLP_SVM_READ_CR_INTERCEPT(pVCpu, /*cr*/ 0, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    IEM_CHECK_SVM_READ_CR0_INTERCEPT(pVCpu, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
 
     switch (enmEffOpSize)
@@ -5225,5 +5225,5 @@
 IEM_CIMPL_DEF_2(iemCImpl_smsw_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
 {
-    IEMCIMPL_HLP_SVM_READ_CR_INTERCEPT(pVCpu, /*cr*/ 0, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    IEM_CHECK_SVM_READ_CR0_INTERCEPT(pVCpu, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
 
     uint16_t u16Value;
@@ -5355,6 +5355,6 @@
             {
                 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
-                IEM_SVM_UPDATE_NRIP(pVCpu);
-                IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR0, enmAccessCrX, iGReg);
+                IEM_UPDATE_SVM_NRIP(pVCpu);
+                IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR0, enmAccessCrX, iGReg);
             }
             if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))
@@ -5366,6 +5366,6 @@
                     Assert(enmAccessCrX != IEMACCESSCRX_CLTS);
                     Log(("iemCImpl_load_Cr%#x: lmsw or bits other than TS/MP changed: Guest intercept -> #VMEXIT\n", iCrReg));
-                    IEM_SVM_UPDATE_NRIP(pVCpu);
-                    IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_CR0_SEL_WRITE, enmAccessCrX, iGReg);
+                    IEM_UPDATE_SVM_NRIP(pVCpu);
+                    IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_CR0_SEL_WRITE, enmAccessCrX, iGReg);
                 }
             }
@@ -5423,6 +5423,6 @@
             {
                 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
-                IEM_SVM_UPDATE_NRIP(pVCpu);
-                IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR2, enmAccessCrX, iGReg);
+                IEM_UPDATE_SVM_NRIP(pVCpu);
+                IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR2, enmAccessCrX, iGReg);
             }
             pVCpu->cpum.GstCtx.cr2 = uNewCrX;
@@ -5479,6 +5479,6 @@
             {
                 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
-                IEM_SVM_UPDATE_NRIP(pVCpu);
-                IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR3, enmAccessCrX, iGReg);
+                IEM_UPDATE_SVM_NRIP(pVCpu);
+                IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR3, enmAccessCrX, iGReg);
             }
 
@@ -5558,6 +5558,6 @@
             {
                 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
-                IEM_SVM_UPDATE_NRIP(pVCpu);
-                IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR4, enmAccessCrX, iGReg);
+                IEM_UPDATE_SVM_NRIP(pVCpu);
+                IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR4, enmAccessCrX, iGReg);
             }
 
@@ -5623,6 +5623,6 @@
                 {
                     Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
-                    IEM_SVM_UPDATE_NRIP(pVCpu);
-                    IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR8, enmAccessCrX, iGReg);
+                    IEM_UPDATE_SVM_NRIP(pVCpu);
+                    IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR8, enmAccessCrX, iGReg);
                 }
 
@@ -5797,6 +5797,6 @@
     {
         Log(("mov r%u,dr%u: Guest intercept -> #VMEXIT\n", iGReg, iDrReg));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_READ_DR0 + (iDrReg & 0xf),
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_READ_DR0 + (iDrReg & 0xf),
                               IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
     }
@@ -5898,6 +5898,6 @@
     {
         Log2(("mov dr%u,r%u: Guest intercept -> #VMEXIT\n", iDrReg, iGReg));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_WRITE_DR0 + (iDrReg & 0xf),
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_DR0 + (iDrReg & 0xf),
                               IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
     }
@@ -5936,6 +5936,6 @@
     {
         Log(("invlpg: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_INVLPG,
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_INVLPG,
                               IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? GCPtrPage : 0, 0 /* uExitInfo2 */);
     }
@@ -6071,5 +6071,5 @@
     }
 
-    IEMCIMPL_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(pVCpu, SVM_CTRL_INTERCEPT_INVD, SVM_EXIT_INVD, 0, 0);
+    IEM_CHECK_SVM_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_INVD, SVM_EXIT_INVD, 0, 0);
 
     /* We currently take no action here. */
@@ -6090,5 +6090,5 @@
     }
 
-    IEMCIMPL_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(pVCpu, SVM_CTRL_INTERCEPT_WBINVD, SVM_EXIT_WBINVD, 0, 0);
+    IEM_CHECK_SVM_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_WBINVD, SVM_EXIT_WBINVD, 0, 0);
 
     /* We currently take no action here. */
@@ -6101,5 +6101,5 @@
 IEM_CIMPL_DEF_0(iemCImpl_rsm)
 {
-    IEMCIMPL_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(pVCpu, SVM_CTRL_INTERCEPT_RSM, SVM_EXIT_RSM, 0, 0);
+    IEM_CHECK_SVM_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RSM, SVM_EXIT_RSM, 0, 0);
     NOREF(cbInstr);
     return iemRaiseUndefinedOpcode(pVCpu);
@@ -6131,6 +6131,6 @@
     {
         Log(("rdtsc: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_RDTSC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDTSC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 
@@ -6174,6 +6174,6 @@
     {
         Log(("rdtscp: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_RDTSCP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDTSCP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 
@@ -6216,6 +6216,6 @@
     {
         Log(("rdpmc: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_RDPMC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDPMC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 
@@ -6653,6 +6653,6 @@
     {
         Log2(("hlt: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_HLT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_HLT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 
@@ -6707,6 +6707,6 @@
     {
         Log2(("monitor: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_MONITOR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MONITOR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 
@@ -6774,12 +6774,12 @@
     {
         Log2(("mwait: Guest intercept (monitor hardware armed) -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_MWAIT_ARMED, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MWAIT_ARMED, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT))
     {
         Log2(("mwait: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_MWAIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MWAIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 
@@ -6831,6 +6831,6 @@
     {
         Log2(("cpuid: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_CPUID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_CPUID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 
@@ -7171,6 +7171,6 @@
         {
             Log2(("xsetbv: Guest intercept -> #VMEXIT\n"));
-            IEM_SVM_UPDATE_NRIP(pVCpu);
-            IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_XSETBV, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+            IEM_UPDATE_SVM_NRIP(pVCpu);
+            IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XSETBV, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
         }
 
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h	(revision 74331)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h	(revision 74332)
@@ -18,37 +18,26 @@
 
 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-/** Check and handles SVM nested-guest instruction intercept and updates
- *  NRIP if needed.
- */
-# define IEMCIMPL_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
-    do \
-    { \
-        if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
+/**
+ * Check the common SVM instruction preconditions.
+ */
+# define IEM_CHECK_SVM_INSTR_COMMON(a_pVCpu, a_Instr) \
+    do { \
+        if (!CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu))) \
         { \
-            IEM_SVM_UPDATE_NRIP(a_pVCpu); \
-            IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
+            Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
+            return iemRaiseUndefinedOpcode(a_pVCpu); \
+        } \
+        if (IEM_IS_REAL_OR_V86_MODE(a_pVCpu)) \
+        { \
+            Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
+            return iemRaiseUndefinedOpcode(a_pVCpu); \
+        } \
+        if ((a_pVCpu)->iem.s.uCpl != 0) \
+        { \
+            Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
+            return iemRaiseGeneralProtectionFault0(a_pVCpu); \
         } \
     } while (0)
 
-/** Checks and handles SVM nested-guest CR0 read intercept. */
-# define IEMCIMPL_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
-    do \
-    { \
-        if (!IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
-        { /* probably likely */ } \
-        else \
-        { \
-            IEM_SVM_UPDATE_NRIP(a_pVCpu); \
-            IEM_RETURN_SVM_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
-        } \
-    } while (0)
-
-#else  /* !VBOX_WITH_NESTED_HWVIRT_SVM */
-# define IEMCIMPL_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2)  do { } while (0)
-# define IEMCIMPL_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2)                         do { } while (0)
-#endif /* !VBOX_WITH_NESTED_HWVIRT_SVM */
-
-
-#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
 
 /**
@@ -898,5 +887,5 @@
     {
         Log2(("iemHandleSvmNstGstEventIntercept: NMI intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 
@@ -906,6 +895,6 @@
     {
         Log2(("iemHandleSvmNstGstEventIntercept: ICEBP intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_ICEBP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_ICEBP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 
@@ -937,9 +926,9 @@
         }
         if (u8Vector == X86_XCPT_BR)
-            IEM_SVM_UPDATE_NRIP(pVCpu);
+            IEM_UPDATE_SVM_NRIP(pVCpu);
         Log2(("iemHandleSvmNstGstEventIntercept: Xcpt intercept u32InterceptXcpt=%#RX32 u8Vector=%#x "
               "uExitInfo1=%#RX64 uExitInfo2=%#RX64 -> #VMEXIT\n", pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb)->ctrl.u32InterceptXcpt,
               u8Vector, uExitInfo1, uExitInfo2));
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_XCPT_0 + u8Vector, uExitInfo1, uExitInfo2);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_0 + u8Vector, uExitInfo1, uExitInfo2);
     }
 
@@ -953,6 +942,6 @@
         uint64_t const uExitInfo1 = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? u8Vector : 0;
         Log2(("iemHandleSvmNstGstEventIntercept: Software INT intercept (u8Vector=%#x) -> #VMEXIT\n", u8Vector));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_SWINT, uExitInfo1, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SWINT, uExitInfo1, 0 /* uExitInfo2 */);
     }
 
@@ -1000,5 +989,5 @@
     {
         Log3(("iemSvmHandleIOIntercept: u16Port=%#x (%u) -> #VMEXIT\n", u16Port, u16Port));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
         return iemSvmVmexit(pVCpu, SVM_EXIT_IOIO, IoExitInfo.u, pVCpu->cpum.GstCtx.rip + cbInstr);
     }
@@ -1059,5 +1048,5 @@
         if (*pbMsrpm & RT_BIT(uMsrpmBit))
         {
-            IEM_SVM_UPDATE_NRIP(pVCpu);
+            IEM_UPDATE_SVM_NRIP(pVCpu);
             return iemSvmVmexit(pVCpu, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
         }
@@ -1086,5 +1075,5 @@
 # else
     LogFlow(("iemCImpl_vmrun\n"));
-    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmrun);
+    IEM_CHECK_SVM_INSTR_COMMON(pVCpu, vmrun);
 
     /** @todo Check effective address size using address size prefix. */
@@ -1100,5 +1089,5 @@
     {
         Log(("vmrun: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_VMRUN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_VMRUN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 
@@ -1111,4 +1100,263 @@
     return rcStrict;
 # endif
+}
+
+
+/**
+ * Implements 'VMLOAD'.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_vmload)
+{
+# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
+    RT_NOREF2(pVCpu, cbInstr);
+    return VINF_EM_RAW_EMULATE_INSTR;
+# else
+    LogFlow(("iemCImpl_vmload\n"));
+    IEM_CHECK_SVM_INSTR_COMMON(pVCpu, vmload);
+
+    /** @todo Check effective address size using address size prefix. */
+    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
+    if (   (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
+        || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
+    {
+        Log(("vmload: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+    }
+
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
+    {
+        Log(("vmload: Guest intercept -> #VMEXIT\n"));
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_VMLOAD, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
+    SVMVMCBSTATESAVE VmcbNstGst;
+    VBOXSTRICTRC rcStrict = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcbNstGst, GCPhysVmcb + RT_UOFFSETOF(SVMVMCB, guest),
+                                                    sizeof(SVMVMCBSTATESAVE));
+    if (rcStrict == VINF_SUCCESS)
+    {
+        LogFlow(("vmload: Loading VMCB at %#RGp enmEffAddrMode=%d\n", GCPhysVmcb, pVCpu->iem.s.enmEffAddrMode));
+        HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, FS, fs);
+        HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, GS, gs);
+        HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, TR, tr);
+        HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, LDTR, ldtr);
+
+        pVCpu->cpum.GstCtx.msrKERNELGSBASE = VmcbNstGst.u64KernelGSBase;
+        pVCpu->cpum.GstCtx.msrSTAR         = VmcbNstGst.u64STAR;
+        pVCpu->cpum.GstCtx.msrLSTAR        = VmcbNstGst.u64LSTAR;
+        pVCpu->cpum.GstCtx.msrCSTAR        = VmcbNstGst.u64CSTAR;
+        pVCpu->cpum.GstCtx.msrSFMASK       = VmcbNstGst.u64SFMASK;
+
+        pVCpu->cpum.GstCtx.SysEnter.cs     = VmcbNstGst.u64SysEnterCS;
+        pVCpu->cpum.GstCtx.SysEnter.esp    = VmcbNstGst.u64SysEnterESP;
+        pVCpu->cpum.GstCtx.SysEnter.eip    = VmcbNstGst.u64SysEnterEIP;
+
+        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+    }
+    return rcStrict;
+# endif
+}
+
+
+/**
+ * Implements 'VMSAVE'.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_vmsave)
+{
+# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
+    RT_NOREF2(pVCpu, cbInstr);
+    return VINF_EM_RAW_EMULATE_INSTR;
+# else
+    LogFlow(("iemCImpl_vmsave\n"));
+    IEM_CHECK_SVM_INSTR_COMMON(pVCpu, vmsave);
+
+    /** @todo Check effective address size using address size prefix. */
+    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
+    if (   (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
+        || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
+    {
+        Log(("vmsave: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+    }
+
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))
+    {
+        Log(("vmsave: Guest intercept -> #VMEXIT\n"));
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_VMSAVE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
+    SVMVMCBSTATESAVE VmcbNstGst;
+    VBOXSTRICTRC rcStrict = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcbNstGst, GCPhysVmcb + RT_UOFFSETOF(SVMVMCB, guest),
+                                                    sizeof(SVMVMCBSTATESAVE));
+    if (rcStrict == VINF_SUCCESS)
+    {
+        LogFlow(("vmsave: Saving VMCB at %#RGp enmEffAddrMode=%d\n", GCPhysVmcb, pVCpu->iem.s.enmEffAddrMode));
+        IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_FS | CPUMCTX_EXTRN_GS | CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_LDTR
+                                | CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_SYSENTER_MSRS);
+
+        HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, FS, fs);
+        HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, GS, gs);
+        HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, TR, tr);
+        HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, LDTR, ldtr);
+
+        VmcbNstGst.u64KernelGSBase  = pVCpu->cpum.GstCtx.msrKERNELGSBASE;
+        VmcbNstGst.u64STAR          = pVCpu->cpum.GstCtx.msrSTAR;
+        VmcbNstGst.u64LSTAR         = pVCpu->cpum.GstCtx.msrLSTAR;
+        VmcbNstGst.u64CSTAR         = pVCpu->cpum.GstCtx.msrCSTAR;
+        VmcbNstGst.u64SFMASK        = pVCpu->cpum.GstCtx.msrSFMASK;
+
+        VmcbNstGst.u64SysEnterCS    = pVCpu->cpum.GstCtx.SysEnter.cs;
+        VmcbNstGst.u64SysEnterESP   = pVCpu->cpum.GstCtx.SysEnter.esp;
+        VmcbNstGst.u64SysEnterEIP   = pVCpu->cpum.GstCtx.SysEnter.eip;
+
+        rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcb + RT_UOFFSETOF(SVMVMCB, guest), &VmcbNstGst,
+                                            sizeof(SVMVMCBSTATESAVE));
+        if (rcStrict == VINF_SUCCESS)
+            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+    }
+    return rcStrict;
+# endif
+}
+
+
+/**
+ * Implements 'CLGI'.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_clgi)
+{
+# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
+    RT_NOREF2(pVCpu, cbInstr);
+    return VINF_EM_RAW_EMULATE_INSTR;
+# else
+    LogFlow(("iemCImpl_clgi\n"));
+    IEM_CHECK_SVM_INSTR_COMMON(pVCpu, clgi);
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
+    {
+        Log(("clgi: Guest intercept -> #VMEXIT\n"));
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_CLGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
+    pVCpu->cpum.GstCtx.hwvirt.fGif = false;
+    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+
+#  if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
+    return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
+#  else
+    return VINF_SUCCESS;
+#  endif
+# endif
+}
+
+
+/**
+ * Implements 'STGI'.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_stgi)
+{
+# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
+    RT_NOREF2(pVCpu, cbInstr);
+    return VINF_EM_RAW_EMULATE_INSTR;
+# else
+    LogFlow(("iemCImpl_stgi\n"));
+    IEM_CHECK_SVM_INSTR_COMMON(pVCpu, stgi);
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_STGI))
+    {
+        Log2(("stgi: Guest intercept -> #VMEXIT\n"));
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_STGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
+    pVCpu->cpum.GstCtx.hwvirt.fGif = true;
+    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+
+#  if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
+    return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
+#  else
+    return VINF_SUCCESS;
+#  endif
+# endif
+}
+
+
+/**
+ * Implements 'INVLPGA'.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_invlpga)
+{
+    /** @todo Check effective address size using address size prefix. */
+    RTGCPTR  const GCPtrPage = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
+    /** @todo PGM needs virtual ASID support. */
+# if 0
+    uint32_t const uAsid     = pVCpu->cpum.GstCtx.ecx;
+# endif
+
+    IEM_CHECK_SVM_INSTR_COMMON(pVCpu, invlpga);
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
+    {
+        Log2(("invlpga: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_INVLPGA, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
+    PGMInvalidatePage(pVCpu, GCPtrPage);
+    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Implements 'SKINIT'.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_skinit)
+{
+    IEM_CHECK_SVM_INSTR_COMMON(pVCpu, skinit);
+
+    uint32_t uIgnore;
+    uint32_t fFeaturesECX;
+    CPUMGetGuestCpuId(pVCpu, 0x80000001, 0 /* iSubLeaf */, &uIgnore, &uIgnore, &fFeaturesECX, &uIgnore);
+    if (!(fFeaturesECX & X86_CPUID_AMD_FEATURE_ECX_SKINIT))
+        return iemRaiseUndefinedOpcode(pVCpu);
+
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SKINIT))
+    {
+        Log2(("skinit: Guest intercept -> #VMEXIT\n"));
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SKINIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
+    RT_NOREF(cbInstr);
+    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
+}
+
+
+/**
+ * Implements SVM's implementation of PAUSE.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_svm_pause)
+{
+    bool fCheckIntercept = true;
+    if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilter)
+    {
+        IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_HWVIRT);
+
+        /* TSC based pause-filter thresholding. */
+        if (   IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilterThreshold
+            && pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilterThreshold > 0)
+        {
+            uint64_t const uTick = TMCpuTickGet(pVCpu);
+            if (uTick - pVCpu->cpum.GstCtx.hwvirt.svm.uPrevPauseTick > pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilterThreshold)
+                pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter = CPUMGetGuestSvmPauseFilterCount(pVCpu, IEM_GET_CTX(pVCpu));
+            pVCpu->cpum.GstCtx.hwvirt.svm.uPrevPauseTick = uTick;
+        }
+
+        /* Simple pause-filter counter. */
+        if (pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter > 0)
+        {
+            --pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter;
+            fCheckIntercept = false;
+        }
+    }
+
+    if (fCheckIntercept)
+        IEM_CHECK_SVM_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_PAUSE, SVM_EXIT_PAUSE, 0, 0);
+
+    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+    return VINF_SUCCESS;
 }
 
@@ -1146,4 +1394,5 @@
 }
 
+
 /**
  * Implements 'VMMCALL'.
@@ -1154,5 +1403,5 @@
     {
         Log(("vmmcall: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 
@@ -1175,264 +1424,2 @@
 }
 
-#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-
-/**
- * Implements 'VMLOAD'.
- */
-IEM_CIMPL_DEF_0(iemCImpl_vmload)
-{
-# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
-    RT_NOREF2(pVCpu, cbInstr);
-    return VINF_EM_RAW_EMULATE_INSTR;
-# else
-    LogFlow(("iemCImpl_vmload\n"));
-    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);
-
-    /** @todo Check effective address size using address size prefix. */
-    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
-    if (   (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
-        || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
-    {
-        Log(("vmload: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
-        return iemRaiseGeneralProtectionFault0(pVCpu);
-    }
-
-    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
-    {
-        Log(("vmload: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_VMLOAD, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-
-    SVMVMCBSTATESAVE VmcbNstGst;
-    VBOXSTRICTRC rcStrict = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcbNstGst, GCPhysVmcb + RT_UOFFSETOF(SVMVMCB, guest),
-                                                    sizeof(SVMVMCBSTATESAVE));
-    if (rcStrict == VINF_SUCCESS)
-    {
-        LogFlow(("vmload: Loading VMCB at %#RGp enmEffAddrMode=%d\n", GCPhysVmcb, pVCpu->iem.s.enmEffAddrMode));
-        HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, FS, fs);
-        HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, GS, gs);
-        HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, TR, tr);
-        HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, LDTR, ldtr);
-
-        pVCpu->cpum.GstCtx.msrKERNELGSBASE = VmcbNstGst.u64KernelGSBase;
-        pVCpu->cpum.GstCtx.msrSTAR         = VmcbNstGst.u64STAR;
-        pVCpu->cpum.GstCtx.msrLSTAR        = VmcbNstGst.u64LSTAR;
-        pVCpu->cpum.GstCtx.msrCSTAR        = VmcbNstGst.u64CSTAR;
-        pVCpu->cpum.GstCtx.msrSFMASK       = VmcbNstGst.u64SFMASK;
-
-        pVCpu->cpum.GstCtx.SysEnter.cs     = VmcbNstGst.u64SysEnterCS;
-        pVCpu->cpum.GstCtx.SysEnter.esp    = VmcbNstGst.u64SysEnterESP;
-        pVCpu->cpum.GstCtx.SysEnter.eip    = VmcbNstGst.u64SysEnterEIP;
-
-        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
-    }
-    return rcStrict;
-# endif
-}
-
-
-/**
- * Implements 'VMSAVE'.
- */
-IEM_CIMPL_DEF_0(iemCImpl_vmsave)
-{
-# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
-    RT_NOREF2(pVCpu, cbInstr);
-    return VINF_EM_RAW_EMULATE_INSTR;
-# else
-    LogFlow(("iemCImpl_vmsave\n"));
-    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmsave);
-
-    /** @todo Check effective address size using address size prefix. */
-    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
-    if (   (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
-        || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
-    {
-        Log(("vmsave: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
-        return iemRaiseGeneralProtectionFault0(pVCpu);
-    }
-
-    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))
-    {
-        Log(("vmsave: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_VMSAVE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-
-    SVMVMCBSTATESAVE VmcbNstGst;
-    VBOXSTRICTRC rcStrict = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcbNstGst, GCPhysVmcb + RT_UOFFSETOF(SVMVMCB, guest),
-                                                    sizeof(SVMVMCBSTATESAVE));
-    if (rcStrict == VINF_SUCCESS)
-    {
-        LogFlow(("vmsave: Saving VMCB at %#RGp enmEffAddrMode=%d\n", GCPhysVmcb, pVCpu->iem.s.enmEffAddrMode));
-        IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_FS | CPUMCTX_EXTRN_GS | CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_LDTR
-                                | CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_SYSENTER_MSRS);
-
-        HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, FS, fs);
-        HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, GS, gs);
-        HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, TR, tr);
-        HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, LDTR, ldtr);
-
-        VmcbNstGst.u64KernelGSBase  = pVCpu->cpum.GstCtx.msrKERNELGSBASE;
-        VmcbNstGst.u64STAR          = pVCpu->cpum.GstCtx.msrSTAR;
-        VmcbNstGst.u64LSTAR         = pVCpu->cpum.GstCtx.msrLSTAR;
-        VmcbNstGst.u64CSTAR         = pVCpu->cpum.GstCtx.msrCSTAR;
-        VmcbNstGst.u64SFMASK        = pVCpu->cpum.GstCtx.msrSFMASK;
-
-        VmcbNstGst.u64SysEnterCS    = pVCpu->cpum.GstCtx.SysEnter.cs;
-        VmcbNstGst.u64SysEnterESP   = pVCpu->cpum.GstCtx.SysEnter.esp;
-        VmcbNstGst.u64SysEnterEIP   = pVCpu->cpum.GstCtx.SysEnter.eip;
-
-        rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcb + RT_UOFFSETOF(SVMVMCB, guest), &VmcbNstGst,
-                                            sizeof(SVMVMCBSTATESAVE));
-        if (rcStrict == VINF_SUCCESS)
-            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
-    }
-    return rcStrict;
-# endif
-}
-
-
-/**
- * Implements 'CLGI'.
- */
-IEM_CIMPL_DEF_0(iemCImpl_clgi)
-{
-# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
-    RT_NOREF2(pVCpu, cbInstr);
-    return VINF_EM_RAW_EMULATE_INSTR;
-# else
-    LogFlow(("iemCImpl_clgi\n"));
-    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, clgi);
-    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
-    {
-        Log(("clgi: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_CLGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-
-    pVCpu->cpum.GstCtx.hwvirt.fGif = false;
-    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
-
-#  if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
-    return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
-#  else
-    return VINF_SUCCESS;
-#  endif
-# endif
-}
-
-
-/**
- * Implements 'STGI'.
- */
-IEM_CIMPL_DEF_0(iemCImpl_stgi)
-{
-# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
-    RT_NOREF2(pVCpu, cbInstr);
-    return VINF_EM_RAW_EMULATE_INSTR;
-# else
-    LogFlow(("iemCImpl_stgi\n"));
-    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, stgi);
-    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_STGI))
-    {
-        Log2(("stgi: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_STGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-
-    pVCpu->cpum.GstCtx.hwvirt.fGif = true;
-    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
-
-#  if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
-    return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
-#  else
-    return VINF_SUCCESS;
-#  endif
-# endif
-}
-
-
-/**
- * Implements 'INVLPGA'.
- */
-IEM_CIMPL_DEF_0(iemCImpl_invlpga)
-{
-    /** @todo Check effective address size using address size prefix. */
-    RTGCPTR  const GCPtrPage = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
-    /** @todo PGM needs virtual ASID support. */
-# if 0
-    uint32_t const uAsid     = pVCpu->cpum.GstCtx.ecx;
-# endif
-
-    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, invlpga);
-    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
-    {
-        Log2(("invlpga: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_INVLPGA, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-
-    PGMInvalidatePage(pVCpu, GCPtrPage);
-    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Implements 'SKINIT'.
- */
-IEM_CIMPL_DEF_0(iemCImpl_skinit)
-{
-    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, invlpga);
-
-    uint32_t uIgnore;
-    uint32_t fFeaturesECX;
-    CPUMGetGuestCpuId(pVCpu, 0x80000001, 0 /* iSubLeaf */, &uIgnore, &uIgnore, &fFeaturesECX, &uIgnore);
-    if (!(fFeaturesECX & X86_CPUID_AMD_FEATURE_ECX_SKINIT))
-        return iemRaiseUndefinedOpcode(pVCpu);
-
-    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SKINIT))
-    {
-        Log2(("skinit: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_SKINIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-
-    RT_NOREF(cbInstr);
-    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
-}
-
-
-/**
- * Implements SVM's implementation of PAUSE.
- */
-IEM_CIMPL_DEF_0(iemCImpl_svm_pause)
-{
-    bool fCheckIntercept = true;
-    if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilter)
-    {
-        IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_HWVIRT);
-
-        /* TSC based pause-filter thresholding. */
-        if (   IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilterThreshold
-            && pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilterThreshold > 0)
-        {
-            uint64_t const uTick = TMCpuTickGet(pVCpu);
-            if (uTick - pVCpu->cpum.GstCtx.hwvirt.svm.uPrevPauseTick > pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilterThreshold)
-                pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter = IEM_GET_SVM_PAUSE_FILTER_COUNT(pVCpu);
-            pVCpu->cpum.GstCtx.hwvirt.svm.uPrevPauseTick = uTick;
-        }
-
-        /* Simple pause-filter counter. */
-        if (pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter > 0)
-        {
-            --pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter;
-            fCheckIntercept = false;
-        }
-    }
-
-    if (fCheckIntercept)
-        IEMCIMPL_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(pVCpu, SVM_CTRL_INTERCEPT_PAUSE, SVM_EXIT_PAUSE, 0, 0);
-
-    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
-    return VINF_SUCCESS;
-}
-
-#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
-
