Index: /trunk/include/VBox/vmm/cpum.h
===================================================================
--- /trunk/include/VBox/vmm/cpum.h	(revision 66580)
+++ /trunk/include/VBox/vmm/cpum.h	(revision 66581)
@@ -1042,49 +1042,32 @@
     uint32_t        fPadding : 23;
 
-    /** Hardware virtualization features.
-     *
-     *  @todo r=bird: Please drop the unions and flatten this as much as possible.
-     *        Prefix the names with 'Svm' 'Vmx' if there is any confusion. Group the
-     *        flags into common and specific bunches.
-     *
-     */
-    union
-    {
-        /** SVM features.  */
-        struct
-        {
-            /** Features as reported by CPUID 0x8000000a.EDX.  */
-            union
-            {
-                struct
-                {
-                    uint32_t fNestedPaging         : 1;
-                    uint32_t fLbrVirt              : 1;
-                    uint32_t fSvmLock              : 1;
-                    uint32_t fNextRipSave          : 1;
-                    uint32_t fTscRateMsr           : 1;
-                    uint32_t fVmcbClean            : 1;
-                    uint32_t fFlusbByAsid          : 1;
-                    uint32_t fDecodeAssist         : 1;
-                    uint32_t u2Reserved0           : 2;
-                    uint32_t fPauseFilter          : 1;
-                    uint32_t u1Reserved0           : 1;
-                    uint32_t fPauseFilterThreshold : 1;
-                    uint32_t fAvic                 : 1;
-                    uint32_t u18Reserved0          : 18;
-                } n;
-                uint32_t    u;
-            } feat;
-            /** Maximum supported ASID. */
-            uint32_t        uMaxAsid;
-        } svm;
-
-        /** VMX features. */
-        struct
-        {
-            uint32_t    uDummy1;
-            uint32_t    uDummy2;
-        } vmx;
-    } CPUM_UNION_NM(hwvirt);
+    /** SVM: Supports Nested-paging. */
+    uint32_t        fSvmNestedPaging : 1;
+    /** SVM: Supports LBR (Last Branch Record) virtualization. */
+    uint32_t        fSvmLbrVirt : 1;
+    /** SVM: Supports SVM lock. */
+    uint32_t        fSvmSvmLock : 1;
+    /** SVM: Supports Next RIP save. */
+    uint32_t        fSvmNextRipSave : 1;
+    /** SVM: Supports TSC rate MSR. */
+    uint32_t        fSvmTscRateMsr : 1;
+    /** SVM: Supports VMCB clean bits. */
+    uint32_t        fSvmVmcbClean : 1;
+    /** SVM: Supports Flush-by-ASID. */
+    uint32_t        fSvmFlusbByAsid : 1;
+    /** SVM: Supports decode assist. */
+    uint32_t        fSvmDecodeAssist : 1;
+    /** SVM: Supports Pause filter. */
+    uint32_t        fSvmPauseFilter : 1;
+    /** SVM: Supports Pause filter threshold. */
+    uint32_t        fSvmPauseFilterThreshold : 1;
+    /** SVM: Supports AVIC (Advanced Virtual Interrupt Controller). */
+    uint32_t        fSvmAvic : 1;
+    /** SVM: Padding / reserved for future features. */
+    uint32_t        fSvmPadding0 : 21;
+    /** SVM: Maximum supported ASID. */
+    uint32_t        uSvmMaxAsid;
+
+    /** @todo VMX features. */
     uint32_t        auPadding[1];
 } CPUMFEATURES;
@@ -1396,9 +1379,10 @@
  * @returns true if in intercept is active, false otherwise.
  * @param   pCtx        Pointer to the context.
- * @param   enmXcpt     The exception.
- */
-DECLINLINE(bool) CPUMIsGuestSvmXcptInterceptSet(PCCPUMCTX pCtx, X86XCPT enmXcpt)
-{
-    return RT_BOOL(pCtx->hwvirt.svm.VmcbCtrl.u32InterceptXcpt & enmXcpt);
+ * @param   uVector     The exception / interrupt vector.
+ */
+DECLINLINE(bool) CPUMIsGuestSvmXcptInterceptSet(PCCPUMCTX pCtx, uint8_t uVector)
+{
+    Assert(uVector < 32);
+    return RT_BOOL(pCtx->hwvirt.svm.VmcbCtrl.u32InterceptXcpt & (UINT32_C(1) << uVector));
 }
 
@@ -1554,6 +1538,6 @@
 VMMDECL(uint32_t)       CPUMGetGuestMxCsrMask(PVM pVM);
 VMMDECL(uint64_t)       CPUMGetGuestScalableBusFrequency(PVM pVM);
-VMMDECL(int)            CPUMGetValidateEfer(PVM pVM, uint64_t uCr0, uint64_t uOldEfer, uint64_t uNewEfer,
-                                            uint64_t *puValidEfer);
+VMMDECL(int)            CPUMQueryValidatedGuestEfer(PVM pVM, uint64_t uCr0, uint64_t uOldEfer, uint64_t uNewEfer,
+                                                    uint64_t *puValidEfer);
 
 /** @name Typical scalable bus frequency values.
Index: /trunk/include/VBox/vmm/cpum.mac
===================================================================
--- /trunk/include/VBox/vmm/cpum.mac	(revision 66580)
+++ /trunk/include/VBox/vmm/cpum.mac	(revision 66581)
@@ -258,18 +258,20 @@
     .abPadding          resb    12
 %endif
-    .hwvirt.svm.uMsrHSavePa         resq    1
-    .hwvirt.svm.GCPhysVmcb          resq    1
-    .hwvirt.svm.VmcbCtrl            resb  256
-    .hwvirt.svm.HostState           resb  184
-    .hwvirt.svm.fGif                resb    1
-    .hwvirt.svm.abPadding0          resb    7
-    .hwvirt.svm.pvMsrBitmapR0       RTR0PTR_RES 1
-    .hwvirt.svm.pvMsrBitmapR3       RTR3PTR_RES 1
-    .hwvirt.svm.pvIoBitmapR0        RTR0PTR_RES 1
-    .hwvirt.svm.pvIoBitmapR3        RTR3PTR_RES 1
+    .hwvirt.svm.uMsrHSavePa              resq    1
+    .hwvirt.svm.GCPhysVmcb               resq    1
+    .hwvirt.svm.VmcbCtrl                 resb  256
+    .hwvirt.svm.HostState                resb  184
+    .hwvirt.svm.fGif                     resb    1
+    .hwvirt.svm.cPauseFilter             resw    1
+    .hwvirt.svm.cPauseFilterThreshold    resw    1
+    .hwvirt.svm.abPadding0               resb    3
+    .hwvirt.svm.pvMsrBitmapR0            RTR0PTR_RES 1
+    .hwvirt.svm.pvMsrBitmapR3            RTR3PTR_RES 1
+    .hwvirt.svm.pvIoBitmapR0             RTR0PTR_RES 1
+    .hwvirt.svm.pvIoBitmapR3             RTR3PTR_RES 1
 %if HC_ARCH_BITS == 32
-    .hwvirt.svm.abPadding1          resb   16
-%endif
-    .hwvirt.fLocalForcedActions     resd    1
+    .hwvirt.svm.abPadding1               resb   16
+%endif
+    .hwvirt.fLocalForcedActions          resd    1
     alignb 64
 endstruc
Index: /trunk/include/VBox/vmm/cpumctx.h
===================================================================
--- /trunk/include/VBox/vmm/cpumctx.h	(revision 66580)
+++ /trunk/include/VBox/vmm/cpumctx.h	(revision 66581)
@@ -485,6 +485,10 @@
                 /** 1184 - Global interrupt flag. */
                 uint8_t             fGif;
-                /** 1185 - Padding. */
-                uint8_t             abPadding0[7];
+                /** 1185 - Pause filter count. */
+                uint16_t            cPauseFilter;
+                /** 1187 - Pause filter threshold. */
+                uint16_t            cPauseFilterThreshold;
+                /** 1189 - Padding. */
+                uint8_t             abPadding0[3];
                 /** 1192 - MSR permission bitmap - R0 ptr. */
                 R0PTRTYPE(void *)   pvMsrBitmapR0;
@@ -567,14 +571,16 @@
 AssertCompileMemberOffset(CPUMCTX,                 aoffXState, HC_ARCH_BITS == 64 ? 596 : 588);
 AssertCompileMemberOffset(CPUMCTX, hwvirt, 728);
-AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.uMsrHSavePa,      728);
-AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.GCPhysVmcb,       736);
-AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.VmcbCtrl,         744);
-AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.HostState,       1000);
-AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.fGif,            1184);
-AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvMsrBitmapR0,   1192);
-AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvMsrBitmapR3,   HC_ARCH_BITS == 64 ? 1200 : 1196);
-AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvIoBitmapR0,    HC_ARCH_BITS == 64 ? 1208 : 1200);
-AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvIoBitmapR3,    HC_ARCH_BITS == 64 ? 1216 : 1204);
-AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) fLocalForcedActions, 1224);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.uMsrHSavePa,            728);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.GCPhysVmcb,             736);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.VmcbCtrl,               744);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.HostState,             1000);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.fGif,                  1184);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.cPauseFilter,          1185);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.cPauseFilterThreshold, 1187);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvMsrBitmapR0,         1192);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvMsrBitmapR3,         HC_ARCH_BITS == 64 ? 1200 : 1196);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvIoBitmapR0,          HC_ARCH_BITS == 64 ? 1208 : 1200);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvIoBitmapR3,          HC_ARCH_BITS == 64 ? 1216 : 1204);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) fLocalForcedActions,       1224);
 
 AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_NM(g.) aGRegs);
Index: /trunk/include/VBox/vmm/em.h
===================================================================
--- /trunk/include/VBox/vmm/em.h	(revision 66580)
+++ /trunk/include/VBox/vmm/em.h	(revision 66581)
@@ -199,4 +199,5 @@
 VMM_INT_DECL(bool)              EMMonitorWaitShouldContinue(PVMCPU pVCpu, PCPUMCTX pCtx);
 VMM_INT_DECL(int)               EMMonitorWaitPrepare(PVMCPU pVCpu, uint64_t rax, uint64_t rcx, uint64_t rdx, RTGCPHYS GCPhys);
+VMM_INT_DECL(bool)              EMMonitorIsArmed(PVMCPU pVCpu);
 VMM_INT_DECL(int)               EMMonitorWaitPerform(PVMCPU pVCpu, uint64_t rax, uint64_t rcx);
 VMM_INT_DECL(int)               EMUnhaltAndWakeUp(PVM pVM, PVMCPU pVCpuDst);
Index: /trunk/include/VBox/vmm/hm_svm.h
===================================================================
--- /trunk/include/VBox/vmm/hm_svm.h	(revision 66580)
+++ /trunk/include/VBox/vmm/hm_svm.h	(revision 66581)
@@ -45,33 +45,4 @@
  * @{
  */
-
-/** @name SVM features for cpuid 0x8000000a
- * @{
- */
-/** Bit 0 - NP - Nested Paging supported. */
-#define AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING             RT_BIT(0)
-/** Bit 1 - LbrVirt - Support for saving five debug MSRs. */
-#define AMD_CPUID_SVM_FEATURE_EDX_LBR_VIRT                  RT_BIT(1)
-/** Bit 2 - SVML - SVM locking bit supported. */
-#define AMD_CPUID_SVM_FEATURE_EDX_SVM_LOCK                  RT_BIT(2)
-/** Bit 3 - NRIPS - Saving the next instruction pointer is supported. */
-#define AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE                 RT_BIT(3)
-/** Bit 4 - TscRateMsr - Support for MSR TSC ratio. */
-#define AMD_CPUID_SVM_FEATURE_EDX_TSC_RATE_MSR              RT_BIT(4)
-/** Bit 5 - VmcbClean - Support VMCB clean bits. */
-#define AMD_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN                RT_BIT(5)
-/** Bit 6 - FlushByAsid - Indicate TLB flushing for current ASID only, and that
- *  VMCB.TLB_Control is supported. */
-#define AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID             RT_BIT(6)
-/** Bit 7 - DecodeAssist - Indicate decode assist is supported. */
-#define AMD_CPUID_SVM_FEATURE_EDX_DECODE_ASSIST             RT_BIT(7)
-/** Bit 10 - PauseFilter - Indicates support for the PAUSE intercept filter. */
-#define AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER              RT_BIT(10)
-/** Bit 12 - PauseFilterThreshold - Indicates support for the PAUSE
- *  intercept filter cycle count threshold. */
-#define AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD    RT_BIT(12)
-/** Bit 13 - AVIC - Advanced Virtual Interrupt Controller. */
-#define AMD_CPUID_SVM_FEATURE_EDX_AVIC                      RT_BIT(13)
-/** @} */
 
 /** @name SVM generic / convenient defines.
@@ -326,4 +297,11 @@
 /** @} */
 
+/** @name SVMVMCB.u64ExitInfo1 for Mov CRX accesses.
+ * @{
+ */
+/** Bit 63 - set when the access was via a Mov CRx instruction. */
+#define SVM_EXIT1_MOV_CRX_MASK                RT_BIT_64(63)
+/** @} */
+
 
 /** @name SVMVMCB.ctrl.u64InterceptCtrl
@@ -601,7 +579,7 @@
         uint32_t    u1OP16              : 1;   /**< Bit 5: 16-bit operand. */
         uint32_t    u1OP32              : 1;   /**< Bit 6: 32-bit operand. */
-        uint32_t    u1ADDR16            : 1;   /**< Bit 7: 16-bit operand. */
-        uint32_t    u1ADDR32            : 1;   /**< Bit 8: 32-bit operand. */
-        uint32_t    u1ADDR64            : 1;   /**< Bit 9: 64-bit operand. */
+        uint32_t    u1ADDR16            : 1;   /**< Bit 7: 16-bit address size. */
+        uint32_t    u1ADDR32            : 1;   /**< Bit 8: 32-bit address size. */
+        uint32_t    u1ADDR64            : 1;   /**< Bit 9: 64-bit address size. */
         uint32_t    u3SEG               : 3;   /**< BITS 12:10: Effective segment number. Added w/ decode assist in APM v3.17. */
         uint32_t    u3Reserved          : 3;
@@ -615,10 +593,34 @@
 typedef const SVMIOIOEXITINFO *PCSVMIOIOEXITINFO;
 
-/** @name SVMIOIOEXITINFO.u1Type
- *  @{ */
+/** 8-bit IO transfer. */
+#define SVM_IOIO_8_BIT_OP               RT_BIT_32(4)
+/** 16-bit IO transfer. */
+#define SVM_IOIO_16_BIT_OP              RT_BIT_32(5)
+/** 32-bit IO transfer. */
+#define SVM_IOIO_32_BIT_OP              RT_BIT_32(6)
+/** Mask of all possible IO transfer sizes. */
+#define SVM_IOIO_OP_SIZE_MASK           (SVM_IOIO_8_BIT_OP | SVM_IOIO_16_BIT_OP | SVM_IOIO_32_BIT_OP)
+/** 16-bit address for the IO buffer. */
+#define SVM_IOIO_16_BIT_ADDR            RT_BIT_32(7)
+/** 32-bit address for the IO buffer. */
+#define SVM_IOIO_32_BIT_ADDR            RT_BIT_32(8)
+/** 64-bit address for the IO buffer. */
+#define SVM_IOIO_64_BIT_ADDR            RT_BIT_32(9)
+/** Mask of all the IO address sizes. */
+#define SVM_IOIO_ADDR_SIZE_MASK         (SVM_IOIO_16_BIT_ADDR | SVM_IOIO_32_BIT_ADDR | SVM_IOIO_64_BIT_ADDR)
+/** Number of bits to left shift to get the IO port number. */
+#define SVM_IOIO_PORT_SHIFT             16
 /** IO write. */
 #define SVM_IOIO_WRITE                  0
 /** IO read. */
 #define SVM_IOIO_READ                   1
+/**
+ * SVM IOIO transfer type.
+ */
+typedef enum
+{
+    SVMIOIOTYPE_OUT = SVM_IOIO_WRITE,
+    SVMIOIOTYPE_IN  = SVM_IOIO_READ
+} SVMIOIOTYPE;
 /** @}*/
 
Index: /trunk/include/VBox/vmm/iem.h
===================================================================
--- /trunk/include/VBox/vmm/iem.h	(revision 66580)
+++ /trunk/include/VBox/vmm/iem.h	(revision 66581)
@@ -47,4 +47,24 @@
 #define IEMMODE_64BIT 2
 /** @} */
+
+
+/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
+ * @{ */
+/** CPU exception. */
+#define IEM_XCPT_FLAGS_T_CPU_XCPT       RT_BIT_32(0)
+/** External interrupt (from PIC, APIC, whatever). */
+#define IEM_XCPT_FLAGS_T_EXT_INT        RT_BIT_32(1)
+/** Software interrupt (int or into, not bound).
+ * Returns to the following instruction. */
+#define IEM_XCPT_FLAGS_T_SOFT_INT       RT_BIT_32(2)
+/** Takes an error code. */
+#define IEM_XCPT_FLAGS_ERR              RT_BIT_32(3)
+/** Takes a CR2. */
+#define IEM_XCPT_FLAGS_CR2              RT_BIT_32(4)
+/** Generated by the breakpoint instruction. */
+#define IEM_XCPT_FLAGS_BP_INSTR         RT_BIT_32(5)
+/** Generated by a DRx instruction breakpoint and RF should be cleared. */
+#define IEM_XCPT_FLAGS_DRx_INSTR_BP     RT_BIT_32(6)
+/** @}  */
 
 
@@ -117,7 +137,6 @@
 VMM_INT_DECL(void)          IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr);
 VMM_INT_DECL(void)          IEMTlbInvalidateAllPhysical(PVMCPU pVCpu);
-#ifdef VBOX_WITH_NESTED_HWVIRT
-VMM_INT_DECL(bool)          IEMIsRaisingIntOrXcpt(PVMCPU pVCpu);
-#endif
+VMM_INT_DECL(bool)          IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr,
+                                              uint64_t *puCr2);
 
 /** @name Given Instruction Interpreters
Index: /trunk/include/iprt/x86.h
===================================================================
--- /trunk/include/iprt/x86.h	(revision 66580)
+++ /trunk/include/iprt/x86.h	(revision 66581)
@@ -739,4 +739,35 @@
 /** Bit 12 - PA - Processor accumulator (MSR c001_007a). */
 #define X86_CPUID_AMD_ADVPOWER_EDX_PA        RT_BIT_32(12)
+/** @} */
+
+
+/** @name CPUID AMD SVM Feature information.
+ * CPUID query with EAX=0x8000000a.
+ * @{
+ */
+/** Bit 0 - NP - Nested Paging supported. */
+#define X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING             RT_BIT(0)
+/** Bit 1 - LbrVirt - Support for saving five debug MSRs. */
+#define X86_CPUID_SVM_FEATURE_EDX_LBR_VIRT                  RT_BIT(1)
+/** Bit 2 - SVML - SVM locking bit supported. */
+#define X86_CPUID_SVM_FEATURE_EDX_SVM_LOCK                  RT_BIT(2)
+/** Bit 3 - NRIPS - Saving the next instruction pointer is supported. */
+#define X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE                 RT_BIT(3)
+/** Bit 4 - TscRateMsr - Support for MSR TSC ratio. */
+#define X86_CPUID_SVM_FEATURE_EDX_TSC_RATE_MSR              RT_BIT(4)
+/** Bit 5 - VmcbClean - Support VMCB clean bits. */
+#define X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN                RT_BIT(5)
+/** Bit 6 - FlushByAsid - Indicate TLB flushing for current ASID only, and that
+ *  VMCB.TLB_Control is supported. */
+#define X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID             RT_BIT(6)
+/** Bit 7 - DecodeAssist - Indicate decode assist is supported. */
+#define X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSIST             RT_BIT(7)
+/** Bit 10 - PauseFilter - Indicates support for the PAUSE intercept filter. */
+#define X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER              RT_BIT(10)
+/** Bit 12 - PauseFilterThreshold - Indicates support for the PAUSE
+ *  intercept filter cycle count threshold. */
+#define X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD    RT_BIT(12)
+/** Bit 13 - AVIC - Advanced Virtual Interrupt Controller. */
+#define X86_CPUID_SVM_FEATURE_EDX_AVIC                      RT_BIT(13)
 /** @} */
 
Index: /trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp
===================================================================
--- /trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp	(revision 66580)
+++ /trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp	(revision 66581)
@@ -65,5 +65,4 @@
 #include <VBox/log.h>
 #include <VBox/err.h>
-#include <VBox/vmm/hm_svm.h>
 #include <VBox/vmm/hm_vmx.h>
 
@@ -4145,5 +4144,5 @@
                     /* Query AMD-V features. */
                     ASMCpuId(0x8000000a, &uDummy, &uDummy, &uDummy, &fSvmFeatures);
-                    if (fSvmFeatures & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
+                    if (fSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
                         *pfCaps |= SUPVTCAPS_NESTED_PAGING;
                 }
Index: /trunk/src/VBox/Main/src-server/HostImpl.cpp
===================================================================
--- /trunk/src/VBox/Main/src-server/HostImpl.cpp	(revision 66580)
+++ /trunk/src/VBox/Main/src-server/HostImpl.cpp	(revision 66581)
@@ -156,14 +156,5 @@
 #endif
 
-/* XXX Solaris: definitions in /usr/include/sys/regset.h clash with hm_svm.h */
-#undef DS
-#undef ES
-#undef CS
-#undef SS
-#undef FS
-#undef GS
-
 #include <VBox/usb.h>
-#include <VBox/vmm/hm_svm.h>
 #include <VBox/err.h>
 #include <VBox/settings.h>
@@ -365,5 +356,5 @@
                         uint32_t fSVMFeaturesEdx;
                         ASMCpuId(0x8000000a, &uDummy, &uDummy, &uDummy, &fSVMFeaturesEdx);
-                        if (fSVMFeaturesEdx & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
+                        if (fSVMFeaturesEdx & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
                             m->fNestedPagingSupported = true;
                     }
Index: /trunk/src/VBox/VMM/Makefile.kmk
===================================================================
--- /trunk/src/VBox/VMM/Makefile.kmk	(revision 66580)
+++ /trunk/src/VBox/VMM/Makefile.kmk	(revision 66581)
@@ -525,4 +525,5 @@
  VMMRC_DEFS      = IN_VMM_RC IN_RT_RC IN_DIS DIS_CORE_ONLY VBOX_WITH_RAW_MODE VBOX_WITH_RAW_MODE_NOT_R0 IN_SUP_RC \
  	$(VMM_COMMON_DEFS)
+ VMMRC_DEFS := $(filter-out VBOX_WITH_NESTED_HWVIRT,$(VMMRC_DEFS))
  ifdef VBOX_WITH_VMM_R0_SWITCH_STACK
   VMMRC_DEFS    += VMM_R0_SWITCH_STACK
Index: /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp	(revision 66580)
+++ /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp	(revision 66581)
@@ -1438,5 +1438,5 @@
     uint64_t uValidatedEfer;
     uint64_t const uOldEfer = pVCpu->cpum.s.Guest.msrEFER;
-    int rc = CPUMGetValidateEfer(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.s.Guest.cr0, uOldEfer, uValue, &uValidatedEfer);
+    int rc = CPUMQueryValidatedGuestEfer(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.s.Guest.cr0, uOldEfer, uValue, &uValidatedEfer);
     if (RT_FAILURE(rc))
         return VERR_CPUM_RAISE_GP_0;
@@ -6114,5 +6114,5 @@
  *                          this function returns VINF_SUCCESS).
  */
-VMMDECL(int) CPUMGetValidateEfer(PVM pVM, uint64_t uCr0, uint64_t uOldEfer, uint64_t uNewEfer, uint64_t *puValidEfer)
+VMMDECL(int) CPUMQueryValidatedGuestEfer(PVM pVM, uint64_t uCr0, uint64_t uOldEfer, uint64_t uNewEfer, uint64_t *puValidEfer)
 {
     uint32_t const  fExtFeatures = pVM->cpum.s.aGuestCpuIdPatmExt[0].uEax >= 0x80000001
Index: /trunk/src/VBox/VMM/VMMAll/EMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/EMAll.cpp	(revision 66580)
+++ /trunk/src/VBox/VMM/VMMAll/EMAll.cpp	(revision 66581)
@@ -193,4 +193,16 @@
     /** @todo Complete MONITOR implementation.  */
     return VINF_SUCCESS;
+}
+
+
+/**
+ * Checks if the monitor hardware is armed / active.
+ *
+ * @returns true if armed, false otherwise.
+ * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
+ */
+VMM_INT_DECL(bool) EMMonitorIsArmed(PVMCPU pVCpu)
+{
+    return RT_BOOL(pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_MONITOR_ACTIVE);
 }
 
Index: /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp	(revision 66580)
+++ /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp	(revision 66581)
@@ -176,5 +176,54 @@
 
 
+/**
+ * Converts an SVM event type to a TRPM event type.
+ *
+ * @returns The TRPM event type.
+ * @retval  TRPM_32BIT_HACK if the specified type of event isn't among the set
+ *          of recognized trap types.
+ *
+ * @param   pEvent       Pointer to the SVM event.
+ */
+VMM_INT_DECL(TRPMEVENT) hmSvmEventToTrpmEventType(PCSVMEVENT pEvent)
+{
+    uint8_t const uType = pEvent->n.u3Type;
+    switch (uType)
+    {
+        case SVM_EVENT_EXTERNAL_IRQ:    return TRPM_HARDWARE_INT;
+        case SVM_EVENT_SOFTWARE_INT:    return TRPM_SOFTWARE_INT;
+        case SVM_EVENT_EXCEPTION:
+        case SVM_EVENT_NMI:             return TRPM_TRAP;
+        default:
+            break;
+    }
+    AssertMsgFailed(("HMSvmEventToTrpmEvent: Invalid pending-event type %#x\n", uType));
+    return TRPM_32BIT_HACK;
+}
+
+
 #ifndef IN_RC
+/**
+ * Converts an IEM exception event type to an SVM event type.
+ *
+ * @returns The SVM event type.
+ * @retval  UINT8_MAX if the specified type of event isn't among the set
+ *          of recognized IEM event types.
+ *
+ * @param   uVector         The vector of the event.
+ * @param   fIemXcptFlags   The IEM exception / interrupt flags.
+ */
+static uint8_t hmSvmEventTypeFromIemEvent(uint32_t uVector, uint32_t fIemXcptFlags)
+{
+    if (fIemXcptFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
+        return SVM_EVENT_EXCEPTION;
+    if (fIemXcptFlags & IEM_XCPT_FLAGS_T_EXT_INT)
+        return uVector != X86_XCPT_NMI ? SVM_EVENT_EXTERNAL_IRQ : SVM_EVENT_NMI;
+    if (fIemXcptFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
+        return SVM_EVENT_SOFTWARE_INT;
+    AssertMsgFailed(("hmSvmEventTypeFromIemEvent: Invalid IEM xcpt/int. type %#x, uVector=%#x\n", fIemXcptFlags, uVector));
+    return UINT8_MAX;
+}
+
+
 /**
  * Performs the operations necessary that are part of the vmrun instruction
@@ -247,5 +296,5 @@
             /* Nested paging. */
             if (    pVmcbCtrl->NestedPaging.n.u1NestedPaging
-                && !pVM->cpum.ro.GuestFeatures.svm.feat.n.fNestedPaging)
+                && !pVM->cpum.ro.GuestFeatures.fSvmNestedPaging)
             {
                 Log(("HMSvmVmRun: Nested paging not supported -> #VMEXIT\n"));
@@ -255,5 +304,5 @@
             /* AVIC. */
             if (    pVmcbCtrl->IntCtrl.n.u1AvicEnable
-                && !pVM->cpum.ro.GuestFeatures.svm.feat.n.fAvic)
+                && !pVM->cpum.ro.GuestFeatures.fSvmAvic)
             {
                 Log(("HMSvmVmRun: AVIC not supported -> #VMEXIT\n"));
@@ -263,5 +312,5 @@
             /* Last branch record (LBR) virtualization. */
             if (    (pVmcbCtrl->u64LBRVirt & SVM_LBR_VIRT_ENABLE)
-                && !pVM->cpum.ro.GuestFeatures.svm.feat.n.fLbrVirt)
+                && !pVM->cpum.ro.GuestFeatures.fSvmLbrVirt)
             {
                 Log(("HMSvmVmRun: LBR virtualization not supported -> #VMEXIT\n"));
@@ -350,5 +399,5 @@
             /* EFER, CR0 and CR4. */
             uint64_t uValidEfer;
-            rc = CPUMGetValidateEfer(pVM, VmcbNstGst.u64CR0, 0 /* uOldEfer */, VmcbNstGst.u64EFER, &uValidEfer);
+            rc = CPUMQueryValidatedGuestEfer(pVM, VmcbNstGst.u64CR0, 0 /* uOldEfer */, VmcbNstGst.u64EFER, &uValidEfer);
             if (RT_FAILURE(rc))
             {
@@ -592,4 +641,27 @@
         pCtx->hwvirt.svm.VmcbCtrl.u64ExitInfo1 = uExitInfo1;
         pCtx->hwvirt.svm.VmcbCtrl.u64ExitInfo2 = uExitInfo2;
+
+        /*
+         * Update the exit interrupt information field if this #VMEXIT happened as a result
+         * of delivering an event.
+         */
+        {
+            uint8_t  uExitIntVector;
+            uint32_t uExitIntErr;
+            uint32_t fExitIntFlags;
+            bool const fRaisingEvent = IEMGetCurrentXcpt(pVCpu, &uExitIntVector, &fExitIntFlags, &uExitIntErr,
+                                                         NULL /* uExitIntCr2 */);
+            pCtx->hwvirt.svm.VmcbCtrl.ExitIntInfo.n.u1Valid = fRaisingEvent;
+            if (fRaisingEvent)
+            {
+                pCtx->hwvirt.svm.VmcbCtrl.ExitIntInfo.n.u8Vector = uExitIntVector;
+                pCtx->hwvirt.svm.VmcbCtrl.ExitIntInfo.n.u3Type   = hmSvmEventTypeFromIemEvent(uExitIntVector, fExitIntFlags);
+                if (fExitIntFlags & IEM_XCPT_FLAGS_ERR)
+                {
+                    pCtx->hwvirt.svm.VmcbCtrl.ExitIntInfo.n.u1ErrorCodeValid = true;
+                    pCtx->hwvirt.svm.VmcbCtrl.ExitIntInfo.n.u32ErrorCode     = uExitIntErr;
+                }
+            }
+        }
 
         /*
@@ -920,30 +992,31 @@
      * Check if any IO accesses are being intercepted.
      */
-    if (CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_IOIO_PROT))
-    {
-        Assert(CPUMIsGuestInNestedHwVirtMode(pCtx));
-
-        /*
-         * The IOPM layout:
-         * Each bit represents one 8-bit port. That makes a total of 0..65535 bits or
-         * two 4K pages. However, since it's possible to do a 32-bit port IO at port
-         * 65534 (thus accessing 4 bytes), we need 3 extra bits beyond the two 4K page.
-         *
-         * For IO instructions that access more than a single byte, the permission bits
-         * for all bytes are checked; if any bit is set to 1, the IO access is intercepted.
-         */
-        uint8_t *pbIopm = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap);
-
-        uint16_t const u16Port     = pIoExitInfo->n.u16Port;
-        uint16_t const offIoBitmap = u16Port >> 3;
-        uint16_t const fSizeMask   = pIoExitInfo->n.u1OP32 ? 0xf : pIoExitInfo->n.u1OP16 ? 3 : 1;
-        uint8_t  const cShift      = u16Port - (offIoBitmap << 3);
-        uint16_t const fIopmMask   = (1 << cShift) | (fSizeMask << cShift);
-
-        pbIopm += offIoBitmap;
-        uint16_t const fIopmBits = *(uint16_t *)pbIopm;
-        if (fIopmBits & fIopmMask)
-            return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_IOIO, pIoExitInfo->u, uNextRip);
-    }
+    Assert(CPUMIsGuestInNestedHwVirtMode(pCtx));
+    Assert(CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_IOIO_PROT));
+
+    /*
+     * The IOPM layout:
+     * Each bit represents one 8-bit port. That makes a total of 0..65535 bits or
+     * two 4K pages.
+     *
+     * For IO instructions that access more than a single byte, the permission bits
+     * for all bytes are checked; if any bit is set to 1, the IO access is intercepted.
+     *
+     * Since it's possible to do a 32-bit IO access at port 65534 (accessing 4 bytes),
+     * we need 3 extra bits beyond the second 4K page.
+     */
+    uint8_t const *pbIopm = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap);
+
+    uint16_t const u16Port   = pIoExitInfo->n.u16Port;
+    uint16_t const offIopm   = u16Port >> 3;
+    uint16_t const fSizeMask = pIoExitInfo->n.u1OP32 ? 0xf : pIoExitInfo->n.u1OP16 ? 3 : 1;
+    uint8_t  const cShift    = u16Port - (offIopm << 3);
+    uint16_t const fIopmMask = (1 << cShift) | (fSizeMask << cShift);
+
+    pbIopm += offIopm;
+    uint16_t const fIopmBits = *(uint16_t *)pbIopm;
+    if (fIopmBits & fIopmMask)
+        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_IOIO, pIoExitInfo->u, uNextRip);
+
     return VINF_HM_INTERCEPT_NOT_ACTIVE;
 }
@@ -955,6 +1028,6 @@
  *
  * @returns Strict VBox status code.
- * @retval  VINF_SVM_INTERCEPT_NOT_ACTIVE if the intercept is not active or
- *          we're not executing a nested-guest.
+ * @retval  VINF_SVM_INTERCEPT_NOT_ACTIVE if the MSR permission bitmap does not
+ *          specify interception of the accessed MSR @a idMsr.
  * @retval  VINF_SVM_VMEXIT if the intercept is active and the \#VMEXIT occurred
  *          successfully.
@@ -973,38 +1046,37 @@
      * Check if any MSRs are being intercepted.
      */
-    if (CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_MSR_PROT))
+    Assert(CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_MSR_PROT));
+    Assert(CPUMIsGuestInNestedHwVirtMode(pCtx));
+
+    uint64_t const uExitInfo1 = fWrite ? SVM_EXIT1_MSR_WRITE : SVM_EXIT1_MSR_READ;
+
+    /*
+     * Get the byte and bit offset of the permission bits corresponding to the MSR.
+     */
+    uint16_t offMsrpm;
+    uint32_t uMsrpmBit;
+    int rc = hmSvmGetMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
+    if (RT_SUCCESS(rc))
     {
-        Assert(CPUMIsGuestInNestedHwVirtMode(pCtx));
-        uint64_t const uExitInfo1 = fWrite ? SVM_EXIT1_MSR_WRITE : SVM_EXIT1_MSR_READ;
+        Assert(uMsrpmBit < 0x3fff);
+        Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
+        if (fWrite)
+            ++uMsrpmBit;
 
         /*
-         * Get the byte and bit offset of the permission bits corresponding to the MSR.
+         * Check if the bit is set, if so, trigger a #VMEXIT.
          */
-        uint16_t offMsrpm;
-        uint32_t uMsrpmBit;
-        int rc = hmSvmGetMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
-        if (RT_SUCCESS(rc))
-        {
-            Assert(uMsrpmBit < 0x3fff);
-            Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
-            if (fWrite)
-                ++uMsrpmBit;
-
-            /*
-             * Check if the bit is set, if so, trigger a #VMEXIT.
-             */
-            uint8_t *pbMsrpm = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
-            pbMsrpm += offMsrpm;
-            if (ASMBitTest(pbMsrpm, uMsrpmBit))
-                return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
-        }
-        else
-        {
-            /*
-             * This shouldn't happen, but if it does, cause a #VMEXIT and let the "host" (guest hypervisor) deal with it.
-             */
-            Log(("HMSvmNstGstHandleIntercept: Invalid/out-of-range MSR %#RX32 fWrite=%RTbool\n", idMsr, fWrite));
+        uint8_t *pbMsrpm = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
+        pbMsrpm += offMsrpm;
+        if (ASMBitTest(pbMsrpm, uMsrpmBit))
             return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
-        }
+    }
+    else
+    {
+        /*
+         * This shouldn't happen, but if it does, cause a #VMEXIT and let the "host" (guest hypervisor) deal with it.
+         */
+        Log(("HMSvmNstGstHandleIntercept: Invalid/out-of-range MSR %#RX32 fWrite=%RTbool\n", idMsr, fWrite));
+        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
     }
     return VINF_HM_INTERCEPT_NOT_ACTIVE;
@@ -1069,28 +1141,2 @@
 #endif /* !IN_RC */
 
-
-/**
- * Converts an SVM event type to a TRPM event type.
- *
- * @returns The TRPM event type.
- * @retval  TRPM_32BIT_HACK if the specified type of event isn't among the set
- *          of recognized trap types.
- *
- * @param   pEvent       Pointer to the SVM event.
- */
-VMM_INT_DECL(TRPMEVENT) hmSvmEventToTrpmEventType(PCSVMEVENT pEvent)
-{
-    uint8_t const uType = pEvent->n.u3Type;
-    switch (uType)
-    {
-        case SVM_EVENT_EXTERNAL_IRQ:    return TRPM_HARDWARE_INT;
-        case SVM_EVENT_SOFTWARE_INT:    return TRPM_SOFTWARE_INT;
-        case SVM_EVENT_EXCEPTION:
-        case SVM_EVENT_NMI:             return TRPM_TRAP;
-        default:
-            break;
-    }
-    AssertMsgFailed(("HMSvmEventToTrpmEvent: Invalid pending-event type %#x\n", uType));
-    return TRPM_32BIT_HACK;
-}
-
Index: /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp	(revision 66580)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp	(revision 66581)
@@ -370,5 +370,5 @@
  * Check the common SVM instruction preconditions.
  */
-#define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
+# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
     do { \
         if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
@@ -392,36 +392,83 @@
  * Check if an SVM is enabled.
  */
-#define IEM_IS_SVM_ENABLED(a_pVCpu)                         (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
+# define IEM_IS_SVM_ENABLED(a_pVCpu)                         (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
 
 /**
  * Check if an SVM control/instruction intercept is set.
  */
-#define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_Intercept)))
+# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_Intercept)))
 
 /**
  * Check if an SVM read CRx intercept is set.
  */
-#define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)    (CPUMIsGuestSvmReadCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
+# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)    (CPUMIsGuestSvmReadCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
 
 /**
  * Check if an SVM write CRx intercept is set.
  */
-#define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr)   (CPUMIsGuestSvmWriteCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
+# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr)   (CPUMIsGuestSvmWriteCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
 
 /**
  * Check if an SVM read DRx intercept is set.
  */
-#define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr)    (CPUMIsGuestSvmReadDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
+# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr)    (CPUMIsGuestSvmReadDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
 
 /**
  * Check if an SVM write DRx intercept is set.
  */
-#define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr)   (CPUMIsGuestSvmWriteDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
+# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr)   (CPUMIsGuestSvmWriteDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
 
 /**
  * Check if an SVM exception intercept is set.
  */
-#define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_enmXcpt)   (CPUMIsGuestSvmXcptInterceptSet(IEM_GET_CTX(a_pVCpu), (a_enmXcpt)))
-#endif /* VBOX_WITH_NESTED_HWVIRT */
+# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector)   (CPUMIsGuestSvmXcptInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uVector)))
+
+/**
+ * Invokes the SVM \#VMEXIT handler for the nested-guest.
+ */
+# define IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
+    do \
+    { \
+        VBOXSTRICTRC rcStrictVmExit = HMSvmNstGstVmExit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), \
+                                                        (a_uExitInfo2)); \
+        return rcStrictVmExit == VINF_SVM_VMEXIT ? VINF_SUCCESS : rcStrictVmExit; \
+    } while (0)
+
+/**
+ * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
+ * corresponding decode assist information.
+ */
+# define IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
+    do \
+    { \
+        uint64_t uExitInfo1; \
+        if (   IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssist \
+            && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
+            uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
+        else \
+            uExitInfo1 = 0; \
+        IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
+    } while (0)
+
+/**
+ * Checks and handles an SVM MSR intercept.
+ */
+# define IEM_SVM_NST_GST_MSR_INTERCEPT(a_pVCpu, a_idMsr, a_fWrite) \
+    HMSvmNstGstHandleMsrIntercept((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_idMsr), (a_fWrite))
+
+#else
+# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr)                                    do { } while (0)
+# define IEM_IS_SVM_ENABLED(a_pVCpu)                                                      (false)
+# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)                              (false)
+# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)                                 (false)
+# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr)                                (false)
+# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr)                                 (false)
+# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr)                                (false)
+# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector)                                (false)
+# define IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2)  do { return VERR_SVM_IPE_1; } while (0)
+# define IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
+# define IEM_SVM_NST_GST_MSR_INTERCEPT(a_pVCpu, a_idMsr, a_fWrite)                        (VERR_SVM_IPE_1)
+
+#endif /* VBOX_WITH_NESTED_HWVIRT && !IN_RC */
 
 
@@ -834,4 +881,54 @@
 IEM_STATIC VBOXSTRICTRC     iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
 
+#if defined(VBOX_WITH_NESTED_HWVIRT) && !defined(IN_RC)
+/**
+ * Checks if the intercepted IO instruction causes a \#VMEXIT and handles it
+ * accordingly.
+ *
+ * @returns VBox strict status code.
+ * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
+ * @param   u16Port         The IO port being accessed.
+ * @param   enmIoType       The type of IO access.
+ * @param   cbReg           The IO operand size in bytes.
+ * @param   cAddrSizeBits   The address size bits (for 16, 32 or 64).
+ * @param   iEffSeg         The effective segment number.
+ * @param   fRep            Whether this is a repeating IO instruction (REP prefix).
+ * @param   fStrIo          Whether this is a string IO instruction.
+ * @param   cbInstr         The length of the IO instruction in bytes.
+ *
+ * @remarks This must be called only when IO instructions are intercepted by the
+ *          nested-guest hypervisor.
+ */
+IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
+                                                uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr)
+{
+    Assert(IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT));
+    Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
+    Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);
+
+    static const uint32_t s_auIoOpSize[]   = { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };
+    static const uint32_t s_auIoAddrSize[] = { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };
+
+    SVMIOIOEXITINFO IoExitInfo;
+    IoExitInfo.u         = s_auIoOpSize[cbReg & 7];
+    IoExitInfo.u        |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
+    IoExitInfo.n.u1STR   = fStrIo;
+    IoExitInfo.n.u1REP   = fRep;
+    IoExitInfo.n.u3SEG   = iEffSeg & 0x7;
+    IoExitInfo.n.u1Type  = enmIoType;
+    IoExitInfo.n.u16Port = u16Port;
+
+    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+    return HMSvmNstGstHandleIOIntercept(pVCpu, pCtx, &IoExitInfo, pCtx->rip + cbInstr);
+}
+
+#else
+IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
+                                                uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr)
+{
+    RT_NOREF9(pVCpu, u16Port, enmIoType, cbReg, cAddrSizeBits, iEffSeg, fRep, fStrIo, cbInstr);
+    return VERR_IEM_IPE_9;
+}
+#endif /* VBOX_WITH_NESTED_HWVIRT && !IN_RC */
 
 
@@ -3126,4 +3223,10 @@
 IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
 {
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
+    {
+        Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
     RT_NOREF_PV(pVCpu);
     /** @todo Probably need a separate error code and handling for this to
@@ -3249,23 +3352,4 @@
  * @{
  */
-
-/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
- * @{ */
-/** CPU exception. */
-#define IEM_XCPT_FLAGS_T_CPU_XCPT       RT_BIT_32(0)
-/** External interrupt (from PIC, APIC, whatever). */
-#define IEM_XCPT_FLAGS_T_EXT_INT        RT_BIT_32(1)
-/** Software interrupt (int or into, not bound).
- * Returns to the following instruction */
-#define IEM_XCPT_FLAGS_T_SOFT_INT       RT_BIT_32(2)
-/** Takes an error code. */
-#define IEM_XCPT_FLAGS_ERR              RT_BIT_32(3)
-/** Takes a CR2. */
-#define IEM_XCPT_FLAGS_CR2              RT_BIT_32(4)
-/** Generated by the breakpoint instruction. */
-#define IEM_XCPT_FLAGS_BP_INSTR         RT_BIT_32(5)
-/** Generated by a DRx instruction breakpoint and RF should be cleared. */
-#define IEM_XCPT_FLAGS_DRx_INSTR_BP     RT_BIT_32(6)
-/** @}  */
 
 
@@ -5165,4 +5249,65 @@
 #endif
 
+#if defined(VBOX_WITH_NESTED_HWVIRT) && !defined(IN_RC)
+    if (IEM_IS_SVM_ENABLED(pVCpu))
+    {
+        /*
+         * Handle nested-guest SVM exception and software interrupt intercepts,
+         * see AMD spec. 15.12 "Exception Intercepts".
+         *
+         *   - NMI intercepts have their own exit code and do not cause SVM_EXIT_EXCEPTION_2 #VMEXITs.
+         *   - External interrupts and software interrupts (INTn instruction) do not check the exception intercepts
+         *     even when they use a vector in the range 0 to 31.
+         *   - ICEBP should not trigger #DB intercept, but its own intercept, so we catch it early in iemOp_int1.
+         *   - For #PF exceptions, its intercept is checked before CR2 is written by the exception.
+         */
+        /* Check NMI intercept */
+        if (   u8Vector == X86_XCPT_NMI
+            && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_NMI))
+        {
+            Log(("iemRaiseXcptOrInt: NMI intercept -> #VMEXIT\n"));
+            IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        }
+
+        /* Check CPU exception intercepts. */
+        if (   IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, u8Vector)
+            && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
+        {
+            Assert(u8Vector <= 31 /* X86_XCPT_MAX */);
+            uint64_t const uExitInfo1 = fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0;
+            uint64_t const uExitInfo2 = fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0;
+            if (   IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist
+                && u8Vector == X86_XCPT_PF
+                && !(uErr & X86_TRAP_PF_ID))
+            {
+                /** @todo Nested-guest SVM - figure out fetching op-code bytes from IEM. */
+#ifdef IEM_WITH_CODE_TLB
+#else
+                uint8_t const offOpCode = pVCpu->iem.s.offOpcode;
+                uint8_t const cbCurrent = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode;
+                if (   cbCurrent > 0
+                    && cbCurrent < sizeof(pCtx->hwvirt.svm.VmcbCtrl.abInstr))
+                {
+                    Assert(cbCurrent <= sizeof(pVCpu->iem.s.abOpcode));
+                    memcpy(&pCtx->hwvirt.svm.VmcbCtrl.abInstr[0], &pVCpu->iem.s.abOpcode[offOpCode], cbCurrent);
+                }
+#endif
+            }
+            Log(("iemRaiseXcptOrInt: Xcpt intercept (u8Vector=%#x uExitInfo1=%#RX64, uExitInfo2=%#RX64 -> #VMEXIT\n", u8Vector,
+                 uExitInfo1, uExitInfo2));
+            IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + u8Vector, uExitInfo1, uExitInfo2);
+        }
+
+        /* Check software interrupt (INTn) intercepts. */
+        if (   IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INTN)
+            && (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
+        {
+            uint64_t const uExitInfo1 = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? u8Vector : 0;
+            Log(("iemRaiseXcptOrInt: Software INT intercept (u8Vector=%#x) -> #VMEXIT\n", u8Vector));
+            IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SWINT, uExitInfo1, 0 /* uExitInfo2 */);
+        }
+    }
+#endif
+
     /*
      * Do recursion accounting.
@@ -5179,4 +5324,6 @@
 
         /** @todo double and tripple faults. */
+        /** @todo When implementing #DF, the SVM nested-guest #DF intercepts needs some
+         *        care. See AMD spec. 15.12 "Exception Intercepts". */
         if (pVCpu->iem.s.cXcptRecursions >= 3)
         {
@@ -5194,6 +5341,8 @@
     }
     pVCpu->iem.s.cXcptRecursions++;
-    pVCpu->iem.s.uCurXcpt = u8Vector;
-    pVCpu->iem.s.fCurXcpt = fFlags;
+    pVCpu->iem.s.uCurXcpt    = u8Vector;
+    pVCpu->iem.s.fCurXcpt    = fFlags;
+    pVCpu->iem.s.uCurXcptErr = uErr;
+    pVCpu->iem.s.uCurXcptCr2 = uCr2;
 
     /*
@@ -9669,9 +9818,16 @@
 iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
 {
+    VBOXSTRICTRC rcStrict;
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
+    {
+        Log(("sidt/sgdt: Guest intercept -> #VMEXIT\n"));
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
     /*
      * The SIDT and SGDT instructions actually stores the data using two
      * independent writes.  The instructions does not respond to opsize prefixes.
      */
-    VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
+    rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
     if (rcStrict == VINF_SUCCESS)
     {
@@ -11756,16 +11912,4 @@
     } while (0)
 
-#if 0
-#ifdef VBOX_WITH_NESTED_HWVIRT
-/** The instruction raises an \#UD when SVM is not enabled. */
-#define IEMOP_HLP_NEEDS_SVM_ENABLED() \
-    do \
-    { \
-        if (IEM_IS_SVM_ENABLED(pVCpu)) \
-            return IEMOP_RAISE_INVALID_OPCODE(); \
-    } while (0)
-#endif
-#endif
-
 /** The instruction is not available in 64-bit mode, throw \#UD if we're in
  * 64-bit mode. */
@@ -11910,4 +12054,31 @@
             return IEMOP_RAISE_INVALID_OPCODE(); \
     } while (0)
+
+#if defined(VBOX_WITH_NESTED_HWVIRT) && !defined(IN_RC)
+/** Check and handles SVM nested-guest control & instruction intercept. */
+# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
+    do \
+    { \
+        if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
+            IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
+    } while (0)
+
+/** Check and handle SVM nested-guest CR0 read intercept. */
+# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
+    do \
+    { \
+        if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
+            IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
+    } while (0)
+
+#else
+# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
+    do { RT_NOREF5(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2); } while (0)
+
+# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
+    do { RT_NOREF4(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2); } while (0)
+
+#endif /* VBOX_WITH_NESTED_HWVIRT && !IN_RC */
+
 
 /**
@@ -15040,18 +15211,40 @@
 
 
-#ifdef VBOX_WITH_NESTED_HWVIRT
 /**
  * Checks if IEM is in the process of delivering an event (interrupt or
  * exception).
  *
- * @returns true if it's raising an interrupt or exception, false otherwise.
- * @param   pVCpu       The cross context virtual CPU structure.
- */
-VMM_INT_DECL(bool) IEMIsRaisingIntOrXcpt(PVMCPU pVCpu)
-{
-    return pVCpu->iem.s.cXcptRecursions > 0;
-}
-
-
+ * @returns true if we're in the process of raising an interrupt or exception,
+ *          false otherwise.
+ * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   puVector        Where to store the vector associated with the
+ *                          currently delivered event, optional.
+ * @param   pfFlags         Where to store the event delivery flags (see
+ *                          IEM_XCPT_FLAGS_XXX), optional.
+ * @param   puErr           Where to store the error code associated with the
+ *                          event, optional.
+ * @param   puCr2           Where to store the CR2 associated with the event,
+ *                          optional.
+ */
+VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
+{
+    bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
+    if (fRaisingXcpt)
+    {
+        if (puVector)
+            *puVector = pVCpu->iem.s.uCurXcpt;
+        if (pfFlags)
+            *pfFlags = pVCpu->iem.s.fCurXcpt;
+        /* The caller should check the flags to determine if the error code & CR2 are valid for the event. */
+        if (puErr)
+            *puErr = pVCpu->iem.s.uCurXcptErr;
+        if (puCr2)
+            *puCr2 = pVCpu->iem.s.uCurXcptCr2;
+    }
+    return fRaisingXcpt;
+}
+
+
+#ifdef VBOX_WITH_NESTED_HWVIRT
 /**
  * Interface for HM and EM to emulate the STGI instruction.
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h	(revision 66580)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h	(revision 66581)
@@ -558,4 +558,10 @@
     VBOXSTRICTRC rcStrict;
 
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_PUSHF))
+    {
+        Log2(("pushf: Guest intercept -> #VMEXIT\n"));
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_PUSHF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
     /*
      * If we're in V8086 mode some care is required (which is why we're in
@@ -618,4 +624,10 @@
     VBOXSTRICTRC    rcStrict;
     uint32_t        fEflNew;
+
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_POPF))
+    {
+        Log2(("popf: Guest intercept -> #VMEXIT\n"));
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_POPF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
 
     /*
@@ -3856,4 +3868,14 @@
 
     /*
+     * The SVM nested-guest intercept for iret takes priority over all exceptions,
+     * see AMD spec. "15.9 Instruction Intercepts".
+     */
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IRET))
+    {
+        Log(("iret: Guest intercept -> #VMEXIT\n"));
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IRET, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
+    /*
      * Call a mode specific worker.
      */
@@ -4632,4 +4654,10 @@
     Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
 
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_WRITES))
+    {
+        Log(("lgdt: Guest intercept -> #VMEXIT\n"));
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_GDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
     /*
      * Fetch the limit and base address.
@@ -4698,4 +4726,10 @@
     Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
 
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_WRITES))
+    {
+        Log(("lidt: Guest intercept -> #VMEXIT\n"));
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
     /*
      * Fetch the limit and base address.
@@ -4783,4 +4817,11 @@
     if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
     {
+        /* Nested-guest SVM intercept. */
+        if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
+        {
+            Log(("lldt: Guest intercept -> #VMEXIT\n"));
+            IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        }
+
         Log(("lldt %04x: Loading NULL selector.\n",  uNewLdt));
         if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
@@ -4855,4 +4896,11 @@
         Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
         return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewLdt);
+    }
+
+    /* Nested-guest SVM intercept. */
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
+    {
+        Log(("lldt: Guest intercept -> #VMEXIT\n"));
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 
@@ -4908,4 +4956,9 @@
         return iemRaiseGeneralProtectionFault0(pVCpu);
     }
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TR_WRITES))
+    {
+        Log(("ltr: Guest intercept -> #VMEXIT\n"));
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_TR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
 
     /*
@@ -5010,4 +5063,10 @@
     Assert(!pCtx->eflags.Bits.u1VM);
 
+    if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(pVCpu, iCrReg))
+    {
+        Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
+        IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_READ_CR0 + iCrReg, IEMACCESSCRX_MOV_CRX, iGReg);
+    }
+
     /* read it */
     uint64_t crX;
@@ -5051,10 +5110,16 @@
  * @param   iCrReg          The CRx register to write (valid).
  * @param   uNewCrX         The new value.
- */
-IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
+ * @param   enmAccessCrx    The instruction that caused the CrX load.
+ * @param   iGReg           The general register in case of a 'mov CRx,GReg'
+ *                          instruction.
+ */
+IEM_CIMPL_DEF_4(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX, IEMACCESSCRX, enmAccessCrX, uint8_t, iGReg)
 {
     PCPUMCTX        pCtx  = IEM_GET_CTX(pVCpu);
     VBOXSTRICTRC    rcStrict;
     int             rc;
+#ifndef VBOX_WITH_NESTED_HWVIRT
+    RT_NOREF2(iGReg, enmAccessCrX);
+#endif
 
     /*
@@ -5128,4 +5193,24 @@
 
             /*
+             * SVM nested-guest CR0 write intercepts.
+             */
+            if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, iCrReg))
+            {
+                Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
+                IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR0, enmAccessCrX, iGReg);
+            }
+            if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CR0_SEL_WRITES))
+            {
+                /* 'lmsw' intercepts regardless of whether the TS/MP bits are actually toggled. */
+                if (   enmAccessCrX == IEMACCESSCRX_LMSW
+                    || (uNewCrX & ~(X86_CR0_TS | X86_CR0_MP)) != (uOldCrX & ~(X86_CR0_TS | X86_CR0_MP)))
+                {
+                    Assert(enmAccessCrX != IEMACCESSCRX_CLTS);
+                    Log(("iemCImpl_load_Cr%#x: TS/MP bit changed or lmsw instr: Guest intercept -> #VMEXIT\n", iCrReg));
+                    IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_CR0_SEL_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+                }
+            }
+
+            /*
              * Change CR0.
              */
@@ -5186,7 +5271,14 @@
          */
         case 2:
+        {
+            if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 2))
+            {
+                Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
+                IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR2, enmAccessCrX, iGReg);
+            }
             pCtx->cr2 = uNewCrX;
             rcStrict  = VINF_SUCCESS;
             break;
+        }
 
         /*
@@ -5219,4 +5311,10 @@
                      uNewCrX, uNewCrX & ~fValid));
                 uNewCrX &= fValid;
+            }
+
+            if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 3))
+            {
+                Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
+                IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR3, enmAccessCrX, iGReg);
             }
 
@@ -5284,4 +5382,9 @@
             }
 
+            if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 4))
+            {
+                Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
+                IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR4, enmAccessCrX, iGReg);
+            }
 
             /*
@@ -5337,4 +5440,10 @@
             }
 
+            if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 8))
+            {
+                Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
+                IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR8, enmAccessCrX, iGReg);
+            }
+
             if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
                 APICSetTpr(pVCpu, (uint8_t)uNewCrX << 4);
@@ -5379,5 +5488,5 @@
     else
         uNewCrX = iemGRegFetchU32(pVCpu, iGReg);
-    return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
+    return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, iCrReg, uNewCrX, IEMACCESSCRX_MOV_CRX, iGReg);
 }
 
@@ -5401,5 +5510,5 @@
     uint64_t uNewCr0 = pCtx->cr0     & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
     uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
-    return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
+    return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_LMSW, UINT8_MAX /* iGReg */);
 }
 
@@ -5416,5 +5525,5 @@
     uint64_t uNewCr0 = pCtx->cr0;
     uNewCr0 &= ~X86_CR0_TS;
-    return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
+    return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_CLTS, UINT8_MAX /* iGReg */);
 }
 
@@ -5478,4 +5587,15 @@
     }
 
+    /** @todo SVM nested-guest intercept for DR8-DR15? */
+    /*
+     * Check for any SVM nested-guest intercepts for the DRx read.
+     */
+    if (IEM_IS_SVM_READ_DR_INTERCEPT_SET(pVCpu, iDrReg))
+    {
+        Log(("mov r%u,dr%u: Guest intercept -> #VMEXIT\n", iGReg, iDrReg));
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_READ_DR0 + (iDrReg & 0xf),
+                               IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
+    }
+
     if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
         *(uint64_t *)iemGRegRef(pVCpu, iGReg) = drX;
@@ -5568,4 +5688,15 @@
     }
 
+    /** @todo SVM nested-guest intercept for DR8-DR15? */
+    /*
+     * Check for any SVM nested-guest intercepts for the DRx write.
+     */
+    if (IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(pVCpu, iDrReg))
+    {
+        Log2(("mov dr%u,r%u: Guest intercept -> #VMEXIT\n", iDrReg, iGReg));
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_WRITE_DR0 + (iDrReg & 0xf),
+                               IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
+    }
+
     /*
      * Do the actual setting.
@@ -5597,4 +5728,11 @@
     Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
 
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPG))
+    {
+        Log(("invlpg: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_INVLPG,
+                               IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? GCPtrPage : 0, 0 /* uExitInfo2 */);
+    }
+
     int rc = PGMInvalidatePage(pVCpu, GCPtrPage);
     iemRegAddToRipAndClearRF(pVCpu, cbInstr);
@@ -5629,4 +5767,10 @@
         Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl));
         return iemRaiseGeneralProtectionFault0(pVCpu);
+    }
+
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSC))
+    {
+        Log(("rdtsc: Guest intercept -> #VMEXIT\n"));
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_RDTSC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 
@@ -5647,4 +5791,75 @@
 
 /**
+ * Implements RDTSCP.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_rdtscp)
+{
+    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+
+    /*
+     * Check preconditions.
+     */
+    if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fRdTscP)
+        return iemRaiseUndefinedOpcode(pVCpu);
+
+    if (   (pCtx->cr4 & X86_CR4_TSD)
+        && pVCpu->iem.s.uCpl != 0)
+    {
+        Log(("rdtscp: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl));
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+    }
+
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSCP))
+    {
+        Log(("rdtscp: Guest intercept -> #VMEXIT\n"));
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_RDTSCP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
+    /*
+     * Do the job.
+     * Query the MSR first in case of trips to ring-3.
+     */
+    VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pCtx->rcx);
+    if (rcStrict == VINF_SUCCESS)
+    {
+        /* Low dword of the TSC_AUX msr only. */
+        pCtx->rcx &= UINT32_C(0xffffffff);
+
+        uint64_t uTicks = TMCpuTickGet(pVCpu);
+        pCtx->rax = (uint32_t)uTicks;
+        pCtx->rdx = uTicks >> 32;
+#ifdef IEM_VERIFICATION_MODE_FULL
+        pVCpu->iem.s.fIgnoreRaxRdx = true;
+#endif
+        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+    }
+    return rcStrict;
+}
+
+
+/**
+ * Implements RDPMC.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_rdpmc)
+{
+    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+    if (   pVCpu->iem.s.uCpl != 0
+        && !(pCtx->cr4 & X86_CR4_PCE))
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDPMC))
+    {
+        Log(("rdpmc: Guest intercept -> #VMEXIT\n"));
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_RDPMC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
+    /** @todo Implement RDPMC for the regular guest execution case (the above only
+     *        handles nested-guest intercepts). */
+    RT_NOREF(cbInstr);
+    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
+}
+
+
+/**
  * Implements RDMSR.
  */
@@ -5665,5 +5880,18 @@
      */
     RTUINT64U uValue;
-    VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pCtx->ecx, &uValue.u);
+    VBOXSTRICTRC rcStrict;
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
+    {
+        rcStrict = IEM_SVM_NST_GST_MSR_INTERCEPT(pVCpu, pCtx->ecx, false /* fWrite */);
+        if (rcStrict == VINF_SVM_VMEXIT)
+            return VINF_SUCCESS;
+        if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
+        {
+            Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", pCtx->ecx, VBOXSTRICTRC_VAL(rcStrict)));
+            return rcStrict;
+        }
+    }
+
+    rcStrict = CPUMQueryGuestMsr(pVCpu, pCtx->ecx, &uValue.u);
     if (rcStrict == VINF_SUCCESS)
     {
@@ -5718,4 +5946,16 @@
 
     VBOXSTRICTRC rcStrict;
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
+    {
+        rcStrict = IEM_SVM_NST_GST_MSR_INTERCEPT(pVCpu, pCtx->ecx, true /* fWrite */);
+        if (rcStrict == VINF_SVM_VMEXIT)
+            return VINF_SUCCESS;
+        if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
+        {
+            Log(("IEM: SVM intercepted wrmsr(%#x) failed. rc=%Rrc\n", pCtx->ecx, VBOXSTRICTRC_VAL(rcStrict)));
+            return rcStrict;
+        }
+    }
+
     if (!IEM_VERIFICATION_ENABLED(pVCpu))
         rcStrict = CPUMSetGuestMsr(pVCpu, pCtx->ecx, uValue.u);
@@ -5776,4 +6016,21 @@
 
     /*
+     * Check SVM nested-guest IO intercept.
+     */
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
+    {
+        rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_IN, cbReg, 0 /* N/A - cAddrSizeBits */,
+                                           0 /* N/A - iEffSeg */, false /* fRep */, false /* fStrIo */, cbInstr);
+        if (rcStrict == VINF_SVM_VMEXIT)
+            return VINF_SUCCESS;
+        if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
+        {
+            Log(("iemCImpl_in: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
+                 VBOXSTRICTRC_VAL(rcStrict)));
+            return rcStrict;
+        }
+    }
+
+    /*
      * Perform the I/O.
      */
@@ -5846,4 +6103,21 @@
 
     /*
+     * Check SVM nested-guest IO intercept.
+     */
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
+    {
+        rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_OUT, cbReg, 0 /* N/A - cAddrSizeBits */,
+                                           0 /* N/A - iEffSeg */, false /* fRep */, false /* fStrIo */, cbInstr);
+        if (rcStrict == VINF_SVM_VMEXIT)
+            return VINF_SUCCESS;
+        if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
+        {
+            Log(("iemCImpl_out: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
+                 VBOXSTRICTRC_VAL(rcStrict)));
+            return rcStrict;
+        }
+    }
+
+    /*
      * Perform the I/O.
      */
@@ -5914,11 +6188,9 @@
     }
 
-#ifndef IN_RC
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMRUN))
     {
         Log(("vmrun: Guest intercept -> #VMEXIT\n"));
-        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMRUN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-#endif
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMRUN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
 
     VBOXSTRICTRC rcStrict = HMSvmVmrun(pVCpu, pCtx, GCPhysVmcb);
@@ -5941,11 +6213,9 @@
 {
     PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
-#ifndef IN_RC
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL))
     {
-        Log(("vmrun: Guest intercept -> #VMEXIT\n"));
-        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-#endif
+        Log(("vmmcall: Guest intercept -> #VMEXIT\n"));
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
 
     bool fUpdatedRipAndRF;
@@ -5969,11 +6239,4 @@
     PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);
-#ifndef IN_RC
-    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
-    {
-        Log(("vmload: Guest intercept -> #VMEXIT\n"));
-        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMLOAD, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-#endif
 
     RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
@@ -5983,4 +6246,10 @@
         Log(("vmload: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
         return iemRaiseGeneralProtectionFault0(pVCpu);
+    }
+
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
+    {
+        Log(("vmload: Guest intercept -> #VMEXIT\n"));
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMLOAD, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 
@@ -6020,11 +6289,4 @@
     PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmsave);
-#ifndef IN_RC
-    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))
-    {
-        Log(("vmsave: Guest intercept -> #VMEXIT\n"));
-        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMSAVE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-#endif
 
     RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
@@ -6034,4 +6296,10 @@
         Log(("vmsave: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
         return iemRaiseGeneralProtectionFault0(pVCpu);
+    }
+
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))
+    {
+        Log(("vmsave: Guest intercept -> #VMEXIT\n"));
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMSAVE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 
@@ -6071,11 +6339,9 @@
     PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, clgi);
-#ifndef IN_RC
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
     {
         Log(("clgi: Guest intercept -> #VMEXIT\n"));
-        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_CLGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-#endif
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_CLGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
 
     pCtx->hwvirt.svm.fGif = 0;
@@ -6092,11 +6358,9 @@
     PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, stgi);
-#ifndef IN_RC
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_STGI))
     {
         Log2(("stgi: Guest intercept -> #VMEXIT\n"));
-        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_STGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-#endif
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_STGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
 
     pCtx->hwvirt.svm.fGif = 1;
@@ -6112,13 +6376,4 @@
 {
     PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
-    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, invlpga);
-#ifndef IN_RC
-    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
-    {
-        Log2(("invlpga: Guest intercept -> #VMEXIT\n"));
-        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVLPGA, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-#endif
-
     RTGCPTR  const GCPtrPage = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
     /** @todo PGM needs virtual ASID support. */
@@ -6126,7 +6381,39 @@
     uint32_t const uAsid     = pCtx->ecx;
 #endif
+
+    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, invlpga);
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
+    {
+        Log2(("invlpga: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_INVLPGA, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
     PGMInvalidatePage(pVCpu, GCPtrPage);
     iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     return VINF_SUCCESS;
+}
+
+
+/**
+ * Implements 'SKINIT'.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_skinit)
+{
+    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, skinit);
+
+    uint32_t uIgnore;
+    uint32_t fFeaturesECX;
+    CPUMGetGuestCpuId(pVCpu, 0x80000001, 0, &uIgnore, &uIgnore, &fFeaturesECX, &uIgnore);
+    if (!(fFeaturesECX & X86_CPUID_AMD_FEATURE_ECX_SKINIT))
+        return iemRaiseUndefinedOpcode(pVCpu);
+
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SKINIT))
+    {
+        Log2(("skinit: Guest intercept -> #VMEXIT\n"));
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SKINIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
+    RT_NOREF(cbInstr);
+    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
 }
 #endif /* VBOX_WITH_NESTED_HWVIRT */
@@ -6228,4 +6515,11 @@
     if (pVCpu->iem.s.uCpl != 0)
         return iemRaiseGeneralProtectionFault0(pVCpu);
+
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_HLT))
+    {
+        Log2(("hlt: Guest intercept -> #VMEXIT\n"));
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_HLT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
     iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     return VINF_EM_HALT;
@@ -6276,4 +6570,10 @@
         return rcStrict;
 
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MONITOR))
+    {
+        Log2(("monitor: Guest intercept -> #VMEXIT\n"));
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_MONITOR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
     /*
      * Call EM to prepare the monitor/wait.
@@ -6334,4 +6634,19 @@
 
     /*
+     * Check SVM nested-guest mwait intercepts.
+     */
+    if (   IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT_ARMED)
+        && EMMonitorIsArmed(pVCpu))
+    {
+        Log2(("mwait: Guest intercept (monitor hardware armed) -> #VMEXIT\n"));
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_MWAIT_ARMED, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT))
+    {
+        Log2(("mwait: Guest intercept -> #VMEXIT\n"));
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_MWAIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
+    /*
      * Call EM to prepare the monitor/wait.
      */
@@ -6378,4 +6693,10 @@
 {
     PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CPUID))
+    {
+        Log2(("cpuid: Guest intercept -> #VMEXIT\n"));
+        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_CPUID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
 
     CPUMGetGuestCpuId(pVCpu, pCtx->eax, pCtx->ecx, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
@@ -6726,4 +7047,10 @@
     if (pCtx->cr4 & X86_CR4_OSXSAVE)
     {
+        if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_XSETBV))
+        {
+            Log2(("xsetbv: Guest intercept -> #VMEXIT\n"));
+            IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_XSETBV, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        }
+
         if (pVCpu->iem.s.uCpl == 0)
         {
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h	(revision 66580)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h	(revision 66581)
@@ -1218,4 +1218,21 @@
     }
 
+    /*
+     * Check SVM nested-guest IO intercept.
+     */
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
+    {
+        rcStrict = iemSvmHandleIOIntercept(pVCpu, pCtx->dx, SVMIOIOTYPE_IN, OP_SIZE / 8, ADDR_SIZE, X86_SREG_ES, false /* fRep */,
+                                           true /* fStrIo */, cbInstr);
+        if (rcStrict == VINF_SVM_VMEXIT)
+            return VINF_SUCCESS;
+        if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
+        {
+            Log(("iemCImpl_ins_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", pCtx->dx, OP_SIZE / 8,
+                 VBOXSTRICTRC_VAL(rcStrict)));
+            return rcStrict;
+        }
+    }
+
     OP_TYPE        *puMem;
     rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W);
@@ -1269,4 +1286,21 @@
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;
+    }
+
+    /*
+     * Check SVM nested-guest IO intercept.
+     */
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
+    {
+        rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_IN, OP_SIZE / 8, ADDR_SIZE, X86_SREG_ES, true /* fRep */,
+                                           true /* fStrIo */, cbInstr);
+        if (rcStrict == VINF_SVM_VMEXIT)
+            return VINF_SUCCESS;
+        if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
+        {
+            Log(("iemCImpl_rep_ins_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, OP_SIZE / 8,
+                 VBOXSTRICTRC_VAL(rcStrict)));
+            return rcStrict;
+        }
     }
 
@@ -1455,4 +1489,21 @@
     }
 
+    /*
+     * Check SVM nested-guest IO intercept.
+     */
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
+    {
+        rcStrict = iemSvmHandleIOIntercept(pVCpu, pCtx->dx, SVMIOIOTYPE_OUT, OP_SIZE / 8, ADDR_SIZE, iEffSeg, false /* fRep */,
+                                           true /* fStrIo */, cbInstr);
+        if (rcStrict == VINF_SVM_VMEXIT)
+            return VINF_SUCCESS;
+        if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
+        {
+            Log(("iemCImpl_outs_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", pCtx->dx, OP_SIZE / 8,
+                 VBOXSTRICTRC_VAL(rcStrict)));
+            return rcStrict;
+        }
+    }
+
     OP_TYPE uValue;
     rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue, iEffSeg, pCtx->ADDR_rSI);
@@ -1496,4 +1547,21 @@
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;
+    }
+
+    /*
+     * Check SVM nested-guest IO intercept.
+     */
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
+    {
+        rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_OUT, OP_SIZE / 8, ADDR_SIZE, iEffSeg, true /* fRep */,
+                                           true /* fStrIo */, cbInstr);
+        if (rcStrict == VINF_SVM_VMEXIT)
+            return VINF_SUCCESS;
+        if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
+        {
+            Log(("iemCImpl_rep_outs_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, OP_SIZE / 8,
+                 VBOXSTRICTRC_VAL(rcStrict)));
+            return rcStrict;
+        }
     }
 
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsOneByte.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsOneByte.cpp.h	(revision 66580)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsOneByte.cpp.h	(revision 66581)
@@ -4454,5 +4454,13 @@
 
     if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)
+    {
         IEMOP_MNEMONIC(pause, "pause");
+#ifdef VBOX_WITH_NESTED_HWVIRT
+        /** @todo Pause filter count and threshold with SVM nested hardware virt. */
+        Assert(!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilter);
+        Assert(!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilterThreshold);
+#endif
+        IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_PAUSE, SVM_EXIT_PAUSE, 0, 0);
+    }
     else
         IEMOP_MNEMONIC(nop, "nop");
@@ -10582,4 +10590,5 @@
     IEMOP_HLP_MIN_386(); /** @todo does not generate #UD on 286, or so they say... */
     /** @todo testcase! */
+    IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_ICEBP, SVM_EXIT_ICEBP, 0, 0);
     return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, X86_XCPT_DB, false /*fIsBpInstr*/);
 }
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h	(revision 66580)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h	(revision 66581)
@@ -35,4 +35,5 @@
     {
         IEMOP_HLP_DECODED_NL_1(OP_SLDT, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
+        IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0);
         switch (pVCpu->iem.s.enmEffOpSize)
         {
@@ -74,4 +75,5 @@
         IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
         IEMOP_HLP_DECODED_NL_1(OP_SLDT, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
+        IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0);
         IEM_MC_FETCH_LDTR_U16(u16Ldtr);
         IEM_MC_STORE_MEM_U16(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u16Ldtr);
@@ -93,4 +95,5 @@
     {
         IEMOP_HLP_DECODED_NL_1(OP_STR, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
+        IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0);
         switch (pVCpu->iem.s.enmEffOpSize)
         {
@@ -132,4 +135,5 @@
         IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
         IEMOP_HLP_DECODED_NL_1(OP_STR, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
+        IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0);
         IEM_MC_FETCH_TR_U16(u16Tr);
         IEM_MC_STORE_MEM_U16(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u16Tr);
@@ -482,4 +486,12 @@
     return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_invlpga);
 }
+
+
+/** Opcode 0x0f 0x01 0xde. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_skinit)
+{
+    IEMOP_MNEMONIC(skinit, "skinit");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_skinit);
+}
 #else
 /** Opcode 0x0f 0x01 0xd8. */
@@ -503,8 +515,8 @@
 /** Opcode 0x0f 0x01 0xdf. */
 FNIEMOP_UD_STUB(iemOp_Grp7_Amd_invlpga);
-#endif /* VBOX_WITH_NESTED_HWVIRT */
 
 /** Opcode 0x0f 0x01 0xde. */
 FNIEMOP_UD_STUB(iemOp_Grp7_Amd_skinit);
+#endif /* VBOX_WITH_NESTED_HWVIRT */
 
 /** Opcode 0x0f 0x01 /4. */
@@ -516,4 +528,5 @@
     {
         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEMOP_HLP_SVM_READ_CR_INTERCEPT(pVCpu, /*cr*/ 0, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
         switch (pVCpu->iem.s.enmEffOpSize)
         {
@@ -562,4 +575,5 @@
         IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEMOP_HLP_SVM_READ_CR_INTERCEPT(pVCpu, /*cr*/ 0, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
         IEM_MC_FETCH_CR0_U16(u16Tmp);
         if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386)
@@ -636,7 +650,9 @@
 FNIEMOP_DEF(iemOp_Grp7_rdtscp)
 {
-    NOREF(pVCpu);
-    IEMOP_BITCH_ABOUT_STUB();
-    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
+    IEMOP_MNEMONIC(rdtscp, "rdtscp");
+    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+    /** @todo SVM intercept removal from here. */
+    IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RDTSCP, SVM_EXIT_RDTSCP, 0, 0);
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rdtscp);
 }
 
@@ -868,5 +884,17 @@
 
 /** Opcode 0x0f 0x08. */
-FNIEMOP_STUB(iemOp_invd);
+FNIEMOP_DEF(iemOp_invd)
+{
+    IEMOP_MNEMONIC(invd, "invd");
+#ifdef VBOX_WITH_NESTED_HWVIRT
+    IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO();
+    IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_INVD, SVM_EXIT_INVD, 0, 0);
+#endif
+    /** @todo implement invd for the regular case (above only handles nested SVM
+     *        exits). */
+    IEMOP_BITCH_ABOUT_STUB();
+    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
+}
+
 // IEMOP_HLP_MIN_486();
 
@@ -880,4 +908,5 @@
     IEM_MC_BEGIN(0, 0);
     IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO();
+    IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_WBINVD, SVM_EXIT_WBINVD, 0, 0);
     IEM_MC_ADVANCE_RIP();
     IEM_MC_END();
@@ -2031,5 +2060,12 @@
 
 /** Opcode 0x0f 0x34. */
-FNIEMOP_STUB(iemOp_rdpmc);
+FNIEMOP_DEF(iemOp_rdpmc)
+{
+    IEMOP_MNEMONIC(rdpmc, "rdpmc");
+    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rdpmc);
+}
+
+
 /** Opcode 0x0f 0x34. */
 FNIEMOP_STUB(iemOp_sysenter);
@@ -5722,5 +5758,14 @@
 
 /** Opcode 0x0f 0xaa. */
-FNIEMOP_STUB(iemOp_rsm);
+FNIEMOP_DEF(iemOp_rsm)
+{
+    IEMOP_MNEMONIC(rsm, "rsm");
+    IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RSM, SVM_EXIT_RSM, 0, 0);
+    /** @todo rsm - for the regular case (above handles only the SVM nested-guest
+     *        intercept). */
+    IEMOP_BITCH_ABOUT_STUB();
+    return IEMOP_RAISE_INVALID_OPCODE();
+}
+
 //IEMOP_HLP_MIN_386();
 
Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 66580)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 66581)
@@ -625,6 +625,6 @@
     Assert(pVM->hm.s.svm.fSupported);
 
-    bool const fPauseFilter          = RT_BOOL(pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER);
-    bool const fPauseFilterThreshold = RT_BOOL(pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD);
+    bool const fPauseFilter          = RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER);
+    bool const fPauseFilterThreshold = RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD);
     bool const fUsePauseFilter       = fPauseFilter && pVM->hm.s.svm.cPauseFilter && pVM->hm.s.svm.cPauseFilterThresholdTicks;
 
@@ -890,5 +890,5 @@
                 fHitASIDLimit             = true;
 
-                if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
+                if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
                 {
                     pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
@@ -905,5 +905,5 @@
                 && pCpu->fFlushAsidBeforeUse)
             {
-                if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
+                if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
                     pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
                 else
@@ -920,5 +920,5 @@
         else
         {
-            if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
+            if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
                 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
             else
@@ -3149,5 +3149,5 @@
 
     /* If VMCB Clean bits isn't supported by the CPU, simply mark all state-bits as dirty, indicating (re)load-from-VMCB. */
-    if (!(pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN))
+    if (!(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN))
         pVmcb->ctrl.u64VmcbCleanBits = 0;
 }
@@ -4148,5 +4148,5 @@
 DECLINLINE(void) hmR0SvmAdvanceRipHwAssist(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t cb)
 {
-    if (pVCpu->CTX_SUFF(pVM)->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
+    if (pVCpu->CTX_SUFF(pVM)->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
     {
         PCSVMVMCB pVmcb = (PCSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
@@ -4174,5 +4174,5 @@
 {
     Assert(cbLikely <= 15);   /* See Intel spec. 2.3.11 "AVX Instruction Length" */
-    if (pVCpu->CTX_SUFF(pVM)->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
+    if (pVCpu->CTX_SUFF(pVM)->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
     {
         PCSVMVMCB pVmcb = (PCSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
@@ -4569,5 +4569,5 @@
         }
 
-        if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
+        if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
         {
             rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pCtx));
@@ -4616,5 +4616,5 @@
         Assert(pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_READ);
 
-        if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
+        if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
         {
             rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pCtx));
@@ -4801,5 +4801,5 @@
                        only enabling it for Bulldozer and later with NRIP.  OS/2 broke on
                        2384 Opterons when only checking NRIP. */
-                    if (   (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
+                    if (   (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
                         && pVM->cpum.ro.GuestFeatures.enmMicroarch >= kCpumMicroarch_AMD_15h_First)
                     {
Index: /trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp	(revision 66580)
+++ /trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp	(revision 66581)
@@ -1712,6 +1712,16 @@
                 PCCPUMCPUIDLEAF pSvmLeaf = cpumR3CpuIdFindLeaf(paLeaves, cLeaves, 0x8000000a);
                 AssertLogRelReturn(pSvmLeaf, VERR_CPUM_IPE_1);
-                pFeatures->svm.feat.u   = pSvmLeaf->uEdx;
-                pFeatures->svm.uMaxAsid = pSvmLeaf->uEbx;
+                pFeatures->fSvmNestedPaging         = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING);
+                pFeatures->fSvmLbrVirt              = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_LBR_VIRT);
+                pFeatures->fSvmSvmLock              = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_SVM_LOCK);
+                pFeatures->fSvmNextRipSave          = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE);
+                pFeatures->fSvmTscRateMsr           = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_TSC_RATE_MSR);
+                pFeatures->fSvmVmcbClean            = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN);
+                pFeatures->fSvmFlusbByAsid          = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID);
+                pFeatures->fSvmDecodeAssist         = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSIST);
+                pFeatures->fSvmPauseFilter          = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER);
+                pFeatures->fSvmPauseFilterThreshold = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD);
+                pFeatures->fSvmAvic                 = RT_BOOL(pSvmLeaf->uEdx & X86_CPUID_SVM_FEATURE_EDX_AVIC);
+                pFeatures->uSvmMaxAsid              = pSvmLeaf->uEbx;
             }
         }
Index: /trunk/src/VBox/VMM/VMMR3/HM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/HM.cpp	(revision 66580)
+++ /trunk/src/VBox/VMM/VMMR3/HM.cpp	(revision 66581)
@@ -1618,15 +1618,15 @@
     {
 #define HMSVM_REPORT_FEATURE(a_StrDesc, a_Define) { a_Define, a_StrDesc }
-        HMSVM_REPORT_FEATURE("NESTED_PAGING",          AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
-        HMSVM_REPORT_FEATURE("LBR_VIRT",               AMD_CPUID_SVM_FEATURE_EDX_LBR_VIRT),
-        HMSVM_REPORT_FEATURE("SVM_LOCK",               AMD_CPUID_SVM_FEATURE_EDX_SVM_LOCK),
-        HMSVM_REPORT_FEATURE("NRIP_SAVE",              AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE),
-        HMSVM_REPORT_FEATURE("TSC_RATE_MSR",           AMD_CPUID_SVM_FEATURE_EDX_TSC_RATE_MSR),
-        HMSVM_REPORT_FEATURE("VMCB_CLEAN",             AMD_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN),
-        HMSVM_REPORT_FEATURE("FLUSH_BY_ASID",          AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID),
-        HMSVM_REPORT_FEATURE("DECODE_ASSIST",          AMD_CPUID_SVM_FEATURE_EDX_DECODE_ASSIST),
-        HMSVM_REPORT_FEATURE("PAUSE_FILTER",           AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER),
-        HMSVM_REPORT_FEATURE("PAUSE_FILTER_THRESHOLD", AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD),
-        HMSVM_REPORT_FEATURE("AVIC",                   AMD_CPUID_SVM_FEATURE_EDX_AVIC),
+        HMSVM_REPORT_FEATURE("NESTED_PAGING",          X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
+        HMSVM_REPORT_FEATURE("LBR_VIRT",               X86_CPUID_SVM_FEATURE_EDX_LBR_VIRT),
+        HMSVM_REPORT_FEATURE("SVM_LOCK",               X86_CPUID_SVM_FEATURE_EDX_SVM_LOCK),
+        HMSVM_REPORT_FEATURE("NRIP_SAVE",              X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE),
+        HMSVM_REPORT_FEATURE("TSC_RATE_MSR",           X86_CPUID_SVM_FEATURE_EDX_TSC_RATE_MSR),
+        HMSVM_REPORT_FEATURE("VMCB_CLEAN",             X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN),
+        HMSVM_REPORT_FEATURE("FLUSH_BY_ASID",          X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID),
+        HMSVM_REPORT_FEATURE("DECODE_ASSIST",          X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSIST),
+        HMSVM_REPORT_FEATURE("PAUSE_FILTER",           X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER),
+        HMSVM_REPORT_FEATURE("PAUSE_FILTER_THRESHOLD", X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD),
+        HMSVM_REPORT_FEATURE("AVIC",                   X86_CPUID_SVM_FEATURE_EDX_AVIC),
 #undef HMSVM_REPORT_FEATURE
     };
@@ -1648,5 +1648,5 @@
      */
     AssertLogRelReturn(   !pVM->hm.s.fNestedPaging
-                       || (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
+                       || (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
                        VERR_HM_IPE_1);
 
Index: /trunk/src/VBox/VMM/include/CPUMInternal.mac
===================================================================
--- /trunk/src/VBox/VMM/include/CPUMInternal.mac	(revision 66580)
+++ /trunk/src/VBox/VMM/include/CPUMInternal.mac	(revision 66581)
@@ -231,18 +231,20 @@
     .Guest.abPadding          resb    12
 %endif
-    .Guest.hwvirt.svm.uMsrHSavePa       resq    1
-    .Guest.hwvirt.svm.GCPhysVmcb        resq    1
-    .Guest.hwvirt.svm.VmcbCtrl          resb  256
-    .Guest.hwvirt.svm.HostState         resb  184
-    .Guest.hwvirt.svm.fGif              resb    1
-    .Guest.hwvirt.svm.abPadding0        resb    7
-    .Guest.hwvirt.svm.pvMsrBitmapR0     RTR0PTR_RES 1
-    .Guest.hwvirt.svm.pvMsrBitmapR3     RTR3PTR_RES 1
-    .Guest.hwvirt.svm.pvIoBitmapR0      RTR0PTR_RES 1
-    .Guest.hwvirt.svm.pvIoBitmapR3      RTR3PTR_RES 1
+    .Guest.hwvirt.svm.uMsrHSavePa               resq    1
+    .Guest.hwvirt.svm.GCPhysVmcb                resq    1
+    .Guest.hwvirt.svm.VmcbCtrl                  resb  256
+    .Guest.hwvirt.svm.HostState                 resb  184
+    .Guest.hwvirt.svm.fGif                      resb    1
+    .Guest.hwvirt.svm.cPauseFilter              resw    1
+    .Guest.hwvirt.svm.cPauseFilterThreshold     resw    1
+    .Guest.hwvirt.svm.abPadding0                resb    3
+    .Guest.hwvirt.svm.pvMsrBitmapR0             RTR0PTR_RES 1
+    .Guest.hwvirt.svm.pvMsrBitmapR3             RTR3PTR_RES 1
+    .Guest.hwvirt.svm.pvIoBitmapR0              RTR0PTR_RES 1
+    .Guest.hwvirt.svm.pvIoBitmapR3              RTR3PTR_RES 1
 %if HC_ARCH_BITS == 32
-    .Guest.hwvirt.svm.abPadding1        resb    16
-%endif
-    .Guest.hwvirt.fLocalForcedActions   resd    1
+    .Guest.hwvirt.svm.abPadding1                resb    16
+%endif
+    .Guest.hwvirt.fLocalForcedActions           resd    1
     alignb 64
 
@@ -508,18 +510,20 @@
     .Hyper.abPadding          resb    12
 %endif
-    .Hyper.hwvirt.svm.uMsrHSavePa       resq    1
-    .Hyper.hwvirt.svm.GCPhysVmcb        resq    1
-    .Hyper.hwvirt.svm.VmcbCtrl          resb  256
-    .Hyper.hwvirt.svm.HostState         resb  184
-    .Hyper.hwvirt.svm.fGif              resb    1
-    .Hyper.hwvirt.svm.abPadding0        resb    7
-    .Hyper.hwvirt.svm.pvMsrBitmapR0     RTR0PTR_RES 1
-    .Hyper.hwvirt.svm.pvMsrBitmapR3     RTR3PTR_RES 1
-    .Hyper.hwvirt.svm.pvIoBitmapR0      RTR0PTR_RES 1
-    .Hyper.hwvirt.svm.pvIoBitmapR3      RTR3PTR_RES 1
+    .Hyper.hwvirt.svm.uMsrHSavePa               resq    1
+    .Hyper.hwvirt.svm.GCPhysVmcb                resq    1
+    .Hyper.hwvirt.svm.VmcbCtrl                  resb  256
+    .Hyper.hwvirt.svm.HostState                 resb  184
+    .Hyper.hwvirt.svm.fGif                      resb    1
+    .Hyper.hwvirt.svm.cPauseFilter              resw    1
+    .Hyper.hwvirt.svm.cPauseFilterThreshold     resw    1
+    .Hyper.hwvirt.svm.abPadding0                resb    3
+    .Hyper.hwvirt.svm.pvMsrBitmapR0             RTR0PTR_RES 1
+    .Hyper.hwvirt.svm.pvMsrBitmapR3             RTR3PTR_RES 1
+    .Hyper.hwvirt.svm.pvIoBitmapR0              RTR0PTR_RES 1
+    .Hyper.hwvirt.svm.pvIoBitmapR3              RTR3PTR_RES 1
 %if HC_ARCH_BITS == 32
-    .Hyper.hwvirt.svm.abPadding1        resb   16
-%endif
-    .Hyper.hwvirt.fLocalForcedActions   resd    1
+    .Hyper.hwvirt.svm.abPadding1                resb    16
+%endif
+    .Hyper.hwvirt.fLocalForcedActions           resd    1
     alignb 64
 
Index: /trunk/src/VBox/VMM/include/IEMInternal.h
===================================================================
--- /trunk/src/VBox/VMM/include/IEMInternal.h	(revision 66580)
+++ /trunk/src/VBox/VMM/include/IEMInternal.h	(revision 66581)
@@ -502,7 +502,7 @@
 
 #else
-    /** The size of what has currently been fetched into abOpcodes. */
+    /** The size of what has currently been fetched into abOpcode. */
     uint8_t                 cbOpcode;                                                                       /*       0x08 */
-    /** The current offset into abOpcodes. */
+    /** The current offset into abOpcode. */
     uint8_t                 offOpcode;                                                                      /*       0x09 */
 
@@ -951,4 +951,18 @@
 AssertCompileSize(IEMTASKSWITCH, 4);
 
+/**
+ * Possible CrX load (write) sources.
+ */
+typedef enum IEMACCESSCRX
+{
+    /** CrX access caused by 'mov crX' instruction. */
+    IEMACCESSCRX_MOV_CRX,
+    /** CrX (CR0) write caused by 'lmsw' instruction. */
+    IEMACCESSCRX_LMSW,
+    /** CrX (CR0) write caused by 'clts' instruction. */
+    IEMACCESSCRX_CLTS,
+    /** CrX (CR0) read caused by 'smsw' instruction. */
+    IEMACCESSCRX_SMSW
+} IEMACCESSCRX;
 
 /**
Index: /trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp
===================================================================
--- /trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp	(revision 66580)
+++ /trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp	(revision 66581)
@@ -124,4 +124,7 @@
 #define IEMOP_HLP_DONE_DECODING()                           do { } while (0)
 #define IEMOP_HLP_DONE_VEX_DECODING()                       do { } while (0)
+
+#define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
+#define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2)                 do { } while (0)
 
 #define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType)               do { } while (0)
Index: /trunk/src/VBox/VMM/testcase/tstVMStruct.h
===================================================================
--- /trunk/src/VBox/VMM/testcase/tstVMStruct.h	(revision 66580)
+++ /trunk/src/VBox/VMM/testcase/tstVMStruct.h	(revision 66581)
@@ -137,4 +137,6 @@
     GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.HostState);
     GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.fGif);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.cPauseFilter);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.cPauseFilterThreshold);
     GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.pvMsrBitmapR0);
     GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.pvMsrBitmapR3);
