Index: /trunk/Config.kmk
===================================================================
--- /trunk/Config.kmk	(revision 65903)
+++ /trunk/Config.kmk	(revision 65904)
@@ -397,4 +397,6 @@
 # Enables the third step using IEM (the interpreter).
 VBOX_WITH_3RD_IEM_STEP = 1
+# Enables nested hardware virtualization support (mainly for IEM)
+VBOX_WITH_NESTED_HWVIRT = 1
 ## @}
 
Index: /trunk/include/VBox/vmm/cpum.h
===================================================================
--- /trunk/include/VBox/vmm/cpum.h	(revision 65903)
+++ /trunk/include/VBox/vmm/cpum.h	(revision 65904)
@@ -1025,6 +1025,9 @@
     uint32_t        fLeakyFxSR : 1;
 
+    /** AMD64: Supports AMD SVM. */
+    uint32_t        fSvm : 1;
+
     /** Alignment padding / reserved for future use. */
-    uint32_t        fPadding : 27;
+    uint32_t        fPadding : 26;
     uint32_t        auPadding[3];
 } CPUMFEATURES;
@@ -1254,4 +1257,83 @@
 }
 
+/**
+ * Checks if the guest has the specified ctrl/instruction
+ * intercept active.
+ *
+ * @returns @c true if the intercept is set, @c false otherwise.
+ * @param   pCtx          Pointer to the context.
+ * @param   fIntercept    The SVM control/instruction intercept,
+ *                        see SVM_CTRL_INTERCEPT_*.
+ */
+DECLINLINE(bool) CPUMIsGuestSvmCtrlInterceptSet(PCPUMCTX pCtx, uint64_t fIntercept)
+{
+    return RT_BOOL(pCtx->hwvirt.svm.u64InterceptCtrl & fIntercept);
+}
+
+/**
+ * Checks if the guest has the specified CR read intercept
+ * active.
+ *
+ * @returns @c true if the intercept is set, @c false otherwise.
+ * @param   pCtx          Pointer to the context.
+ * @param   uCr           The CR register number (0 to 15).
+ */
+DECLINLINE(bool) CPUMIsGuestSvmReadCRxInterceptSet(PCPUMCTX pCtx, uint8_t uCr)
+{
+    return RT_BOOL(pCtx->hwvirt.svm.u16InterceptRdCRx & (1 << uCr));
+}
+
+/**
+ * Checks if the guest has the specified CR write intercept
+ * active.
+ *
+ * @returns @c true if the intercept is set, @c false otherwise.
+ * @param   pCtx          Pointer to the context.
+ * @param   uCr           The CR register number (0 to 15).
+ */
+DECLINLINE(bool) CPUMIsGuestSvmWriteCRxInterceptSet(PCPUMCTX pCtx, uint8_t uCr)
+{
+    return RT_BOOL(pCtx->hwvirt.svm.u16InterceptWrCRx & (1 << uCr)); 
+}
+
+/**
+ * Checks if the guest has the specified DR read intercept
+ * active.
+ *
+ * @returns @c true if the intercept is set, @c false otherwise.
+ * @param   pCtx    Pointer to the context.
+ * @param   uDr     The DR register number (0 to 15).
+ */
+DECLINLINE(bool) CPUMIsGuestSvmReadDRxInterceptSet(PCPUMCTX pCtx, uint8_t uDr)
+{
+    return RT_BOOL(pCtx->hwvirt.svm.u16InterceptRdDRx & (1 << uDr)); 
+}
+
+/**
+ * Checks if the guest has the specified DR write intercept
+ * active.
+ *
+ * @returns @c true if the intercept is set, @c false otherwise.
+ * @param   pCtx    Pointer to the context.
+ * @param   uDr     The DR register number (0 to 15).
+ */
+DECLINLINE(bool) CPUMIsGuestSvmWriteDRxInterceptSet(PCPUMCTX pCtx, uint8_t uDr)
+{
+    return RT_BOOL(pCtx->hwvirt.svm.u16InterceptWrDRx & (1 << uDr)); 
+}
+
+/**
+ * Checks if the guest has the specified exception
+ * intercept active.
+ *
+ * @returns @c true if the intercept is active, @c false otherwise.
+ * @param   pCtx        Pointer to the context.
+ * @param   enmXcpt     The exception (vector number, 0..31).
+ */
+DECLINLINE(bool) CPUMIsGuestSvmXcptInterceptSet(PCPUMCTX pCtx, X86XCPT enmXcpt)
+{
+    /* u32InterceptXcpt is a bit-per-vector mask; enmXcpt is a vector number, not a mask. */
+    return RT_BOOL(pCtx->hwvirt.svm.u32InterceptXcpt & RT_BIT(enmXcpt));
+}
+
 #endif /* VBOX_WITHOUT_UNNAMED_UNIONS */
 
Index: /trunk/include/VBox/vmm/cpum.mac
===================================================================
--- /trunk/include/VBox/vmm/cpum.mac	(revision 65903)
+++ /trunk/include/VBox/vmm/cpum.mac	(revision 65904)
@@ -253,4 +253,13 @@
     .pXStateRC      RTRCPTR_RES 1
     .aoffXState         resw    64
+    alignb 8
+    .hwvirt.svm.uMsrHSavePa         resq    1
+    .hwvirt.svm.u64InterceptCtrl    resq    1
+    .hwvirt.svm.u32InterceptXcpt    resd    1
+    .hwvirt.svm.u16InterceptRdCRx   resw    1
+    .hwvirt.svm.u16InterceptWrCRx   resw    1
+    .hwvirt.svm.u16InterceptRdDRx   resw    1
+    .hwvirt.svm.u16InterceptWrDRx   resw    1
+    .hwvirt.svm.fGif                resb    1
     alignb 64
 endstruc
Index: /trunk/include/VBox/vmm/cpumctx.h
===================================================================
--- /trunk/include/VBox/vmm/cpumctx.h	(revision 65903)
+++ /trunk/include/VBox/vmm/cpumctx.h	(revision 65904)
@@ -431,6 +431,44 @@
     uint16_t                    aoffXState[64];
 
-    /** Size padding. */
-    uint32_t        au32SizePadding[HC_ARCH_BITS == 32 ? 13 : 11];
+    /** 724 - Size padding. */
+    uint32_t                    u32Padding;
+
+    /** 728 - Hardware virtualization state.   */
+    struct
+    {
+        union   /* no tag! */
+        {
+            struct
+            {
+                /** 728 - MSR holding physical address of the Guest's 'host-state'. */
+                uint64_t            uMsrHSavePa;
+
+                /** @name Cache of the nested-guest VMCB controls.
+                 * @{ */
+                /** 736 - Control intercepts. */
+                uint64_t            u64InterceptCtrl;
+                /** 744 - Exception intercepts. */
+                uint32_t            u32InterceptXcpt;
+                /** 748 - CR0-CR15 read intercepts. */
+                uint16_t            u16InterceptRdCRx;
+                /** 750 - CR0-CR15 write intercepts. */
+                uint16_t            u16InterceptWrCRx;
+                /** 752 - DR0-DR15 read intercepts. */
+                uint16_t            u16InterceptRdDRx;
+                /** 754 - DR0-DR15 write intercepts. */
+                uint16_t            u16InterceptWrDRx;
+                /** @} */
+
+                /** 756 - Global interrupt flag. */
+                uint8_t            fGif;
+                /** 757 - Padding. */
+                uint8_t            abPadding[11];
+            } svm;
+            struct
+            {
+            } vmx;
+        } CPUM_UNION_NM(s); 
+    } hwvirt;
+    /** @} */
 } CPUMCTX;
 #pragma pack()
@@ -485,4 +523,14 @@
 AssertCompileMemberOffset(CPUMCTX,                  pXStateRC, HC_ARCH_BITS == 64 ? 592 : 584);
 AssertCompileMemberOffset(CPUMCTX,                 aoffXState, HC_ARCH_BITS == 64 ? 596 : 588);
+AssertCompileMemberOffset(CPUMCTX, hwvirt, 728);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.uMsrHSavePa,       728);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.u64InterceptCtrl,  736);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.u32InterceptXcpt,  744);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.u16InterceptRdCRx, 748);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.u16InterceptWrCRx, 750);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.u16InterceptRdDRx, 752);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.u16InterceptWrDRx, 754);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.fGif,              756);
+
 AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_NM(g.) aGRegs);
 AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.)  r0);
Index: /trunk/include/VBox/vmm/hm.h
===================================================================
--- /trunk/include/VBox/vmm/hm.h	(revision 65903)
+++ /trunk/include/VBox/vmm/hm.h	(revision 65904)
@@ -147,4 +147,7 @@
 VMM_INT_DECL(void)              HMHypercallsEnable(PVMCPU pVCpu);
 VMM_INT_DECL(void)              HMHypercallsDisable(PVMCPU pVCpu);
+
+VMM_INT_DECL(void)              HMNstGstSvmVmExit(PVMCPU pVCpu, uint64_t uExitCode);
+VMM_INT_DECL(void)              HMNstGstVmxVmExit(PVMCPU pVCpu, uint16_t uBasicExitReason);
 
 #ifndef IN_RC
Index: /trunk/include/VBox/vmm/hm_svm.h
===================================================================
--- /trunk/include/VBox/vmm/hm_svm.h	(revision 65903)
+++ /trunk/include/VBox/vmm/hm_svm.h	(revision 65904)
@@ -301,105 +301,135 @@
 /** @} */
 
-/** @name SVMVMCB.ctrl.u32InterceptCtrl1
- * @{
- */
-/** 0 Intercept INTR (physical maskable interrupt). */
-#define SVM_CTRL1_INTERCEPT_INTR              RT_BIT(0)
-/** 1 Intercept NMI. */
-#define SVM_CTRL1_INTERCEPT_NMI               RT_BIT(1)
-/** 2 Intercept SMI. */
-#define SVM_CTRL1_INTERCEPT_SMI               RT_BIT(2)
-/** 3 Intercept INIT. */
-#define SVM_CTRL1_INTERCEPT_INIT              RT_BIT(3)
-/** 4 Intercept VINTR (virtual maskable interrupt). */
-#define SVM_CTRL1_INTERCEPT_VINTR             RT_BIT(4)
-/** 5 Intercept CR0 writes that change bits other than CR0.TS or CR0.MP */
-#define SVM_CTRL1_INTERCEPT_CR0               RT_BIT(5)
-/** 6 Intercept reads of IDTR. */
-#define SVM_CTRL1_INTERCEPT_IDTR_READS        RT_BIT(6)
-/** 7 Intercept reads of GDTR. */
-#define SVM_CTRL1_INTERCEPT_GDTR_READS        RT_BIT(7)
-/** 8 Intercept reads of LDTR. */
-#define SVM_CTRL1_INTERCEPT_LDTR_READS        RT_BIT(8)
-/** 9 Intercept reads of TR. */
-#define SVM_CTRL1_INTERCEPT_TR_READS          RT_BIT(9)
-/** 10 Intercept writes of IDTR. */
-#define SVM_CTRL1_INTERCEPT_IDTR_WRITES       RT_BIT(10)
-/** 11 Intercept writes of GDTR. */
-#define SVM_CTRL1_INTERCEPT_GDTR_WRITES       RT_BIT(11)
-/** 12 Intercept writes of LDTR. */
-#define SVM_CTRL1_INTERCEPT_LDTR_WRITES       RT_BIT(12)
-/** 13 Intercept writes of TR. */
-#define SVM_CTRL1_INTERCEPT_TR_WRITES         RT_BIT(13)
-/** 14 Intercept RDTSC instruction. */
-#define SVM_CTRL1_INTERCEPT_RDTSC             RT_BIT(14)
-/** 15 Intercept RDPMC instruction. */
-#define SVM_CTRL1_INTERCEPT_RDPMC             RT_BIT(15)
-/** 16 Intercept PUSHF instruction. */
-#define SVM_CTRL1_INTERCEPT_PUSHF             RT_BIT(16)
-/** 17 Intercept POPF instruction. */
-#define SVM_CTRL1_INTERCEPT_POPF              RT_BIT(17)
-/** 18 Intercept CPUID instruction. */
-#define SVM_CTRL1_INTERCEPT_CPUID             RT_BIT(18)
-/** 19 Intercept RSM instruction. */
-#define SVM_CTRL1_INTERCEPT_RSM               RT_BIT(19)
-/** 20 Intercept IRET instruction. */
-#define SVM_CTRL1_INTERCEPT_IRET              RT_BIT(20)
-/** 21 Intercept INTn instruction. */
-#define SVM_CTRL1_INTERCEPT_INTN              RT_BIT(21)
-/** 22 Intercept INVD instruction. */
-#define SVM_CTRL1_INTERCEPT_INVD              RT_BIT(22)
-/** 23 Intercept PAUSE instruction. */
-#define SVM_CTRL1_INTERCEPT_PAUSE             RT_BIT(23)
-/** 24 Intercept HLT instruction. */
-#define SVM_CTRL1_INTERCEPT_HLT               RT_BIT(24)
-/** 25 Intercept INVLPG instruction. */
-#define SVM_CTRL1_INTERCEPT_INVLPG            RT_BIT(25)
-/** 26 Intercept INVLPGA instruction. */
-#define SVM_CTRL1_INTERCEPT_INVLPGA           RT_BIT(26)
-/** 27 IOIO_PROT Intercept IN/OUT accesses to selected ports. */
-#define SVM_CTRL1_INTERCEPT_INOUT_BITMAP      RT_BIT(27)
-/** 28 MSR_PROT Intercept RDMSR or WRMSR accesses to selected MSRs. */
-#define SVM_CTRL1_INTERCEPT_MSR_SHADOW        RT_BIT(28)
-/** 29 Intercept task switches. */
-#define SVM_CTRL1_INTERCEPT_TASK_SWITCH       RT_BIT(29)
-/** 30 FERR_FREEZE: intercept processor "freezing" during legacy FERR handling. */
-#define SVM_CTRL1_INTERCEPT_FERR_FREEZE       RT_BIT(30)
-/** 31 Intercept shutdown events. */
-#define SVM_CTRL1_INTERCEPT_SHUTDOWN          RT_BIT(31)
-/** @} */
-
-
-/** @name SVMVMCB.ctrl.u32InterceptCtrl2
- * @{
- */
-/** 0 Intercept VMRUN instruction. */
-#define SVM_CTRL2_INTERCEPT_VMRUN             RT_BIT(0)
-/** 1 Intercept VMMCALL instruction. */
-#define SVM_CTRL2_INTERCEPT_VMMCALL           RT_BIT(1)
-/** 2 Intercept VMLOAD instruction. */
-#define SVM_CTRL2_INTERCEPT_VMLOAD            RT_BIT(2)
-/** 3 Intercept VMSAVE instruction. */
-#define SVM_CTRL2_INTERCEPT_VMSAVE            RT_BIT(3)
-/** 4 Intercept STGI instruction. */
-#define SVM_CTRL2_INTERCEPT_STGI              RT_BIT(4)
-/** 5 Intercept CLGI instruction. */
-#define SVM_CTRL2_INTERCEPT_CLGI              RT_BIT(5)
-/** 6 Intercept SKINIT instruction. */
-#define SVM_CTRL2_INTERCEPT_SKINIT            RT_BIT(6)
-/** 7 Intercept RDTSCP instruction. */
-#define SVM_CTRL2_INTERCEPT_RDTSCP            RT_BIT(7)
-/** 8 Intercept ICEBP instruction. */
-#define SVM_CTRL2_INTERCEPT_ICEBP             RT_BIT(8)
-/** 9 Intercept WBINVD instruction. */
-#define SVM_CTRL2_INTERCEPT_WBINVD            RT_BIT(9)
-/** 10 Intercept MONITOR instruction. */
-#define SVM_CTRL2_INTERCEPT_MONITOR           RT_BIT(10)
-/** 11 Intercept MWAIT instruction unconditionally. */
-#define SVM_CTRL2_INTERCEPT_MWAIT             RT_BIT(11)
-/** 12 Intercept MWAIT instruction when armed. */
-#define SVM_CTRL2_INTERCEPT_MWAIT_ARMED       RT_BIT(12)
-/** 13 Intercept XSETBV instruction. */
-#define SVM_CTRL2_INTERCEPT_XSETBV            RT_BIT(13)
+
+/** @name SVMVMCB.ctrl.u64InterceptCtrl
+ * @{
+ */
+/** Intercept INTR (physical maskable interrupt). */
+#define SVM_CTRL_INTERCEPT_INTR               RT_BIT_64(0)
+/** Intercept NMI. */
+#define SVM_CTRL_INTERCEPT_NMI                RT_BIT_64(1)
+/** Intercept SMI. */
+#define SVM_CTRL_INTERCEPT_SMI                RT_BIT_64(2)
+/** Intercept INIT. */ 
+#define SVM_CTRL_INTERCEPT_INIT               RT_BIT_64(3)
+/** Intercept VINTR (virtual maskable interrupt). */
+#define SVM_CTRL_INTERCEPT_VINTR              RT_BIT_64(4)
+/** Intercept CR0 writes that change bits other than CR0.TS or CR0.MP */
+#define SVM_CTRL_INTERCEPT_CR0                RT_BIT_64(5)
+/** Intercept reads of IDTR. */
+#define SVM_CTRL_INTERCEPT_IDTR_READS         RT_BIT_64(6)
+/** Intercept reads of GDTR. */
+#define SVM_CTRL_INTERCEPT_GDTR_READS         RT_BIT_64(7)
+/** Intercept reads of LDTR. */
+#define SVM_CTRL_INTERCEPT_LDTR_READS         RT_BIT_64(8)
+/** Intercept reads of TR. */
+#define SVM_CTRL_INTERCEPT_TR_READS           RT_BIT_64(9)
+/** Intercept writes of IDTR. */           
+#define SVM_CTRL_INTERCEPT_IDTR_WRITES        RT_BIT_64(10)
+/** Intercept writes of GDTR. */           
+#define SVM_CTRL_INTERCEPT_GDTR_WRITES        RT_BIT_64(11)
+/** Intercept writes of LDTR. */           
+#define SVM_CTRL_INTERCEPT_LDTR_WRITES        RT_BIT_64(12)
+/** Intercept writes of TR. */             
+#define SVM_CTRL_INTERCEPT_TR_WRITES          RT_BIT_64(13)
+/** Intercept RDTSC instruction. */        
+#define SVM_CTRL_INTERCEPT_RDTSC              RT_BIT_64(14)
+/** Intercept RDPMC instruction. */        
+#define SVM_CTRL_INTERCEPT_RDPMC              RT_BIT_64(15)
+/** Intercept PUSHF instruction. */        
+#define SVM_CTRL_INTERCEPT_PUSHF              RT_BIT_64(16)
+/** Intercept POPF instruction. */         
+#define SVM_CTRL_INTERCEPT_POPF               RT_BIT_64(17)
+/** Intercept CPUID instruction. */        
+#define SVM_CTRL_INTERCEPT_CPUID              RT_BIT_64(18)
+/** Intercept RSM instruction. */          
+#define SVM_CTRL_INTERCEPT_RSM                RT_BIT_64(19)
+/** Intercept IRET instruction. */         
+#define SVM_CTRL_INTERCEPT_IRET               RT_BIT_64(20)
+/** Intercept INTn instruction. */         
+#define SVM_CTRL_INTERCEPT_INTN               RT_BIT_64(21)
+/** Intercept INVD instruction. */         
+#define SVM_CTRL_INTERCEPT_INVD               RT_BIT_64(22)
+/** Intercept PAUSE instruction. */        
+#define SVM_CTRL_INTERCEPT_PAUSE              RT_BIT_64(23)
+/** Intercept HLT instruction. */          
+#define SVM_CTRL_INTERCEPT_HLT                RT_BIT_64(24)
+/** Intercept INVLPG instruction. */       
+#define SVM_CTRL_INTERCEPT_INVLPG             RT_BIT_64(25)
+/** Intercept INVLPGA instruction. */      
+#define SVM_CTRL_INTERCEPT_INVLPGA            RT_BIT_64(26)
+/** IOIO_PROT Intercept IN/OUT accesses to selected ports. */
+#define SVM_CTRL_INTERCEPT_INOUT_BITMAP       RT_BIT_64(27)
+/** MSR_PROT Intercept RDMSR or WRMSR accesses to selected MSRs. */
+#define SVM_CTRL_INTERCEPT_MSR_SHADOW         RT_BIT_64(28)
+/** Intercept task switches. */
+#define SVM_CTRL_INTERCEPT_TASK_SWITCH        RT_BIT_64(29)
+/** FERR_FREEZE: intercept processor "freezing" during legacy FERR handling. */
+#define SVM_CTRL_INTERCEPT_FERR_FREEZE        RT_BIT_64(30)
+/** Intercept shutdown events. */                
+#define SVM_CTRL_INTERCEPT_SHUTDOWN           RT_BIT_64(31)
+/** Intercept VMRUN instruction. */              
+#define SVM_CTRL_INTERCEPT_VMRUN              RT_BIT_64(32 + 0)
+/** Intercept VMMCALL instruction. */            
+#define SVM_CTRL_INTERCEPT_VMMCALL            RT_BIT_64(32 + 1)
+/** Intercept VMLOAD instruction. */                  
+#define SVM_CTRL_INTERCEPT_VMLOAD             RT_BIT_64(32 + 2)
+/** Intercept VMSAVE instruction. */             
+#define SVM_CTRL_INTERCEPT_VMSAVE             RT_BIT_64(32 + 3)
+/** Intercept STGI instruction. */               
+#define SVM_CTRL_INTERCEPT_STGI               RT_BIT_64(32 + 4)
+/** Intercept CLGI instruction. */               
+#define SVM_CTRL_INTERCEPT_CLGI               RT_BIT_64(32 + 5)
+/** Intercept SKINIT instruction. */             
+#define SVM_CTRL_INTERCEPT_SKINIT             RT_BIT_64(32 + 6)
+/** Intercept RDTSCP instruction. */             
+#define SVM_CTRL_INTERCEPT_RDTSCP             RT_BIT_64(32 + 7)
+/** Intercept ICEBP instruction. */              
+#define SVM_CTRL_INTERCEPT_ICEBP              RT_BIT_64(32 + 8)
+/** Intercept WBINVD instruction. */             
+#define SVM_CTRL_INTERCEPT_WBINVD             RT_BIT_64(32 + 9)
+/** Intercept MONITOR instruction. */            
+#define SVM_CTRL_INTERCEPT_MONITOR            RT_BIT_64(32 + 10)
+/** Intercept MWAIT instruction unconditionally. */   
+#define SVM_CTRL_INTERCEPT_MWAIT              RT_BIT_64(32 + 11)
+/** Intercept MWAIT instruction when armed. */   
+#define SVM_CTRL_INTERCEPT_MWAIT_ARMED        RT_BIT_64(32 + 12)
+/** Intercept XSETBV instruction. */
+#define SVM_CTRL_INTERCEPT_XSETBV             RT_BIT_64(32 + 13)
+/* Bit 14 - Reserved, SBZ. */
+/** Intercept EFER writes after guest instruction finishes. */
+#define SVM_CTRL_INTERCEPT_EFER_WRITES_TRAP   RT_BIT_64(32 + 15)
+/** Intercept CR0 writes after guest instruction finishes. */
+#define SVM_CTRL_INTERCEPT_CR0_WRITES_TRAP    RT_BIT_64(32 + 16)
+/** Intercept CR1 writes after guest instruction finishes. */
+#define SVM_CTRL_INTERCEPT_CR1_WRITES_TRAP    RT_BIT_64(32 + 17)
+/** Intercept CR2 writes after guest instruction finishes. */
+#define SVM_CTRL_INTERCEPT_CR2_WRITES_TRAP    RT_BIT_64(32 + 18)
+/** Intercept CR3 writes after guest instruction finishes. */
+#define SVM_CTRL_INTERCEPT_CR3_WRITES_TRAP    RT_BIT_64(32 + 19)
+/** Intercept CR4 writes after guest instruction finishes. */
+#define SVM_CTRL_INTERCEPT_CR4_WRITES_TRAP    RT_BIT_64(32 + 20)
+/** Intercept CR5 writes after guest instruction finishes. */
+#define SVM_CTRL_INTERCEPT_CR5_WRITES_TRAP    RT_BIT_64(32 + 21)
+/** Intercept CR6 writes after guest instruction finishes. */
+#define SVM_CTRL_INTERCEPT_CR6_WRITES_TRAP    RT_BIT_64(32 + 22)
+/** Intercept CR7 writes after guest instruction finishes. */
+#define SVM_CTRL_INTERCEPT_CR7_WRITES_TRAP    RT_BIT_64(32 + 23)
+/** Intercept CR8 writes after guest instruction finishes. */
+#define SVM_CTRL_INTERCEPT_CR8_WRITES_TRAP    RT_BIT_64(32 + 24)
+/** Intercept CR9 writes after guest instruction finishes. */
+#define SVM_CTRL_INTERCEPT_CR9_WRITES_TRAP    RT_BIT_64(32 + 25)
+/** Intercept CR10 writes after guest instruction finishes. */
+#define SVM_CTRL_INTERCEPT_CR10_WRITES_TRAP   RT_BIT_64(32 + 26)
+/** Intercept CR11 writes after guest instruction finishes. */
+#define SVM_CTRL_INTERCEPT_CR11_WRITES_TRAP   RT_BIT_64(32 + 27)
+/** Intercept CR12 writes after guest instruction finishes. */
+#define SVM_CTRL_INTERCEPT_CR12_WRITES_TRAP   RT_BIT_64(32 + 28)
+/** Intercept CR13 writes after guest instruction finishes. */
+#define SVM_CTRL_INTERCEPT_CR13_WRITES_TRAP   RT_BIT_64(32 + 29)
+/** Intercept CR14 writes after guest instruction finishes. */
+#define SVM_CTRL_INTERCEPT_CR14_WRITES_TRAP   RT_BIT_64(32 + 30)
+/** Intercept CR15 writes after guest instruction finishes. */
+#define SVM_CTRL_INTERCEPT_CR15_WRITES_TRAP   RT_BIT_64(32 + 31)
 /** @} */
 
@@ -603,4 +633,5 @@
  * SVM VM Control Block. (VMCB)
  */
+#pragma pack(1)
 typedef struct SVMVMCB
 {
@@ -608,18 +639,16 @@
     struct
     {
-        /** Offset 0x00 - Intercept reads of CR0-15. */
+        /** Offset 0x00 - Intercept reads of CR0-CR15. */
         uint16_t    u16InterceptRdCRx;
-        /** Offset 0x02 - Intercept writes to CR0-15. */
+        /** Offset 0x02 - Intercept writes to CR0-CR15. */
         uint16_t    u16InterceptWrCRx;
-        /** Offset 0x04 - Intercept reads of DR0-15. */
+        /** Offset 0x04 - Intercept reads of DR0-DR15. */
         uint16_t    u16InterceptRdDRx;
-        /** Offset 0x06 - Intercept writes to DR0-15. */
+        /** Offset 0x06 - Intercept writes to DR0-DR15. */
         uint16_t    u16InterceptWrDRx;
         /** Offset 0x08 - Intercept exception vectors 0-31. */
         uint32_t    u32InterceptException;
-        /** Offset 0x0C - Intercept control field 1. */
-        uint32_t    u32InterceptCtrl1;
-        /** Offset 0x10 - Intercept control field 2. */
-        uint32_t    u32InterceptCtrl2;
+        /** Offset 0x0C - Intercept control. */
+        uint64_t    u64InterceptCtrl;
         /** Offset 0x14-0x3F - Reserved. */
         uint8_t     u8Reserved[0x3c - 0x14];
@@ -773,4 +802,5 @@
     uint8_t     u8Reserved10[0x1000-0x698];
 } SVMVMCB;
+#pragma pack()
 /** Pointer to the SVMVMCB structure. */
 typedef SVMVMCB *PSVMVMCB;
@@ -781,6 +811,5 @@
 AssertCompileMemberOffset(SVMVMCB, ctrl.u16InterceptWrDRx, 0x06);
 AssertCompileMemberOffset(SVMVMCB, ctrl.u32InterceptException, 0x08);
-AssertCompileMemberOffset(SVMVMCB, ctrl.u32InterceptCtrl1, 0x0C);
-AssertCompileMemberOffset(SVMVMCB, ctrl.u32InterceptCtrl2, 0x10);
+AssertCompileMemberOffset(SVMVMCB, ctrl.u64InterceptCtrl, 0x0C);
 AssertCompileMemberOffset(SVMVMCB, ctrl.u8Reserved, 0x14);
 AssertCompileMemberOffset(SVMVMCB, ctrl.u16PauseFilterThreshold, 0x3c);
Index: /trunk/include/VBox/vmm/iem.h
===================================================================
--- /trunk/include/VBox/vmm/iem.h	(revision 65903)
+++ /trunk/include/VBox/vmm/iem.h	(revision 65904)
@@ -132,4 +132,8 @@
 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue);
 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr);
+#ifdef VBOX_WITH_NESTED_HWVIRT
+VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr);
+VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr);
+#endif
 /** @}  */
 
Index: /trunk/include/iprt/x86.h
===================================================================
--- /trunk/include/iprt/x86.h	(revision 65903)
+++ /trunk/include/iprt/x86.h	(revision 65904)
@@ -1459,5 +1459,19 @@
  * "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh Processors" */
 #define MSR_K8_INT_PENDING                  UINT32_C(0xc0010055)
+
+/** SVM Control. */
 #define MSR_K8_VM_CR                        UINT32_C(0xc0010114)
+/** Disables HDT (Hardware Debug Tool) and certain internal debug
+ *  features. */
+#define MSR_K8_VM_CR_DPD                    RT_BIT_32(0)
+/** If set, non-intercepted INIT signals are converted to \#SX
+ *  exceptions. */
+#define MSR_K8_VM_CR_R_INIT                 RT_BIT_32(1)
+/** Disables A20 masking.  */
+#define MSR_K8_VM_CR_DIS_A20M               RT_BIT_32(2)
+/** Lock bit for this MSR controlling bits 3 (LOCK) and 4 (SVMDIS). */
+#define MSR_K8_VM_CR_LOCK                   RT_BIT_32(3)
+/** SVM disable. When set, writes to EFER.SVME are treated as MBZ. When
+ *  clear, EFER.SVME can be written normally. */
 #define MSR_K8_VM_CR_SVM_DISABLE            RT_BIT_32(4)
 
Index: /trunk/src/VBox/VMM/Makefile.kmk
===================================================================
--- /trunk/src/VBox/VMM/Makefile.kmk	(revision 65903)
+++ /trunk/src/VBox/VMM/Makefile.kmk	(revision 65904)
@@ -48,4 +48,7 @@
 ifdef VBOX_WITH_3RD_IEM_STEP
  VMM_COMMON_DEFS += VBOX_WITH_3RD_IEM_STEP
+endif
+ifdef VBOX_WITH_NESTED_HWVIRT
+ VMM_COMMON_DEFS += VBOX_WITH_NESTED_HWVIRT
 endif
 #ifdef VBOX_WITH_IEM
Index: /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp	(revision 65903)
+++ /trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp	(revision 65904)
@@ -1453,4 +1453,6 @@
     if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
         fMask |= MSR_K6_EFER_FFXSR;
+    if (pVM->cpum.s.GuestFeatures.fSvm)
+        fMask |= MSR_K6_EFER_SVME;
 
     /* #GP(0) If anything outside the allowed bits is set. */
@@ -1471,6 +1473,11 @@
 
     /* There are a few more: e.g. MSR_K6_EFER_LMSLE */
-    AssertMsg(!(uValue & ~(MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA /* ignored anyway */ | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)),
-              ("Unexpected value %RX64\n", uValue));
+    AssertMsg(!(uValue & ~(  MSR_K6_EFER_NXE
+                           | MSR_K6_EFER_LME
+                           | MSR_K6_EFER_LMA /* ignored anyway */
+                           | MSR_K6_EFER_SCE
+                           | MSR_K6_EFER_FFXSR
+                           | MSR_K6_EFER_SVME)),
+              ("Unexpected value %#RX64\n", uValue));
     pVCpu->cpum.s.Guest.msrEFER = (uOldEfer & ~fMask) | (uValue & fMask);
 
@@ -3743,7 +3750,10 @@
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_AmdK8VmCr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
 {
-    RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    /** @todo AMD SVM. */
-    *puValue = 0;
+    RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+    if (pVM->cpum.s.GuestFeatures.fSvm)
+        *puValue = MSR_K8_VM_CR_LOCK;
+    else
+        *puValue = 0;
     return VINF_SUCCESS;
 }
@@ -3753,7 +3763,14 @@
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrWr_AmdK8VmCr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue)
 {
-    RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); RT_NOREF_PV(uValue); RT_NOREF_PV(uRawValue);
-    /** @todo AMD SVM. */
-    return VINF_SUCCESS;
+    RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); RT_NOREF_PV(uRawValue);
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+    if (pVM->cpum.s.GuestFeatures.fSvm)
+    {
+        /* Silently ignore writes to LOCK and SVM_DISABLE bit when the LOCK bit is set (see cpumMsrRd_AmdK8VmCr). */
+        if (uValue & (MSR_K8_VM_CR_DPD | MSR_K8_VM_CR_R_INIT | MSR_K8_VM_CR_DIS_A20M))
+            return VERR_CPUM_RAISE_GP_0;
+        return VINF_SUCCESS;
+    }
+    return VERR_CPUM_RAISE_GP_0;
 }
 
@@ -3801,6 +3818,5 @@
 {
     RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    /** @todo AMD SVM. */
-    *puValue = 0;
+    *puValue = pVCpu->cpum.s.Guest.hwvirt.svm.uMsrHSavePa;
     return VINF_SUCCESS;
 }
@@ -3810,6 +3826,20 @@
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrWr_AmdK8VmHSavePa(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue)
 {
-    RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); RT_NOREF_PV(uValue); RT_NOREF_PV(uRawValue);
-    /** @todo AMD SVM. */
+    RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); RT_NOREF_PV(uRawValue);
+    if (uValue & UINT64_C(0xfff))
+    {
+        Log(("CPUM: Invalid setting of low 12 bits set writing host-state save area MSR %#x: %#llx\n", idMsr, uValue));
+        return VERR_CPUM_RAISE_GP_0;
+    }
+
+    uint64_t fInvPhysMask = ~(RT_BIT_64(pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.cMaxPhysAddrWidth) - 1U);
+    if (fInvPhysMask & uValue)
+    {
+        Log(("CPUM: Invalid physical address bits set writing host-state save area MSR %#x: %#llx (%#llx)\n",
+             idMsr, uValue, uValue & fInvPhysMask));
+        return VERR_CPUM_RAISE_GP_0;
+    }
+
+    pVCpu->cpum.s.Guest.hwvirt.svm.uMsrHSavePa = uValue;
     return VINF_SUCCESS;
 }
Index: /trunk/src/VBox/VMM/VMMAll/HMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/HMAll.cpp	(revision 65903)
+++ /trunk/src/VBox/VMM/VMMAll/HMAll.cpp	(revision 65904)
@@ -557,2 +557,27 @@
 }
 
+
+
+/**
+ * SVM nested-guest #VMEXIT handler.
+ * 
+ * @param   pVCpu       The cross context virtual CPU structure. 
+ * @param   uExitCode   The exit reason.
+ */
+VMM_INT_DECL(void) HMNstGstSvmVmExit(PVMCPU pVCpu, uint64_t uExitCode)
+{
+    RT_NOREF2(pVCpu, uExitCode);
+}
+
+
+/**
+ * VMX nested-guest VM-exit handler.
+ *  
+ * @param   pVCpu              The cross context virtual CPU structure. 
+ * @param   uBasicExitReason   The basic exit reason. 
+ */
+VMM_INT_DECL(void) HMNstGstVmxVmExit(PVMCPU pVCpu, uint16_t uBasicExitReason)
+{
+    RT_NOREF2(pVCpu, uBasicExitReason);
+}
+
Index: /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp	(revision 65903)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp	(revision 65904)
@@ -102,4 +102,7 @@
 #include <VBox/vmm/em.h>
 #include <VBox/vmm/hm.h>
+#ifdef VBOX_WITH_NESTED_HWVIRT
+# include <VBox/vmm/hm_svm.h>
+#endif
 #include <VBox/vmm/tm.h>
 #include <VBox/vmm/dbgf.h>
@@ -362,4 +365,36 @@
 # define IEM_USE_UNALIGNED_DATA_ACCESS
 #endif
+
+#ifdef VBOX_WITH_NESTED_HWVIRT
+/**
+ * Check if an SVM control/instruction intercept is set.
+ */
+#define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_Intercept)))
+
+/**
+ * Check if an SVM read CRx intercept is set.
+ */
+#define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)    (CPUMIsGuestSvmReadCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
+
+/**
+ * Check if an SVM write CRx intercept is set.
+ */
+#define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr)   (CPUMIsGuestSvmWriteCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
+
+/**
+ * Check if an SVM read DRx intercept is set.
+ */
+#define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr)    (CPUMIsGuestSvmReadDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
+
+/**
+ * Check if an SVM write DRx intercept is set.
+ */
+#define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr)   (CPUMIsGuestSvmWriteDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
+
+/**
+ * Check if an SVM exception intercept is set.
+ */
+#define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_enmXcpt)   (CPUMIsGuestSvmXcptInterceptSet(IEM_GET_CTX(a_pVCpu), (a_enmXcpt)))
+#endif /* VBOX_WITH_NESTED_HWVIRT */
 
 
@@ -14876,4 +14911,42 @@
 }
 
+
+#ifdef VBOX_WITH_NESTED_HWVIRT
+/**
+ * Interface for HM and EM to emulate the CLGI instruction.
+ *  
+ * @returns Strict VBox status code. 
+ * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
+ * @param   cbInstr     The instruction length in bytes. 
+ * @thread  EMT(pVCpu) 
+ */
+VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
+{
+    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
+
+    iemInitExec(pVCpu, false /*fBypassHandlers*/);
+    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
+    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
+}
+
+
+/**
+ * Interface for HM and EM to emulate the STGI instruction.
+ *  
+ * @returns Strict VBox status code. 
+ * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
+ * @param   cbInstr     The instruction length in bytes. 
+ * @thread  EMT(pVCpu) 
+ */
+VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
+{
+    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
+
+    iemInitExec(pVCpu, false /*fBypassHandlers*/);
+    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
+    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
+}
+#endif /* VBOX_WITH_NESTED_HWVIRT */
+
 #ifdef IN_RING3
 
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h	(revision 65903)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h	(revision 65904)
@@ -5875,4 +5875,87 @@
 
 
+#ifdef VBOX_WITH_NESTED_HWVIRT
+/**
+ * Implements 'CLGI'.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_clgi)
+{
+    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+    if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvm)
+    {
+        Log2(("clgi: Not in CPUID -> #UD\n"));
+        return iemRaiseUndefinedOpcode(pVCpu);
+    }
+    if (!(pCtx->msrEFER & MSR_K6_EFER_SVME))
+    {
+        Log2(("clgi: EFER.SVME not enabled -> #UD\n"));
+        return iemRaiseUndefinedOpcode(pVCpu);
+    }
+    if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
+    {
+        Log2(("clgi: Real or v8086 mode -> #UD\n"));
+        return iemRaiseUndefinedOpcode(pVCpu);
+    }
+    if (pVCpu->iem.s.uCpl != 0)
+    {
+        Log2(("clgi: CPL != 0 -> #GP(0)\n"));
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+    }
+#ifndef IN_RC
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
+    {
+        Log2(("clgi: Guest intercept -> VMexit\n"));
+        HMNstGstSvmVmExit(pVCpu, SVM_EXIT_CLGI);
+        return VINF_EM_RESCHEDULE;
+    }
+#endif
+
+    pCtx->hwvirt.svm.fGif = 0;
+    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Implements 'STGI'.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_stgi)
+{
+    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+    if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvm)
+    {
+        Log2(("stgi: Not in CPUID -> #UD\n"));
+        return iemRaiseUndefinedOpcode(pVCpu);
+    }
+    if (!(pCtx->msrEFER & MSR_K6_EFER_SVME))
+    {
+        Log2(("stgi: EFER.SVME not enabled -> #UD\n"));
+        return iemRaiseUndefinedOpcode(pVCpu);
+    }
+    if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
+    {
+        Log2(("stgi: Real or v8086 mode -> #UD\n"));
+        return iemRaiseUndefinedOpcode(pVCpu);
+    }
+    if (pVCpu->iem.s.uCpl != 0)
+    {
+        Log2(("stgi: CPL != 0 -> #GP(0)\n"));
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+    }
+#ifndef IN_RC
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_STGI))
+    {
+        Log2(("stgi: Guest intercept -> VMexit\n"));
+        HMNstGstSvmVmExit(pVCpu, SVM_EXIT_STGI);
+        return VINF_EM_RESCHEDULE;
+    }
+#endif
+
+    pCtx->hwvirt.svm.fGif = 1;
+    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+    return VINF_SUCCESS;
+}
+#endif /* VBOX_WITH_NESTED_HWVIRT */
+
 /**
  * Implements 'CLI'.
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h	(revision 65903)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h	(revision 65904)
@@ -451,4 +451,19 @@
 FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmsave);
 
+#ifdef VBOX_WITH_NESTED_HWVIRT
+/** Opcode 0x0f 0x01 0xdc. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_stgi)
+{
+    IEMOP_MNEMONIC(stgi, "stgi");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stgi);
+}
+
+/** Opcode 0x0f 0x01 0xdd. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_clgi)
+{
+    IEMOP_MNEMONIC(clgi, "clgi");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clgi);
+}
+#else
 /** Opcode 0x0f 0x01 0xdc. */
 FNIEMOP_UD_STUB(iemOp_Grp7_Amd_stgi);
@@ -456,4 +471,5 @@
 /** Opcode 0x0f 0x01 0xdd. */
 FNIEMOP_UD_STUB(iemOp_Grp7_Amd_clgi);
+#endif /* VBOX_WITH_NESTED_HWVIRT */
 
 /** Opcode 0x0f 0x01 0xde. */
Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 65903)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 65904)
@@ -308,4 +308,8 @@
 static FNSVMEXITHANDLER hmR0SvmExitXcptAC;
 static FNSVMEXITHANDLER hmR0SvmExitXcptBP;
+#ifdef VBOX_WITH_NESTED_HWVIRT
+static FNSVMEXITHANDLER hmR0SvmExitClgi;
+static FNSVMEXITHANDLER hmR0SvmExitStgi;
+#endif
 /** @} */
 
@@ -722,28 +726,27 @@
 
         /* Set up unconditional intercepts and conditions. */
-        pVmcb->ctrl.u32InterceptCtrl1 =   SVM_CTRL1_INTERCEPT_INTR          /* External interrupt causes a #VMEXIT. */
-                                        | SVM_CTRL1_INTERCEPT_NMI           /* Non-maskable interrupts causes a #VMEXIT. */
-                                        | SVM_CTRL1_INTERCEPT_INIT          /* INIT signal causes a #VMEXIT. */
-                                        | SVM_CTRL1_INTERCEPT_RDPMC         /* RDPMC causes a #VMEXIT. */
-                                        | SVM_CTRL1_INTERCEPT_CPUID         /* CPUID causes a #VMEXIT. */
-                                        | SVM_CTRL1_INTERCEPT_RSM           /* RSM causes a #VMEXIT. */
-                                        | SVM_CTRL1_INTERCEPT_HLT           /* HLT causes a #VMEXIT. */
-                                        | SVM_CTRL1_INTERCEPT_INOUT_BITMAP  /* Use the IOPM to cause IOIO #VMEXITs. */
-                                        | SVM_CTRL1_INTERCEPT_MSR_SHADOW    /* MSR access not covered by MSRPM causes a #VMEXIT.*/
-                                        | SVM_CTRL1_INTERCEPT_INVLPGA       /* INVLPGA causes a #VMEXIT. */
-                                        | SVM_CTRL1_INTERCEPT_SHUTDOWN      /* Shutdown events causes a #VMEXIT. */
-                                        | SVM_CTRL1_INTERCEPT_FERR_FREEZE;  /* Intercept "freezing" during legacy FPU handling. */
-
-        pVmcb->ctrl.u32InterceptCtrl2 =   SVM_CTRL2_INTERCEPT_VMRUN         /* VMRUN causes a #VMEXIT. */
-                                        | SVM_CTRL2_INTERCEPT_VMMCALL       /* VMMCALL causes a #VMEXIT. */
-                                        | SVM_CTRL2_INTERCEPT_VMLOAD        /* VMLOAD causes a #VMEXIT. */
-                                        | SVM_CTRL2_INTERCEPT_VMSAVE        /* VMSAVE causes a #VMEXIT. */
-                                        | SVM_CTRL2_INTERCEPT_STGI          /* STGI causes a #VMEXIT. */
-                                        | SVM_CTRL2_INTERCEPT_CLGI          /* CLGI causes a #VMEXIT. */
-                                        | SVM_CTRL2_INTERCEPT_SKINIT        /* SKINIT causes a #VMEXIT. */
-                                        | SVM_CTRL2_INTERCEPT_WBINVD        /* WBINVD causes a #VMEXIT. */
-                                        | SVM_CTRL2_INTERCEPT_MONITOR       /* MONITOR causes a #VMEXIT. */
-                                        | SVM_CTRL2_INTERCEPT_MWAIT         /* MWAIT causes a #VMEXIT. */
-                                        | SVM_CTRL2_INTERCEPT_XSETBV;       /* XSETBV causes a #VMEXIT. */
+        pVmcb->ctrl.u64InterceptCtrl = SVM_CTRL_INTERCEPT_INTR         /* External interrupt causes a #VMEXIT. */
+                                     | SVM_CTRL_INTERCEPT_NMI          /* Non-maskable interrupts causes a #VMEXIT. */
+                                     | SVM_CTRL_INTERCEPT_INIT         /* INIT signal causes a #VMEXIT. */
+                                     | SVM_CTRL_INTERCEPT_RDPMC        /* RDPMC causes a #VMEXIT. */
+                                     | SVM_CTRL_INTERCEPT_CPUID        /* CPUID causes a #VMEXIT. */
+                                     | SVM_CTRL_INTERCEPT_RSM          /* RSM causes a #VMEXIT. */
+                                     | SVM_CTRL_INTERCEPT_HLT          /* HLT causes a #VMEXIT. */
+                                     | SVM_CTRL_INTERCEPT_INOUT_BITMAP /* Use the IOPM to cause IOIO #VMEXITs. */
+                                     | SVM_CTRL_INTERCEPT_MSR_SHADOW   /* MSR access not covered by MSRPM causes a #VMEXIT.*/
+                                     | SVM_CTRL_INTERCEPT_INVLPGA      /* INVLPGA causes a #VMEXIT. */
+                                     | SVM_CTRL_INTERCEPT_SHUTDOWN     /* Shutdown events causes a #VMEXIT. */
+                                     | SVM_CTRL_INTERCEPT_FERR_FREEZE  /* Intercept "freezing" during legacy FPU handling. */
+                                     | SVM_CTRL_INTERCEPT_VMRUN        /* VMRUN causes a #VMEXIT. */
+                                     | SVM_CTRL_INTERCEPT_VMMCALL      /* VMMCALL causes a #VMEXIT. */
+                                     | SVM_CTRL_INTERCEPT_VMLOAD       /* VMLOAD causes a #VMEXIT. */
+                                     | SVM_CTRL_INTERCEPT_VMSAVE       /* VMSAVE causes a #VMEXIT. */
+                                     | SVM_CTRL_INTERCEPT_STGI         /* STGI causes a #VMEXIT. */
+                                     | SVM_CTRL_INTERCEPT_CLGI         /* CLGI causes a #VMEXIT. */
+                                     | SVM_CTRL_INTERCEPT_SKINIT       /* SKINIT causes a #VMEXIT. */
+                                     | SVM_CTRL_INTERCEPT_WBINVD       /* WBINVD causes a #VMEXIT. */
+                                     | SVM_CTRL_INTERCEPT_MONITOR      /* MONITOR causes a #VMEXIT. */
+                                     | SVM_CTRL_INTERCEPT_MWAIT        /* MWAIT causes a #VMEXIT. */
+                                     | SVM_CTRL_INTERCEPT_XSETBV;      /* XSETBV causes a #VMEXIT. */
 
         /* CR0, CR4 reads must be intercepted, our shadow values are not necessarily the same as the guest's. */
@@ -795,6 +798,6 @@
 
             /* Intercept INVLPG and task switches (may change CR3, EFLAGS, LDT). */
-            pVmcb->ctrl.u32InterceptCtrl1 |=   SVM_CTRL1_INTERCEPT_INVLPG
-                                             | SVM_CTRL1_INTERCEPT_TASK_SWITCH;
+            pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_INVLPG
+                                         |  SVM_CTRL_INTERCEPT_TASK_SWITCH;
 
             /* Page faults must be intercepted to implement shadow paging. */
@@ -803,5 +806,5 @@
 
 #ifdef HMSVM_ALWAYS_TRAP_TASK_SWITCH
-        pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_TASK_SWITCH;
+        pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_TASK_SWITCH;
 #endif
 
@@ -2326,12 +2329,12 @@
     if (fCanUseRealTsc)
     {
-        pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC;
-        pVmcb->ctrl.u32InterceptCtrl2 &= ~SVM_CTRL2_INTERCEPT_RDTSCP;
+        pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_RDTSC;
+        pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_RDTSCP;
         STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
     }
     else
     {
-        pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
-        pVmcb->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;
+        pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_RDTSC;
+        pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_RDTSCP;
         STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
     }
@@ -2575,12 +2578,30 @@
 DECLINLINE(void) hmR0SvmSetVirtIntrIntercept(PSVMVMCB pVmcb)
 {
-    if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_VINTR))
+    if (!(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR))
     {
         pVmcb->ctrl.IntCtrl.n.u1VIrqValid  = 1;     /* A virtual interrupt is pending. */
         pVmcb->ctrl.IntCtrl.n.u8VIrqVector = 0;     /* Not necessary as we #VMEXIT for delivering the interrupt. */
-        pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_VINTR;
+        pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VINTR;
         pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
 
         Log4(("Setting VINTR intercept\n"));
+    }
+}
+
+
+/**
+ * Clears the virtual interrupt intercept control in the VMCB as
+ * we have figured the guest is unable to process any interrupts
+ * at this point of time.
+ *
+ * @param   pVmcb       Pointer to the VM control block.
+ */
+DECLINLINE(void) hmR0SvmClearVirtIntrIntercept(PSVMVMCB pVmcb)
+{
+    if (pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR)
+    {
+        pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_VINTR;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);
+        Log4(("Clearing VINTR intercept\n"));
     }
 }
@@ -2596,7 +2617,7 @@
 DECLINLINE(void) hmR0SvmSetIretIntercept(PSVMVMCB pVmcb)
 {
-    if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_IRET))
-    {
-        pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_IRET;
+    if (!(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_IRET))
+    {
+        pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_IRET;
         pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);
 
@@ -2613,7 +2634,7 @@
 DECLINLINE(void) hmR0SvmClearIretIntercept(PSVMVMCB pVmcb)
 {
-    if (pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_IRET)
-    {
-        pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_IRET;
+    if (pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_IRET)
+    {
+        pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_IRET;
         pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);
 
@@ -2780,6 +2801,5 @@
         Log4(("ctrl.u16InterceptWrDRx            %#x\n",      pVmcb->ctrl.u16InterceptWrDRx));
         Log4(("ctrl.u32InterceptException        %#x\n",      pVmcb->ctrl.u32InterceptException));
-        Log4(("ctrl.u32InterceptCtrl1            %#x\n",      pVmcb->ctrl.u32InterceptCtrl1));
-        Log4(("ctrl.u32InterceptCtrl2            %#x\n",      pVmcb->ctrl.u32InterceptCtrl2));
+        Log4(("ctrl.u64InterceptCtrl             %#RX64\n",   pVmcb->ctrl.u64InterceptCtrl)); 
         Log4(("ctrl.u64IOPMPhysAddr              %#RX64\n",   pVmcb->ctrl.u64IOPMPhysAddr));
         Log4(("ctrl.u64MSRPMPhysAddr             %#RX64\n",   pVmcb->ctrl.u64MSRPMPhysAddr));
@@ -3189,5 +3209,5 @@
      */
     if (    (pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
-        && !(pVmcb->ctrl.u32InterceptCtrl2 & SVM_CTRL2_INTERCEPT_RDTSCP))
+        && !(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSCP)) 
     {
         hmR0SvmSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
@@ -3263,5 +3283,5 @@
 
     /* TSC read must be done early for maximum accuracy. */
-    if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_RDTSC))
+    if (!(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSC))
         TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcb->ctrl.u64TSCOffset);
 
@@ -3665,4 +3685,11 @@
                 }
 
+#ifdef VBOX_WITH_NESTED_HWVIRT
+                case SVM_EXIT_CLGI: return hmR0SvmExitClgi(pVCpu, pCtx, pSvmTransient);
+                case SVM_EXIT_STGI: return hmR0SvmExitStgi(pVCpu, pCtx, pSvmTransient);
+#else
+                case SVM_EXIT_CLGI:
+                case SVM_EXIT_STGI:
+#endif
                 case SVM_EXIT_INVLPGA:
                 case SVM_EXIT_RSM:
@@ -3670,6 +3697,4 @@
                 case SVM_EXIT_VMLOAD:
                 case SVM_EXIT_VMSAVE:
-                case SVM_EXIT_STGI:
-                case SVM_EXIT_CLGI:
                 case SVM_EXIT_SKINIT:
                     return hmR0SvmExitSetPendingXcptUD(pVCpu, pCtx, pSvmTransient);
@@ -5192,5 +5217,5 @@
 
     /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive interrupts/NMIs, it is now ready. */
-    pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_VINTR;
+    pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_VINTR;
     pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
 
@@ -5667,4 +5692,45 @@
 }
 
+
+#ifdef VBOX_WITH_NESTED_HWVIRT
+/**
+ * \#VMEXIT handler for CLGI (SVM_EXIT_CLGI). Conditional
+ * \#VMEXIT.
+ */
+HMSVM_EXIT_DECL hmR0SvmExitClgi(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+{
+    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+    if (pVM->cpum.ro.GuestFeatures.fSvm)
+    {
+        /** @todo Stat. */
+        /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClgi); */
+        VBOXSTRICTRC rcStrict = IEMExecDecodedClgi(pVCpu, 3);
+        return VBOXSTRICTRC_VAL(rcStrict);
+    }
+    return hmR0SvmExitXcptUD(pVCpu, pCtx, pSvmTransient);
+}
+
+
+/**
+ * \#VMEXIT handler for STGI (SVM_EXIT_STGI). Conditional
+ * \#VMEXIT.
+ */
+HMSVM_EXIT_DECL hmR0SvmExitStgi(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+{
+    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+    if (pVM->cpum.ro.GuestFeatures.fSvm)
+    {
+        /** @todo Stat. */
+        /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitStgi); */
+        VBOXSTRICTRC rcStrict = IEMExecDecodedStgi(pVCpu, 3);
+        return VBOXSTRICTRC_VAL(rcStrict);
+    }
+    return hmR0SvmExitXcptUD(pVCpu, pCtx, pSvmTransient);
+}
+#endif /* VBOX_WITH_NESTED_HWVIRT */
+
+
 /** @} */
 
Index: /trunk/src/VBox/VMM/VMMR3/CPUM.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/CPUM.cpp	(revision 65903)
+++ /trunk/src/VBox/VMM/VMMR3/CPUM.cpp	(revision 65904)
@@ -1151,4 +1151,11 @@
     /* C-state control. Guesses. */
     pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl = 1 /*C1*/ | RT_BIT_32(25) | RT_BIT_32(26) | RT_BIT_32(27) | RT_BIT_32(28);
+
+    /*
+     * Hardware virtualization state.
+     */
+    memset(&pCtx->hwvirt, 0, sizeof(pCtx->hwvirt));
+    /* SVM. */
+    pCtx->hwvirt.svm.fGif = 1;
 }
 
Index: /trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp	(revision 65903)
+++ /trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp	(revision 65904)
@@ -1679,4 +1679,5 @@
             pFeatures->fAmdMmxExts      = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_AXMMX);
             pFeatures->fXop             = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_XOP);
+            pFeatures->fSvm             = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_SVM);
         }
 
@@ -2212,4 +2213,5 @@
     CPUMISAEXTCFG   enm3dNowPrf;
     CPUMISAEXTCFG   enmAmdExtMmx;
+    CPUMISAEXTCFG   enmSvm;
 
     uint32_t        uMaxStdLeaf;
@@ -2696,5 +2698,5 @@
         pExtFeatureLeaf->uEcx &= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF
                                //| X86_CPUID_AMD_FEATURE_ECX_CMPL   - set below if applicable.
-                               //| X86_CPUID_AMD_FEATURE_ECX_SVM    - not virtualized.
+                               | (pConfig->enmSvm       ? X86_CPUID_AMD_FEATURE_ECX_SVM : 0)
                                //| X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
                                /* Note: This could prevent teleporting from AMD to Intel CPUs! */
@@ -2737,4 +2739,5 @@
         {
             PORTABLE_DISABLE_FEATURE_BIT(    1, pExtFeatureLeaf->uEcx, CR8L,       X86_CPUID_AMD_FEATURE_ECX_CR8L);
+            PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEcx, SVM,        X86_CPUID_AMD_FEATURE_ECX_SVM,       pConfig->enmSvm);
             PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEcx, ABM,        X86_CPUID_AMD_FEATURE_ECX_ABM,       pConfig->enmAbm);
             PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEcx, SSE4A,      X86_CPUID_AMD_FEATURE_ECX_SSE4A,     pConfig->enmSse4A);
@@ -2779,4 +2782,6 @@
         if (pConfig->enmSse4A     == CPUMISAEXTCFG_ENABLED_ALWAYS)
             pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_SSE4A;
+        if (pConfig->enmSvm       == CPUMISAEXTCFG_ENABLED_ALWAYS)
+            pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_SVM;
         if (pConfig->enmMisAlnSse == CPUMISAEXTCFG_ENABLED_ALWAYS)
             pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_MISALNSSE;
@@ -2785,4 +2790,6 @@
         if (pConfig->enmAmdExtMmx  == CPUMISAEXTCFG_ENABLED_ALWAYS)
             pExtFeatureLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_AXMMX;
+        if (pConfig->enmSvm        == CPUMISAEXTCFG_ENABLED_ALWAYS)
+            pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_SVM;
     }
     pExtFeatureLeaf = NULL; /* Must refetch! */
@@ -3348,7 +3355,16 @@
      *      ECX - Reserved.
      *      EDX - SVM Feature identification.
-     * We clear all as we currently does not virtualize SVM.
-     */
-    cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x8000000a));
+     */
+    pExtFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000001), 0);
+    if (pExtFeatureLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_SVM)
+    {
+        PCPUMCPUIDLEAF pSvmFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 0x8000000a, 0);
+        pSvmFeatureLeaf->uEax = 0x1;
+        pSvmFeatureLeaf->uEbx = 0x8000;
+        pSvmFeatureLeaf->uEcx = 0;
+        pSvmFeatureLeaf->uEdx = 0; /** @todo Support SVM features */
+    }
+    else
+        cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x8000000a));
 
     /* Cpuid 0x8000000b thru 0x80000018: Reserved
@@ -3722,4 +3738,5 @@
                                   "|3DNOWPRF"
                                   "|AXMMX"
+                                  "|SVM"
                                   , "" /*pszValidNodes*/, "CPUM" /*pszWho*/, 0 /*uInstance*/);
         if (RT_FAILURE(rc))
@@ -3895,4 +3912,14 @@
     rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "AXMMX", &pConfig->enmAmdExtMmx, fNestedPagingAndFullGuestExec);
     AssertLogRelRCReturn(rc, rc);
+
+#ifdef VBOX_WITH_NESTED_HWVIRT
+    /** @cfgm{/CPUM/IsaExts/SVM, isaextcfg, depends}
+     * Whether to expose the AMD's hardware virtualization (SVM) instructions to the
+     * guest. For the time being, the default is to only do this for VMs with nested
+     * paging and AMD-V.
+     */
+    rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "SVM", &pConfig->enmSvm, fNestedPagingAndFullGuestExec);
+    AssertLogRelRCReturn(rc, rc);
+#endif
 
     return VINF_SUCCESS;
@@ -5832,5 +5859,5 @@
     DBGFREGSUBFIELD_RO("LahfSahf\0"     "LAHF/SAHF support in 64-bit mode",              0, 1, 0),
     DBGFREGSUBFIELD_RO("CmpLegacy\0"    "Core multi-processing legacy mode",             1, 1, 0),
-    DBGFREGSUBFIELD_RO("SVM\0"          "AMD VM extensions",                             2, 1, 0),
+    DBGFREGSUBFIELD_RO("SVM\0"          "AMD Secure Virtual Machine extensions",         2, 1, 0),
     DBGFREGSUBFIELD_RO("EXTAPIC\0"      "AMD Extended APIC registers",                   3, 1, 0),
     DBGFREGSUBFIELD_RO("CR8L\0"         "AMD LOCK MOV CR0 means MOV CR8",                4, 1, 0),
Index: /trunk/src/VBox/VMM/include/CPUMInternal.mac
===================================================================
--- /trunk/src/VBox/VMM/include/CPUMInternal.mac	(revision 65903)
+++ /trunk/src/VBox/VMM/include/CPUMInternal.mac	(revision 65904)
@@ -226,6 +226,15 @@
     .Guest.pXStateRC      RTRCPTR_RES 1
     .Guest.aoffXState         resw    64
-
+    alignb 8
+    .Guest.hwvirt.svm.uMsrHSavePa         resq    1
+    .Guest.hwvirt.svm.u64InterceptCtrl    resq    1
+    .Guest.hwvirt.svm.u32InterceptXcpt    resd    1
+    .Guest.hwvirt.svm.u16InterceptRdCRx   resw    1
+    .Guest.hwvirt.svm.u16InterceptWrCRx   resw    1
+    .Guest.hwvirt.svm.u16InterceptRdDRx   resw    1
+    .Guest.hwvirt.svm.u16InterceptWrDRx   resw    1
+    .Guest.hwvirt.svm.fGif                resb    1
     alignb 64
+
     .GuestMsrs                resq    0
     .GuestMsrs.au64           resq    64
@@ -484,4 +493,13 @@
     .Hyper.pXStateRC      RTRCPTR_RES 1
     .Hyper.aoffXState         resw    64
+    alignb 8
+    .Hyper.hwvirt.svm.uMsrHSavePa         resq    1
+    .Hyper.hwvirt.svm.u64InterceptCtrl    resq    1
+    .Hyper.hwvirt.svm.u32InterceptXcpt    resd    1
+    .Hyper.hwvirt.svm.u16InterceptRdCRx   resw    1
+    .Hyper.hwvirt.svm.u16InterceptWrCRx   resw    1
+    .Hyper.hwvirt.svm.u16InterceptRdDRx   resw    1
+    .Hyper.hwvirt.svm.u16InterceptWrDRx   resw    1
+    .Hyper.hwvirt.svm.fGif                resb    1
     alignb 64
 
Index: /trunk/src/VBox/VMM/testcase/tstVMStruct.h
===================================================================
--- /trunk/src/VBox/VMM/testcase/tstVMStruct.h	(revision 65903)
+++ /trunk/src/VBox/VMM/testcase/tstVMStruct.h	(revision 65904)
@@ -131,4 +131,9 @@
 
     GEN_CHECK_SIZE(CPUMCTX);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.uMsrHSavePa);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.fGif);
+    /** @todo add rest of hwvirt fields when code is more
+     *        finalized. */
     GEN_CHECK_OFF(CPUMCTX, pXStateR0);
     GEN_CHECK_OFF(CPUMCTX, pXStateR3);
Index: /trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
===================================================================
--- /trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp	(revision 65903)
+++ /trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp	(revision 65904)
@@ -312,4 +312,5 @@
     CHECK_MEMBER_ALIGNMENT(CPUMCTX, gdtr.pGdt, 8);
     CHECK_MEMBER_ALIGNMENT(CPUMCTX, SysEnter, 8);
+    CHECK_MEMBER_ALIGNMENT(CPUMCTX, hwvirt, 8);
     CHECK_CPUMCTXCORE(rax);
     CHECK_CPUMCTXCORE(rcx);
