Index: /trunk/include/VBox/vmm/cpum.h
===================================================================
--- /trunk/include/VBox/vmm/cpum.h	(revision 66039)
+++ /trunk/include/VBox/vmm/cpum.h	(revision 66040)
@@ -1028,7 +1028,50 @@
     uint32_t        fSvm : 1;
 
+    /** Support for Intel VMX. */
+    uint32_t        fVmx : 1;
+
     /** Alignment padding / reserved for future use. */
-    uint32_t        fPadding : 26;
-    uint32_t        auPadding[3];
+    uint32_t        fPadding : 25;
+
+    /** Hardware virtualization features. */
+    union
+    {
+        /** SVM features.  */
+        struct
+        {
+            /** Features as reported by CPUID 0x8000000a.EDX.  */
+            union
+            {
+                struct
+                {
+                    uint32_t fNestedPaging         : 1;
+                    uint32_t fLbrVirt              : 1;
+                    uint32_t fSvmLock              : 1;
+                    uint32_t fNextRipSave          : 1;
+                    uint32_t fTscRateMsr           : 1;
+                    uint32_t fVmcbClean            : 1;
+                    uint32_t fFlusbByAsid          : 1;
+                    uint32_t fDecodeAssist         : 1;
+                    uint32_t u2Reserved0           : 2;
+                    uint32_t fPauseFilter          : 1;
+                    uint32_t u1Reserved0           : 1;
+                    uint32_t fPauseFilterThreshold : 1;
+                    uint32_t fAvic                 : 1;
+                    uint32_t u18Reserved0          : 18;
+                } n;
+                uint32_t    u;
+            } feat;
+            /** Maximum supported ASID. */
+            uint32_t        uMaxAsid;
+        } svm;
+
+        /** VMX features. */
+        struct
+        {
+            uint32_t    uDummy1;
+            uint32_t    uDummy2;
+        } vmx;
+    } CPUM_UNION_NM(hwvirt);
+    uint32_t        auPadding[1];
 } CPUMFEATURES;
 #ifndef VBOX_FOR_DTRACE_LIB
@@ -1269,6 +1312,5 @@
 
 /**
- * Checks if the guest has the specified ctrl/instruction
- * intercept active.
+ * Checks if the guest VMCB has the specified ctrl/instruction intercept active.
  *
  * @returns @c true if in intercept is set, @c false otherwise.
@@ -1279,9 +1321,9 @@
 DECLINLINE(bool) CPUMIsGuestSvmCtrlInterceptSet(PCPUMCTX pCtx, uint64_t fIntercept)
 {
-    return RT_BOOL(pCtx->hwvirt.svm.u64InterceptCtrl & fIntercept);
+    return RT_BOOL(pCtx->hwvirt.svm.VmcbCtrl.u64InterceptCtrl & fIntercept);
 }
 
 /**
- * Checks if the guest has the specified CR read intercept
+ * Checks if the guest VMCB has the specified CR read intercept
  * active.
  *
@@ -1292,9 +1334,9 @@
 DECLINLINE(bool) CPUMIsGuestSvmReadCRxInterceptSet(PCPUMCTX pCtx, uint8_t uCr)
 {
-    return RT_BOOL(pCtx->hwvirt.svm.u16InterceptRdCRx & (1 << uCr));
+    return RT_BOOL(pCtx->hwvirt.svm.VmcbCtrl.u16InterceptRdCRx & (1 << uCr));
 }
 
 /**
- * Checks if the guest has the specified CR write intercept
+ * Checks if the guest VMCB has the specified CR write intercept
  * active.
  *
@@ -1305,9 +1347,9 @@
 DECLINLINE(bool) CPUMIsGuestSvmWriteCRxInterceptSet(PCPUMCTX pCtx, uint8_t uCr)
 {
-    return RT_BOOL(pCtx->hwvirt.svm.u16InterceptWrCRx & (1 << uCr)); 
+    return RT_BOOL(pCtx->hwvirt.svm.VmcbCtrl.u16InterceptWrCRx & (1 << uCr));
 }
 
 /**
- * Checks if the guest has the specified DR read intercept
+ * Checks if the guest VMCB has the specified DR read intercept
  * active.
  *
@@ -1318,9 +1360,9 @@
 DECLINLINE(bool) CPUMIsGuestSvmReadDRxInterceptSet(PCPUMCTX pCtx, uint8_t uDr)
 {
-    return RT_BOOL(pCtx->hwvirt.svm.u16InterceptRdDRx & (1 << uDr)); 
+    return RT_BOOL(pCtx->hwvirt.svm.VmcbCtrl.u16InterceptRdDRx & (1 << uDr));
 }
 
 /**
- * Checks if the guest has the specified DR write intercept
+ * Checks if the guest VMCB has the specified DR write intercept
  * active.
  *
@@ -1331,9 +1373,9 @@
 DECLINLINE(bool) CPUMIsGuestSvmWriteDRxInterceptSet(PCPUMCTX pCtx, uint8_t uDr)
 {
-    return RT_BOOL(pCtx->hwvirt.svm.u16InterceptWrDRx & (1 << uDr)); 
+    return RT_BOOL(pCtx->hwvirt.svm.VmcbCtrl.u16InterceptWrDRx & (1 << uDr));
 }
 
 /**
- * Checks if the guest has the specified exception
+ * Checks if the guest VMCB has the specified exception
  * intercept active.
  *
@@ -1344,10 +1386,9 @@
 DECLINLINE(bool) CPUMIsGuestSvmXcptInterceptSet(PCPUMCTX pCtx, X86XCPT enmXcpt)
 {
-    return RT_BOOL(pCtx->hwvirt.svm.u32InterceptXcpt & enmXcpt); 
+    return RT_BOOL(pCtx->hwvirt.svm.VmcbCtrl.u32InterceptXcpt & enmXcpt);
 }
 
 /**
- * Checks if the guest is currently in nested hardware-virtualized
- * guest mode.
+ * Checks if we are executing inside the nested hardware-virtualized guest.
  *
  * @returns true if in nested-guest mode, false otherwise.
@@ -1357,9 +1398,8 @@
 {
     /*
-     * With SVM, the VMRUN intercept is a pre-requisite to entering guest-mode.
-     * See AMD spec., 15.5 "VMRUN instruction" subsection "Canonicalization and Consistency Checks".
+     * With AMD-V, the VMRUN intercept is a pre-requisite to entering SVM guest-mode.
+     * See AMD spec. 15.5 "VMRUN instruction" subsection "Canonicalization and Consistency Checks".
      */
-    /** @todo Fix this -- silly recompiler is redefining this stuff... why? */
-    return RT_BOOL(pCtx->hwvirt.svm.u64InterceptCtrl & RT_BIT_64(32) /* SVM_CTRL_INTERCEPT_VMRUN*/);
+    return RT_BOOL(pCtx->hwvirt.svm.VmcbCtrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN);
     /** @todo Intel VMX.  */
 }
Index: /trunk/include/VBox/vmm/cpum.mac
===================================================================
--- /trunk/include/VBox/vmm/cpum.mac	(revision 66039)
+++ /trunk/include/VBox/vmm/cpum.mac	(revision 66040)
@@ -259,13 +259,8 @@
 %endif
     .hwvirt.svm.uMsrHSavePa         resq    1
-    .hwvirt.svm.u64InterceptCtrl    resq    1
-    .hwvirt.svm.u32InterceptXcpt    resd    1
-    .hwvirt.svm.u16InterceptRdCRx   resw    1
-    .hwvirt.svm.u16InterceptWrCRx   resw    1
-    .hwvirt.svm.u16InterceptRdDRx   resw    1
-    .hwvirt.svm.u16InterceptWrDRx   resw    1
+    .hwvirt.svm.GCPhysVmcb          resq    1
+    .hwvirt.svm.VmcbCtrl            resb  256
+    .hwvirt.svm.HostState           resb  184
     .hwvirt.svm.fGif                resb    1
-    .hwvirt.svm.abPadding           resb    3
-    .hwvirt.svm.GCPhysNstGstVmcb    resq    1
     alignb 64
 endstruc
Index: /trunk/include/VBox/vmm/cpumctx.h
===================================================================
--- /trunk/include/VBox/vmm/cpumctx.h	(revision 66039)
+++ /trunk/include/VBox/vmm/cpumctx.h	(revision 66040)
@@ -30,4 +30,5 @@
 # include <iprt/x86.h>
 # include <VBox/types.h>
+# include <VBox/vmm/hm_svm.h>
 #else
 # pragma D depends_on library x86.d
@@ -72,6 +73,6 @@
     X86DESCATTR Attr;
 } CPUMSELREG;
-#ifdef VBOX_FOR_DTRACE_LIB
-AssertCompileSize(CPUMSELREG, 24)
+#ifndef VBOX_FOR_DTRACE_LIB
+AssertCompileSize(CPUMSELREG, 24);
 #endif
 
@@ -169,5 +170,5 @@
     } CPUM_STRUCT_NM(s);
 } CPUMCTXGREG;
-#ifdef VBOX_FOR_DTRACE_LIB
+#ifndef VBOX_FOR_DTRACE_LIB
 AssertCompileSize(CPUMCTXGREG, 8);
 AssertCompileMemberOffset(CPUMCTXGREG, CPUM_STRUCT_NM(s.) bLo, 0);
@@ -283,4 +284,37 @@
 } CPUMCTXCORE;
 #pragma pack()
+
+
+/**
+ * SVM Host-state area (Nested Hw.virt - VirtualBox's layout).
+ */
+#pragma pack(1)
+typedef struct SVMHOSTSTATE
+{
+    uint64_t    uEferMsr;
+    uint64_t    uCr0;
+    uint64_t    uCr4;
+    uint64_t    uCr3;
+    uint64_t    uRip;
+    uint64_t    uRsp;
+    uint64_t    uRax;
+    X86RFLAGS   rflags;
+    CPUMSELREG  es;
+    CPUMSELREG  cs;
+    CPUMSELREG  ss;
+    CPUMSELREG  ds;
+    VBOXGDTR    gdtr;
+    VBOXIDTR    idtr;
+    uint8_t     abPadding[4];
+} SVMHOSTSTATE;
+#pragma pack()
+/** Pointer to the SVMHOSTSTATE structure. */
+typedef SVMHOSTSTATE *PSVMHOSTSTATE;
+/** Pointer to a const SVMHOSTSTATE structure. */
+typedef const SVMHOSTSTATE *PCSVMHOSTSTATE;
+#ifndef VBOX_FOR_DTRACE_LIB
+AssertCompileSizeAlignment(SVMHOSTSTATE, 8);
+AssertCompileSize(SVMHOSTSTATE, 184);
+#endif
 
 
@@ -441,28 +475,16 @@
             struct
             {
-                /** 728 - MSR holding physical address of the Guest's 'host-state'. */
-                uint64_t            uMsrHSavePa;
-
-                /** @name Cache of the nested-guest VMCB controls.
-                 * @{ */
-                /** 736 - Control intercepts. */
-                uint64_t            u64InterceptCtrl;
-                /** 744 - Exception intercepts. */
-                uint32_t            u32InterceptXcpt;
-                /** 748 - CR0-CR15 read intercepts. */
-                uint16_t            u16InterceptRdCRx;
-                /** 750 - CR0-CR15 write intercepts. */
-                uint16_t            u16InterceptWrCRx;
-                /** 752 - DR0-DR15 read intercepts. */
-                uint16_t            u16InterceptRdDRx;
-                /** 754 - DR0-DR15 write intercepts. */
-                uint16_t            u16InterceptWrDRx;
-                /** @} */
-
-                /** 756 - Global interrupt flag. */
-                uint8_t            fGif;
-                uint8_t            abPadding[3];
-                /** 760 - Nested-guest VMCB. */
-                RTGCPHYS           GCPhysNstGstVmcb;
+                /** 728 - MSR holding physical address of the Guest's Host-state. */
+                uint64_t        uMsrHSavePa;
+                /** 736 - Guest physical address of the nested-guest VMCB. */
+                RTGCPHYS        GCPhysVmcb;
+                /** 744 - Cache of the nested-guest VMCB control area. */
+                SVMVMCBCTRL     VmcbCtrl;
+                /** 1000 - Guest's host-state save area. */
+                SVMHOSTSTATE    HostState;
+                /** 1184 - Global interrupt flag. */
+                uint8_t         fGif;
+                /** 1185 - Padding. */
+                uint8_t         abPadding0[31];
             } svm;
 #if 0
@@ -527,12 +549,9 @@
 AssertCompileMemberOffset(CPUMCTX,                 aoffXState, HC_ARCH_BITS == 64 ? 596 : 588);
 AssertCompileMemberOffset(CPUMCTX, hwvirt, 728);
-AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.uMsrHSavePa,       728);
-AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.u64InterceptCtrl,  736);
-AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.u32InterceptXcpt,  744);
-AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.u16InterceptRdCRx, 748);
-AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.u16InterceptWrCRx, 750);
-AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.u16InterceptRdDRx, 752);
-AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.u16InterceptWrDRx, 754);
-AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.fGif,              756);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.uMsrHSavePa, 728);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.GCPhysVmcb,  736);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.VmcbCtrl,    744);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.HostState,  1000);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.fGif,       1184);
 
 AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_NM(g.) aGRegs);
@@ -719,29 +738,4 @@
 typedef const CPUMCPUID *PCCPUMCPUID;
 
-/**
- * SVM Host-state area (Nested Hw.virt - VirtualBox's layout).
- */
-typedef struct SVMHOSTSTATE
-{
-    uint64_t    uEferMsr;
-    uint64_t    uCr0;
-    uint64_t    uCr4;
-    uint64_t    uCr3;
-    uint64_t    uRip;
-    uint64_t    uRsp;
-    uint64_t    uRax;
-    X86RFLAGS   rflags;
-    CPUMSELREG  es;
-    CPUMSELREG  cs;
-    CPUMSELREG  ss;
-    CPUMSELREG  ds;
-    VBOXGDTR    gdtr;
-    VBOXIDTR    idtr;
-} SVMHOSTSTATE;
-/** Pointer to the SVMHOSTSTATE structure. */
-typedef SVMHOSTSTATE *PSVMHOSTSTATE;
-/** Pointer to a const SVMHOSTSTATE structure. */
-typedef const SVMHOSTSTATE *PCSVMHOSTSTATE;
-
 /** @}  */
 
Index: /trunk/include/VBox/vmm/hm.h
===================================================================
--- /trunk/include/VBox/vmm/hm.h	(revision 66039)
+++ /trunk/include/VBox/vmm/hm.h	(revision 66040)
@@ -153,5 +153,5 @@
 VMM_INT_DECL(void)              HMVmxNstGstVmExit(PVMCPU pVCpu, uint16_t uBasicExitReason);
 VMM_INT_DECL(VBOXSTRICTRC)      HMSvmVmmcall(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfRipUpdated);
-VMM_INT_DECL(VBOXSTRICTRC)      HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcb, PSVMHOSTSTATE pHostState);
+VMM_INT_DECL(VBOXSTRICTRC)      HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx);
 
 #ifndef IN_RC
Index: /trunk/include/VBox/vmm/hm_svm.h
===================================================================
--- /trunk/include/VBox/vmm/hm_svm.h	(revision 66039)
+++ /trunk/include/VBox/vmm/hm_svm.h	(revision 66040)
@@ -67,214 +67,220 @@
 /** @} */
 
-
+/*
+ * Ugly!
+ * When compiling the recompiler, its own svm.h defines clash with
+ * the defines that follow. Guard only the duplicates here, since the
+ * other definitions and structures in this header are still required.
+ */
+#ifndef IN_REM_R3
 /** @name SVM Basic Exit Reasons.
  * @{
  */
 /** Invalid guest state in VMCB. */
-#define SVM_EXIT_INVALID                (uint64_t)(-1)
+# define SVM_EXIT_INVALID                (uint64_t)(-1)
 /** Read from CR0-CR15. */
-#define SVM_EXIT_READ_CR0               0x0
-#define SVM_EXIT_READ_CR1               0x1
-#define SVM_EXIT_READ_CR2               0x2
-#define SVM_EXIT_READ_CR3               0x3
-#define SVM_EXIT_READ_CR4               0x4
-#define SVM_EXIT_READ_CR5               0x5
-#define SVM_EXIT_READ_CR6               0x6
-#define SVM_EXIT_READ_CR7               0x7
-#define SVM_EXIT_READ_CR8               0x8
-#define SVM_EXIT_READ_CR9               0x9
-#define SVM_EXIT_READ_CR10              0xA
-#define SVM_EXIT_READ_CR11              0xB
-#define SVM_EXIT_READ_CR12              0xC
-#define SVM_EXIT_READ_CR13              0xD
-#define SVM_EXIT_READ_CR14              0xE
-#define SVM_EXIT_READ_CR15              0xF
+# define SVM_EXIT_READ_CR0               0x0
+# define SVM_EXIT_READ_CR1               0x1
+# define SVM_EXIT_READ_CR2               0x2
+# define SVM_EXIT_READ_CR3               0x3
+# define SVM_EXIT_READ_CR4               0x4
+# define SVM_EXIT_READ_CR5               0x5
+# define SVM_EXIT_READ_CR6               0x6
+# define SVM_EXIT_READ_CR7               0x7
+# define SVM_EXIT_READ_CR8               0x8
+# define SVM_EXIT_READ_CR9               0x9
+# define SVM_EXIT_READ_CR10              0xA
+# define SVM_EXIT_READ_CR11              0xB
+# define SVM_EXIT_READ_CR12              0xC
+# define SVM_EXIT_READ_CR13              0xD
+# define SVM_EXIT_READ_CR14              0xE
+# define SVM_EXIT_READ_CR15              0xF
 /** Writes to CR0-CR15. */
-#define SVM_EXIT_WRITE_CR0              0x10
-#define SVM_EXIT_WRITE_CR1              0x11
-#define SVM_EXIT_WRITE_CR2              0x12
-#define SVM_EXIT_WRITE_CR3              0x13
-#define SVM_EXIT_WRITE_CR4              0x14
-#define SVM_EXIT_WRITE_CR5              0x15
-#define SVM_EXIT_WRITE_CR6              0x16
-#define SVM_EXIT_WRITE_CR7              0x17
-#define SVM_EXIT_WRITE_CR8              0x18
-#define SVM_EXIT_WRITE_CR9              0x19
-#define SVM_EXIT_WRITE_CR10             0x1A
-#define SVM_EXIT_WRITE_CR11             0x1B
-#define SVM_EXIT_WRITE_CR12             0x1C
-#define SVM_EXIT_WRITE_CR13             0x1D
-#define SVM_EXIT_WRITE_CR14             0x1E
-#define SVM_EXIT_WRITE_CR15             0x1F
+# define SVM_EXIT_WRITE_CR0              0x10
+# define SVM_EXIT_WRITE_CR1              0x11
+# define SVM_EXIT_WRITE_CR2              0x12
+# define SVM_EXIT_WRITE_CR3              0x13
+# define SVM_EXIT_WRITE_CR4              0x14
+# define SVM_EXIT_WRITE_CR5              0x15
+# define SVM_EXIT_WRITE_CR6              0x16
+# define SVM_EXIT_WRITE_CR7              0x17
+# define SVM_EXIT_WRITE_CR8              0x18
+# define SVM_EXIT_WRITE_CR9              0x19
+# define SVM_EXIT_WRITE_CR10             0x1A
+# define SVM_EXIT_WRITE_CR11             0x1B
+# define SVM_EXIT_WRITE_CR12             0x1C
+# define SVM_EXIT_WRITE_CR13             0x1D
+# define SVM_EXIT_WRITE_CR14             0x1E
+# define SVM_EXIT_WRITE_CR15             0x1F
 /** Read from DR0-DR15. */
-#define SVM_EXIT_READ_DR0               0x20
-#define SVM_EXIT_READ_DR1               0x21
-#define SVM_EXIT_READ_DR2               0x22
-#define SVM_EXIT_READ_DR3               0x23
-#define SVM_EXIT_READ_DR4               0x24
-#define SVM_EXIT_READ_DR5               0x25
-#define SVM_EXIT_READ_DR6               0x26
-#define SVM_EXIT_READ_DR7               0x27
-#define SVM_EXIT_READ_DR8               0x28
-#define SVM_EXIT_READ_DR9               0x29
-#define SVM_EXIT_READ_DR10              0x2A
-#define SVM_EXIT_READ_DR11              0x2B
-#define SVM_EXIT_READ_DR12              0x2C
-#define SVM_EXIT_READ_DR13              0x2D
-#define SVM_EXIT_READ_DR14              0x2E
-#define SVM_EXIT_READ_DR15              0x2F
+# define SVM_EXIT_READ_DR0               0x20
+# define SVM_EXIT_READ_DR1               0x21
+# define SVM_EXIT_READ_DR2               0x22
+# define SVM_EXIT_READ_DR3               0x23
+# define SVM_EXIT_READ_DR4               0x24
+# define SVM_EXIT_READ_DR5               0x25
+# define SVM_EXIT_READ_DR6               0x26
+# define SVM_EXIT_READ_DR7               0x27
+# define SVM_EXIT_READ_DR8               0x28
+# define SVM_EXIT_READ_DR9               0x29
+# define SVM_EXIT_READ_DR10              0x2A
+# define SVM_EXIT_READ_DR11              0x2B
+# define SVM_EXIT_READ_DR12              0x2C
+# define SVM_EXIT_READ_DR13              0x2D
+# define SVM_EXIT_READ_DR14              0x2E
+# define SVM_EXIT_READ_DR15              0x2F
 /** Writes to DR0-DR15. */
-#define SVM_EXIT_WRITE_DR0              0x30
-#define SVM_EXIT_WRITE_DR1              0x31
-#define SVM_EXIT_WRITE_DR2              0x32
-#define SVM_EXIT_WRITE_DR3              0x33
-#define SVM_EXIT_WRITE_DR4              0x34
-#define SVM_EXIT_WRITE_DR5              0x35
-#define SVM_EXIT_WRITE_DR6              0x36
-#define SVM_EXIT_WRITE_DR7              0x37
-#define SVM_EXIT_WRITE_DR8              0x38
-#define SVM_EXIT_WRITE_DR9              0x39
-#define SVM_EXIT_WRITE_DR10             0x3A
-#define SVM_EXIT_WRITE_DR11             0x3B
-#define SVM_EXIT_WRITE_DR12             0x3C
-#define SVM_EXIT_WRITE_DR13             0x3D
-#define SVM_EXIT_WRITE_DR14             0x3E
-#define SVM_EXIT_WRITE_DR15             0x3F
+# define SVM_EXIT_WRITE_DR0              0x30
+# define SVM_EXIT_WRITE_DR1              0x31
+# define SVM_EXIT_WRITE_DR2              0x32
+# define SVM_EXIT_WRITE_DR3              0x33
+# define SVM_EXIT_WRITE_DR4              0x34
+# define SVM_EXIT_WRITE_DR5              0x35
+# define SVM_EXIT_WRITE_DR6              0x36
+# define SVM_EXIT_WRITE_DR7              0x37
+# define SVM_EXIT_WRITE_DR8              0x38
+# define SVM_EXIT_WRITE_DR9              0x39
+# define SVM_EXIT_WRITE_DR10             0x3A
+# define SVM_EXIT_WRITE_DR11             0x3B
+# define SVM_EXIT_WRITE_DR12             0x3C
+# define SVM_EXIT_WRITE_DR13             0x3D
+# define SVM_EXIT_WRITE_DR14             0x3E
+# define SVM_EXIT_WRITE_DR15             0x3F
 /* Exception 0-31. */
-#define SVM_EXIT_EXCEPTION_0            0x40
-#define SVM_EXIT_EXCEPTION_1            0x41
-#define SVM_EXIT_EXCEPTION_2            0x42
-#define SVM_EXIT_EXCEPTION_3            0x43
-#define SVM_EXIT_EXCEPTION_4            0x44
-#define SVM_EXIT_EXCEPTION_5            0x45
-#define SVM_EXIT_EXCEPTION_6            0x46
-#define SVM_EXIT_EXCEPTION_7            0x47
-#define SVM_EXIT_EXCEPTION_8            0x48
-#define SVM_EXIT_EXCEPTION_9            0x49
-#define SVM_EXIT_EXCEPTION_A            0x4A
-#define SVM_EXIT_EXCEPTION_B            0x4B
-#define SVM_EXIT_EXCEPTION_C            0x4C
-#define SVM_EXIT_EXCEPTION_D            0x4D
-#define SVM_EXIT_EXCEPTION_E            0x4E
-#define SVM_EXIT_EXCEPTION_F            0x4F
-#define SVM_EXIT_EXCEPTION_10           0x50
-#define SVM_EXIT_EXCEPTION_11           0x51
-#define SVM_EXIT_EXCEPTION_12           0x52
-#define SVM_EXIT_EXCEPTION_13           0x53
-#define SVM_EXIT_EXCEPTION_14           0x54
-#define SVM_EXIT_EXCEPTION_15           0x55
-#define SVM_EXIT_EXCEPTION_16           0x56
-#define SVM_EXIT_EXCEPTION_17           0x57
-#define SVM_EXIT_EXCEPTION_18           0x58
-#define SVM_EXIT_EXCEPTION_19           0x59
-#define SVM_EXIT_EXCEPTION_1A           0x5A
-#define SVM_EXIT_EXCEPTION_1B           0x5B
-#define SVM_EXIT_EXCEPTION_1C           0x5C
-#define SVM_EXIT_EXCEPTION_1D           0x5D
-#define SVM_EXIT_EXCEPTION_1E           0x5E
-#define SVM_EXIT_EXCEPTION_1F           0x5F
+# define SVM_EXIT_EXCEPTION_0            0x40
+# define SVM_EXIT_EXCEPTION_1            0x41
+# define SVM_EXIT_EXCEPTION_2            0x42
+# define SVM_EXIT_EXCEPTION_3            0x43
+# define SVM_EXIT_EXCEPTION_4            0x44
+# define SVM_EXIT_EXCEPTION_5            0x45
+# define SVM_EXIT_EXCEPTION_6            0x46
+# define SVM_EXIT_EXCEPTION_7            0x47
+# define SVM_EXIT_EXCEPTION_8            0x48
+# define SVM_EXIT_EXCEPTION_9            0x49
+# define SVM_EXIT_EXCEPTION_A            0x4A
+# define SVM_EXIT_EXCEPTION_B            0x4B
+# define SVM_EXIT_EXCEPTION_C            0x4C
+# define SVM_EXIT_EXCEPTION_D            0x4D
+# define SVM_EXIT_EXCEPTION_E            0x4E
+# define SVM_EXIT_EXCEPTION_F            0x4F
+# define SVM_EXIT_EXCEPTION_10           0x50
+# define SVM_EXIT_EXCEPTION_11           0x51
+# define SVM_EXIT_EXCEPTION_12           0x52
+# define SVM_EXIT_EXCEPTION_13           0x53
+# define SVM_EXIT_EXCEPTION_14           0x54
+# define SVM_EXIT_EXCEPTION_15           0x55
+# define SVM_EXIT_EXCEPTION_16           0x56
+# define SVM_EXIT_EXCEPTION_17           0x57
+# define SVM_EXIT_EXCEPTION_18           0x58
+# define SVM_EXIT_EXCEPTION_19           0x59
+# define SVM_EXIT_EXCEPTION_1A           0x5A
+# define SVM_EXIT_EXCEPTION_1B           0x5B
+# define SVM_EXIT_EXCEPTION_1C           0x5C
+# define SVM_EXIT_EXCEPTION_1D           0x5D
+# define SVM_EXIT_EXCEPTION_1E           0x5E
+# define SVM_EXIT_EXCEPTION_1F           0x5F
 /** Physical maskable interrupt. */
-#define SVM_EXIT_INTR                   0x60
+# define SVM_EXIT_INTR                   0x60
 /** Non-maskable interrupt. */
-#define SVM_EXIT_NMI                    0x61
+# define SVM_EXIT_NMI                    0x61
 /** System Management interrupt. */
-#define SVM_EXIT_SMI                    0x62
+# define SVM_EXIT_SMI                    0x62
 /** Physical INIT signal. */
-#define SVM_EXIT_INIT                   0x63
+# define SVM_EXIT_INIT                   0x63
 /** Virtual interrupt. */
-#define SVM_EXIT_VINTR                  0x64
+# define SVM_EXIT_VINTR                  0x64
 /** Write to CR0 that changed any bits other than CR0.TS or CR0.MP. */
-#define SVM_EXIT_CR0_SEL_WRITE          0x65
+# define SVM_EXIT_CR0_SEL_WRITE          0x65
 /** IDTR read. */
-#define SVM_EXIT_IDTR_READ              0x66
+# define SVM_EXIT_IDTR_READ              0x66
 /** GDTR read. */
-#define SVM_EXIT_GDTR_READ              0x67
+# define SVM_EXIT_GDTR_READ              0x67
 /** LDTR read. */
-#define SVM_EXIT_LDTR_READ              0x68
+# define SVM_EXIT_LDTR_READ              0x68
 /** TR read. */
-#define SVM_EXIT_TR_READ                0x69
+# define SVM_EXIT_TR_READ                0x69
 /** IDTR write. */
-#define SVM_EXIT_IDTR_WRITE             0x6A
+# define SVM_EXIT_IDTR_WRITE             0x6A
 /** GDTR write. */
-#define SVM_EXIT_GDTR_WRITE             0x6B
+# define SVM_EXIT_GDTR_WRITE             0x6B
 /** LDTR write. */
-#define SVM_EXIT_LDTR_WRITE             0x6C
+# define SVM_EXIT_LDTR_WRITE             0x6C
 /** TR write. */
-#define SVM_EXIT_TR_WRITE               0x6D
+# define SVM_EXIT_TR_WRITE               0x6D
 /** RDTSC instruction. */
-#define SVM_EXIT_RDTSC                  0x6E
+# define SVM_EXIT_RDTSC                  0x6E
 /** RDPMC instruction. */
-#define SVM_EXIT_RDPMC                  0x6F
+# define SVM_EXIT_RDPMC                  0x6F
 /** PUSHF instruction. */
-#define SVM_EXIT_PUSHF                  0x70
+# define SVM_EXIT_PUSHF                  0x70
 /** POPF instruction. */
-#define SVM_EXIT_POPF                   0x71
+# define SVM_EXIT_POPF                   0x71
 /** CPUID instruction. */
-#define SVM_EXIT_CPUID                  0x72
+# define SVM_EXIT_CPUID                  0x72
 /** RSM instruction. */
-#define SVM_EXIT_RSM                    0x73
+# define SVM_EXIT_RSM                    0x73
 /** IRET instruction. */
-#define SVM_EXIT_IRET                   0x74
+# define SVM_EXIT_IRET                   0x74
 /** software interrupt (INTn instructions). */
-#define SVM_EXIT_SWINT                  0x75
+# define SVM_EXIT_SWINT                  0x75
 /** INVD instruction. */
-#define SVM_EXIT_INVD                   0x76
+# define SVM_EXIT_INVD                   0x76
 /** PAUSE instruction. */
-#define SVM_EXIT_PAUSE                  0x77
+# define SVM_EXIT_PAUSE                  0x77
 /** HLT instruction. */
-#define SVM_EXIT_HLT                    0x78
+# define SVM_EXIT_HLT                    0x78
 /** INVLPG instructions. */
-#define SVM_EXIT_INVLPG                 0x79
+# define SVM_EXIT_INVLPG                 0x79
 /** INVLPGA instruction. */
-#define SVM_EXIT_INVLPGA                0x7A
+# define SVM_EXIT_INVLPGA                0x7A
 /** IN or OUT accessing protected port (the EXITINFO1 field provides more information). */
-#define SVM_EXIT_IOIO                   0x7B
+# define SVM_EXIT_IOIO                   0x7B
 /** RDMSR or WRMSR access to protected MSR. */
-#define SVM_EXIT_MSR                    0x7C
+# define SVM_EXIT_MSR                    0x7C
 /** task switch. */
-#define SVM_EXIT_TASK_SWITCH            0x7D
+# define SVM_EXIT_TASK_SWITCH            0x7D
 /** FP legacy handling enabled, and processor is frozen in an x87/mmx instruction waiting for an interrupt. */
-#define SVM_EXIT_FERR_FREEZE            0x7E
+# define SVM_EXIT_FERR_FREEZE            0x7E
 /** Shutdown. */
-#define SVM_EXIT_SHUTDOWN               0x7F
+# define SVM_EXIT_SHUTDOWN               0x7F
 /** VMRUN instruction. */
-#define SVM_EXIT_VMRUN                  0x80
+# define SVM_EXIT_VMRUN                  0x80
 /** VMMCALL instruction. */
-#define SVM_EXIT_VMMCALL                0x81
+# define SVM_EXIT_VMMCALL                0x81
 /** VMLOAD instruction. */
-#define SVM_EXIT_VMLOAD                 0x82
+# define SVM_EXIT_VMLOAD                 0x82
 /** VMSAVE instruction. */
-#define SVM_EXIT_VMSAVE                 0x83
+# define SVM_EXIT_VMSAVE                 0x83
 /** STGI instruction. */
-#define SVM_EXIT_STGI                   0x84
+# define SVM_EXIT_STGI                   0x84
 /** CLGI instruction. */
-#define SVM_EXIT_CLGI                   0x85
+# define SVM_EXIT_CLGI                   0x85
 /** SKINIT instruction. */
-#define SVM_EXIT_SKINIT                 0x86
+# define SVM_EXIT_SKINIT                 0x86
 /** RDTSCP instruction. */
-#define SVM_EXIT_RDTSCP                 0x87
+# define SVM_EXIT_RDTSCP                 0x87
 /** ICEBP instruction. */
-#define SVM_EXIT_ICEBP                  0x88
+# define SVM_EXIT_ICEBP                  0x88
 /** WBINVD instruction. */
-#define SVM_EXIT_WBINVD                 0x89
+# define SVM_EXIT_WBINVD                 0x89
 /** MONITOR instruction. */
-#define SVM_EXIT_MONITOR                0x8A
+# define SVM_EXIT_MONITOR                0x8A
 /** MWAIT instruction. */
-#define SVM_EXIT_MWAIT                  0x8B
+# define SVM_EXIT_MWAIT                  0x8B
 /** MWAIT instruction, when armed. */
-#define SVM_EXIT_MWAIT_ARMED            0x8C
+# define SVM_EXIT_MWAIT_ARMED            0x8C
 /** XSETBV instruction. */
-#define SVM_EXIT_XSETBV                 0x8D
+# define SVM_EXIT_XSETBV                 0x8D
 /** Nested paging: host-level page fault occurred (EXITINFO1 contains fault errorcode; EXITINFO2 contains the guest physical address causing the fault). */
-#define SVM_EXIT_NPF                    0x400
+# define SVM_EXIT_NPF                    0x400
 /** AVIC: Virtual IPI delivery not completed. */
-#define SVM_EXIT_AVIC_INCOMPLETE_IPI    0x401
+# define SVM_EXIT_AVIC_INCOMPLETE_IPI    0x401
 /** AVIC: Attempted access by guest to a vAPIC register not handled by AVIC
  *  hardware. */
-#define SVM_EXIT_AVIC_NOACCEL           0x402
-
+# define SVM_EXIT_AVIC_NOACCEL           0x402
 /** The maximum possible exit value. */
-#define SVM_EXIT_MAX                    (SVM_EXIT_AVIC_NOACCEL)
-/** @} */
+# define SVM_EXIT_MAX                    (SVM_EXIT_AVIC_NOACCEL)
+/** @} */
+#endif /* !IN_REM_R3 */
 
 
@@ -283,11 +289,11 @@
  */
 /** Set to 1 if the task switch was caused by an IRET; else cleared to 0. */
-#define SVM_EXIT2_TASK_SWITCH_IRET            RT_BIT_64(36)
+# define SVM_EXIT2_TASK_SWITCH_IRET            RT_BIT_64(36)
 /** Set to 1 if the task switch was caused by a far jump; else cleared to 0. */
-#define SVM_EXIT2_TASK_SWITCH_JMP             RT_BIT_64(38)
+# define SVM_EXIT2_TASK_SWITCH_JMP             RT_BIT_64(38)
 /** Set to 1 if the task switch has an error code; else cleared to 0. */
-#define SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE  RT_BIT_64(44)
+# define SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE  RT_BIT_64(44)
 /** The value of EFLAGS.RF that would be saved in the outgoing TSS if the task switch were not intercepted. */
-#define SVM_EXIT2_TASK_SWITCH_EFLAGS_RF       RT_BIT_64(48)
+# define SVM_EXIT2_TASK_SWITCH_EFLAGS_RF       RT_BIT_64(48)
 /** @} */
 
@@ -296,7 +302,7 @@
  */
 /** The access was a read MSR. */
-#define SVM_EXIT1_MSR_READ                    0x0
+# define SVM_EXIT1_MSR_READ                    0x0
 /** The access was a write MSR. */
-#define SVM_EXIT1_MSR_WRITE                   0x1
+# define SVM_EXIT1_MSR_WRITE                   0x1
 /** @} */
 
Index: /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp	(revision 66039)
+++ /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp	(revision 66040)
@@ -179,57 +179,73 @@
  *
  * @returns Strict VBox status code (i.e. informational status codes too).
- *
  * @param   pVCpu               The cross context virtual CPU structure.
  * @param   pCtx                Pointer to the guest-CPU context.
- * @param   pVmcb               The VMCB of the nested-guest.
- * @param   pHostState          The host-state save area in the guest.
- */
-VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcb, PSVMHOSTSTATE pHostState)
-{
-    Assert(pHostState);
-    Assert(pVmcb);
-
-    /*
-     * Save host state.
-     */
-    pHostState->es       = pCtx->es;
-    pHostState->cs       = pCtx->cs;
-    pHostState->ss       = pCtx->ss;
-    pHostState->ds       = pCtx->ds;
-    pHostState->gdtr     = pCtx->gdtr;
-    pHostState->idtr     = pCtx->idtr;
-    pHostState->uEferMsr = pCtx->msrEFER;
-    pHostState->uCr0     = pCtx->cr0;
-    pHostState->uCr3     = pCtx->cr3;
-    pHostState->uCr4     = pCtx->cr4;
-    pHostState->rflags   = pCtx->rflags;
-    pHostState->uRip     = pCtx->rip;
-    pHostState->uRsp     = pCtx->rsp;
-    pHostState->uRax     = pCtx->rax;
-
-    /*
-     * Load controls from VMCB.
-     */
-    pCtx->hwvirt.svm.u16InterceptRdCRx = pVmcb->ctrl.u16InterceptRdCRx;
-    pCtx->hwvirt.svm.u16InterceptWrCRx = pVmcb->ctrl.u16InterceptWrCRx;
-    pCtx->hwvirt.svm.u16InterceptRdDRx = pVmcb->ctrl.u16InterceptRdDRx;
-    pCtx->hwvirt.svm.u16InterceptWrDRx = pVmcb->ctrl.u16InterceptWrDRx;
-    pCtx->hwvirt.svm.u64InterceptCtrl  = pVmcb->ctrl.u64InterceptCtrl;
-    pCtx->hwvirt.svm.u32InterceptXcpt  = pVmcb->ctrl.u32InterceptXcpt;
-    if (!(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN))
-    {
-        Log(("HMSvmVmRun: VMRUN instruction not intercepted -> #VMEXIT\n"));
-        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-    if (!pVmcb->ctrl.TLBCtrl.n.u32ASID)
-    {
-        Log(("HMSvmVmRun: Guest ASID is invalid -> #VMEXIT\n"));
-        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-
-
-    /** @todo the rest. */
-
-    return VERR_NOT_IMPLEMENTED;
+ * @param   GCPhysVmcb          Guest physical address of the VMCB to run.
+ */
+VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, RTGCPHYS GCPhysVmcb)
+{
+    Assert(pVCpu);
+    Assert(pCtx);
+
+    /*
+     * Cache the physical address of the VMCB for #VMEXIT exceptions.
+     */
+    pCtx->hwvirt.svm.GCPhysVmcb = GCPhysVmcb;
+
+    SVMVMCB Vmcb;
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+    int rc = PGMPhysSimpleReadGCPhys(pVM, &Vmcb, GCPhysVmcb, X86_PAGE_4K_SIZE);
+    if (RT_SUCCESS(rc))
+    {
+        /*
+         * Save host state.
+         */
+        PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
+        pHostState->es       = pCtx->es;
+        pHostState->cs       = pCtx->cs;
+        pHostState->ss       = pCtx->ss;
+        pHostState->ds       = pCtx->ds;
+        pHostState->gdtr     = pCtx->gdtr;
+        pHostState->idtr     = pCtx->idtr;
+        pHostState->uEferMsr = pCtx->msrEFER;
+        pHostState->uCr0     = pCtx->cr0;
+        pHostState->uCr3     = pCtx->cr3;
+        pHostState->uCr4     = pCtx->cr4;
+        pHostState->rflags   = pCtx->rflags;
+        pHostState->uRip     = pCtx->rip;
+        pHostState->uRsp     = pCtx->rsp;
+        pHostState->uRax     = pCtx->rax;
+
+        /*
+         * Cache the VMCB controls.
+         */
+        pCtx->hwvirt.svm.VmcbCtrl = Vmcb.ctrl;
+
+        /*
+         * Validate the VMCB controls.
+         */
+        if (!CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_VMRUN))
+        {
+            Log(("HMSvmVmRun: VMRUN instruction not intercepted -> #VMEXIT\n"));
+            return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        }
+        if (    pCtx->hwvirt.svm.VmcbCtrl.NestedPaging.n.u1NestedPaging
+            && !pVM->cpum.ro.GuestFeatures.svm.feat.n.fNestedPaging)
+        {
+            Log(("HMSvmVmRun: Nested paging not supported -> #VMEXIT\n"));
+            return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        }
+        if (!pCtx->hwvirt.svm.VmcbCtrl.TLBCtrl.n.u32ASID)
+        {
+            Log(("HMSvmVmRun: Guest ASID is invalid -> #VMEXIT\n"));
+            return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        }
+
+        /** @todo the rest. */
+
+        return VERR_NOT_IMPLEMENTED;
+    }
+
+    return rc;
 }
 
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h	(revision 66039)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h	(revision 66040)
@@ -5900,29 +5900,5 @@
 #endif
 
-    void *pvVmcb;
-    PGMPAGEMAPLOCK PgLockVmcb;
-    VBOXSTRICTRC rcStrict = iemMemPageMap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_RW, &pvVmcb, &PgLockVmcb);
-    if (rcStrict == VINF_SUCCESS)
-    {
-        pCtx->hwvirt.svm.GCPhysNstGstVmcb = GCPhysVmcb;
-
-        RTGCPHYS GCPhysHostState = pCtx->hwvirt.svm.uMsrHSavePa;
-        /** @todo SVM does not validate the host-state area beyond checking the
-         *        alignment and range of the physical address. Nothing to prevent users
-         *        from using MMIO or other weird stuff in which case anything might
-         *        happen. */
-        void *pvHostState;
-        PGMPAGEMAPLOCK PgLockHostState;
-        rcStrict = iemMemPageMap(pVCpu, GCPhysHostState, IEM_ACCESS_DATA_RW, &pvHostState, &PgLockHostState);
-        if (rcStrict == VINF_SUCCESS)
-        {
-            PSVMHOSTSTATE pHostState = (PSVMHOSTSTATE)pvHostState;
-            PSVMVMCB      pVmcb      = (PSVMVMCB)pvVmcb;
-            rcStrict = HMSvmVmrun(pVCpu, pCtx, pVmcb, pHostState);
-
-            iemMemPageUnmap(pVCpu, GCPhysHostState, IEM_ACCESS_DATA_RW, pvHostState, &PgLockHostState);
-        }
-        iemMemPageUnmap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_RW, pvVmcb, &PgLockVmcb);
-    }
+    rcStrict = HMSvmVmrun(pVCpu, pCtx, GCPhysVmcb);
     RT_NOREF(cbInstr);
     return rcStrict;
Index: /trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp	(revision 66039)
+++ /trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp	(revision 66040)
@@ -1680,4 +1680,11 @@
             pFeatures->fXop             = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_XOP);
             pFeatures->fSvm             = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_SVM);
+            if (pFeatures->fSvm)
+            {
+                PCCPUMCPUIDLEAF pSvmLeaf = cpumR3CpuIdFindLeaf(paLeaves, cLeaves, 0x8000000a);
+                AssertLogRelReturn(pSvmLeaf, VERR_CPUM_IPE_1);
+                pFeatures->svm.feat.u   = pSvmLeaf->uEdx;
+                pFeatures->svm.uMaxAsid = pSvmLeaf->uEbx;
+            }
         }
 
@@ -3363,5 +3370,5 @@
         pSvmFeatureLeaf->uEbx = 0x8000;     /** @todo figure out virtual NASID. */
         pSvmFeatureLeaf->uEcx = 0;
-        pSvmFeatureLeaf->uEdx = 0; /** @todo Support SVM features */
+        pSvmFeatureLeaf->uEdx = 0;          /** @todo Support SVM features */
     }
     else
Index: /trunk/src/VBox/VMM/include/CPUMInternal.mac
===================================================================
--- /trunk/src/VBox/VMM/include/CPUMInternal.mac	(revision 66039)
+++ /trunk/src/VBox/VMM/include/CPUMInternal.mac	(revision 66040)
@@ -231,14 +231,9 @@
     .Guest.abPadding          resb    12
 %endif
-    .Guest.hwvirt.svm.uMsrHSavePa         resq    1
-    .Guest.hwvirt.svm.u64InterceptCtrl    resq    1
-    .Guest.hwvirt.svm.u32InterceptXcpt    resd    1
-    .Guest.hwvirt.svm.u16InterceptRdCRx   resw    1
-    .Guest.hwvirt.svm.u16InterceptWrCRx   resw    1
-    .Guest.hwvirt.svm.u16InterceptRdDRx   resw    1
-    .Guest.hwvirt.svm.u16InterceptWrDRx   resw    1
-    .Guest.hwvirt.svm.fGif                resb    1
-    .Guest.hwvirt.svm.abPadding           resb    3
-    .Guest.hwvirt.svm.GCPhysNstGstVmcb    resq    1
+    .Guest.hwvirt.svm.uMsrHSavePa    resq    1
+    .Guest.hwvirt.svm.GCPhysVmcb     resq    1
+    .Guest.hwvirt.svm.VmcbCtrl       resb  256
+    .Guest.hwvirt.svm.HostState      resb  184
+    .Guest.hwvirt.svm.fGif           resb    1
     alignb 64
 
@@ -504,14 +499,9 @@
     .Hyper.abPadding          resb    12
 %endif
-    .Hyper.hwvirt.svm.uMsrHSavePa         resq    1
-    .Hyper.hwvirt.svm.u64InterceptCtrl    resq    1
-    .Hyper.hwvirt.svm.u32InterceptXcpt    resd    1
-    .Hyper.hwvirt.svm.u16InterceptRdCRx   resw    1
-    .Hyper.hwvirt.svm.u16InterceptWrCRx   resw    1
-    .Hyper.hwvirt.svm.u16InterceptRdDRx   resw    1
-    .Hyper.hwvirt.svm.u16InterceptWrDRx   resw    1
-    .Hyper.hwvirt.svm.fGif                resb    1
-    .Hyper.hwvirt.svm.abPadding           resb    3
-    .Hyper.hwvirt.svm.GCPhysNstGstVmcb    resq    1
+    .Hyper.hwvirt.svm.uMsrHSavePa    resq    1
+    .Hyper.hwvirt.svm.GCPhysVmcb     resq    1
+    .Hyper.hwvirt.svm.VmcbCtrl       resb  256
+    .Hyper.hwvirt.svm.HostState      resb  184
+    .Hyper.hwvirt.svm.fGif           resb    1
     alignb 64
 
Index: /trunk/src/VBox/VMM/testcase/tstVMStruct.h
===================================================================
--- /trunk/src/VBox/VMM/testcase/tstVMStruct.h	(revision 66039)
+++ /trunk/src/VBox/VMM/testcase/tstVMStruct.h	(revision 66040)
@@ -133,4 +133,7 @@
     GEN_CHECK_OFF(CPUMCTX, hwvirt);
     GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.uMsrHSavePa);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.GCPhysVmcb);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.VmcbCtrl);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.HostState);
     GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.fGif);
     /** @todo add rest of hwvirt fields when code is more
