Index: /trunk/include/VBox/vmm/cpum.h
===================================================================
--- /trunk/include/VBox/vmm/cpum.h	(revision 65932)
+++ /trunk/include/VBox/vmm/cpum.h	(revision 65933)
@@ -1162,5 +1162,5 @@
  *
  * @returns true if in real mode, otherwise false.
- * @param   pCtx    Current CPU context
+ * @param   pCtx    Current CPU context.
  */
 DECLINLINE(bool) CPUMIsGuestInRealModeEx(PCPUMCTX pCtx)
@@ -1173,5 +1173,5 @@
  *
  * @returns @c true if it is, @c false if not.
- * @param   pCtx    Current CPU context
+ * @param   pCtx    Current CPU context.
  */
 DECLINLINE(bool) CPUMIsGuestInRealOrV86ModeEx(PCPUMCTX pCtx)
@@ -1185,5 +1185,5 @@
  *
  * @returns @c true if it is, @c false if not.
- * @param   pCtx    Current CPU context
+ * @param   pCtx    Current CPU context.
  */
 DECLINLINE(bool) CPUMIsGuestInV86ModeEx(PCPUMCTX pCtx)
@@ -1196,5 +1196,5 @@
  *
  * @returns true if in paged protected mode, otherwise false.
- * @param   pCtx    Current CPU context
+ * @param   pCtx    Current CPU context.
  */
 DECLINLINE(bool) CPUMIsGuestInPagedProtectedModeEx(PCPUMCTX pCtx)
@@ -1207,5 +1207,5 @@
  *
  * @returns true if in long mode, otherwise false.
- * @param   pCtx    Current CPU context
+ * @param   pCtx    Current CPU context.
  */
 DECLINLINE(bool) CPUMIsGuestInLongModeEx(PCPUMCTX pCtx)
@@ -1220,5 +1220,5 @@
  *
  * @returns true if in 64 bits protected mode, otherwise false.
- * @param   pCtx    Current CPU context
+ * @param   pCtx    Current CPU context.
  */
 DECLINLINE(bool) CPUMIsGuestIn64BitCodeEx(PCPUMCTX pCtx)
@@ -1235,5 +1235,5 @@
  *
  * @returns true if paging is enabled, otherwise false.
- * @param   pCtx    Current CPU context
+ * @param   pCtx    Current CPU context.
  */
 DECLINLINE(bool) CPUMIsGuestPagingEnabledEx(PCPUMCTX pCtx)
@@ -1246,5 +1246,5 @@
  *
  * @returns true if in PAE mode, otherwise false.
- * @param   pCtx    Current CPU context
+ * @param   pCtx    Current CPU context.
  */
 DECLINLINE(bool) CPUMIsGuestInPAEModeEx(PCPUMCTX pCtx)
@@ -1255,4 +1255,15 @@
             && CPUMIsGuestPagingEnabledEx(pCtx)
             && !(pCtx->msrEFER & MSR_K6_EFER_LMA));
+}
+
+/**
+ * Tests if the guest has AMD SVM enabled or not.
+ *
+ * @returns true if SVM is enabled, otherwise false.
+ * @param   pCtx    Current CPU context.
+ */
+DECLINLINE(bool) CPUMIsGuestSvmEnabled(PCPUMCTX pCtx)
+{
+    return RT_BOOL(pCtx->msrEFER & MSR_K6_EFER_SVME);
 }
 
Index: /trunk/include/VBox/vmm/hm_svm.h
===================================================================
--- /trunk/include/VBox/vmm/hm_svm.h	(revision 65932)
+++ /trunk/include/VBox/vmm/hm_svm.h	(revision 65933)
@@ -805,4 +805,6 @@
 /** Pointer to the SVMVMCB structure. */
 typedef SVMVMCB *PSVMVMCB;
+/** Pointer to a const SVMVMCB structure. */
+typedef const SVMVMCB *PCSVMVMCB;
 AssertCompileMemberOffset(SVMVMCB, ctrl, 0x00);
 AssertCompileMemberOffset(SVMVMCB, ctrl.u16InterceptRdCRx, 0x00);
@@ -890,4 +892,61 @@
 #endif /* IN_RING0 */
 
+/** @name Segment attribute conversion between CPU and AMD-V VMCB format.
+ *
+ * The CPU format of the segment attribute is described in X86DESCATTRBITS
+ * which is 16-bits (i.e. includes 4 bits of the segment limit).
+ *
+ * In the AMD-V VMCB format the segment attribute is a compact 12-bits (strictly
+ * only the attribute bits and nothing else). Upper 4-bits are unused.
+ * @{
+ */
+#define HMSVM_CPU_2_VMCB_SEG_ATTR(a)       ( ((a) & 0xff) | (((a) & 0xf000) >> 4) )
+#define HMSVM_VMCB_2_CPU_SEG_ATTR(a)       ( ((a) & 0xff) | (((a) & 0x0f00) << 4) )
+
+/** @def HMSVM_SEG_REG_COPY_TO_VMCB
+ * Copies the specified segment register to a VMCB from a virtual CPU context.
+ *  
+ * @param   a_pCtx      The virtual-CPU context. 
+ * @param   a_pVmcb     The VMCB. 
+ * @param   REG         The segment register in the VMCB struct. (CS, DS, FS 
+ *                      etc.)
+ * @param   reg         The segment register in the virtual CPU struct (cs, ds, 
+ *                      fs etc.)
+ */
+#define HMSVM_SEG_REG_COPY_TO_VMCB(a_pCtx, a_pVmcb, REG, reg) \
+    do \
+    { \
+        Assert((a_pCtx)->reg.fFlags & CPUMSELREG_FLAGS_VALID);  \
+        Assert((a_pCtx)->reg.ValidSel == (a_pCtx)->reg.Sel);    \
+        (a_pVmcb)->guest.REG.u16Sel    = (a_pCtx)->reg.Sel;      \
+        (a_pVmcb)->guest.REG.u32Limit  = (a_pCtx)->reg.u32Limit; \
+        (a_pVmcb)->guest.REG.u64Base   = (a_pCtx)->reg.u64Base;  \
+        (a_pVmcb)->guest.REG.u16Attr   = HMSVM_CPU_2_VMCB_SEG_ATTR((a_pCtx)->reg.Attr.u); \
+    } while (0)
+
+/** @def HMSVM_SEG_REG_COPY_FROM_VMCB
+ * Copies the specified segment register from the VMCB to a virtual CPU 
+ * context. 
+ *  
+ * @param   a_pCtx      The virtual-CPU context. 
+ * @param   a_pVmcb     The VMCB. 
+ * @param   REG         The segment register in the VMCB struct. (CS, DS, FS 
+ *                      etc.)
+ * @param   reg         The segment register in the virtual CPU struct (cs, ds, 
+ *                      fs etc.)
+ */
+#define HMSVM_SEG_REG_COPY_FROM_VMCB(a_pCtx, a_pVmcb, REG, reg) \
+    do \
+    { \
+        (a_pCtx)->reg.Sel       = (a_pVmcb)->guest.REG.u16Sel;   \
+        (a_pCtx)->reg.ValidSel  = (a_pVmcb)->guest.REG.u16Sel;   \
+        (a_pCtx)->reg.fFlags    = CPUMSELREG_FLAGS_VALID;    \
+        (a_pCtx)->reg.u32Limit  = (a_pVmcb)->guest.REG.u32Limit; \
+        (a_pCtx)->reg.u64Base   = (a_pVmcb)->guest.REG.u64Base;  \
+        (a_pCtx)->reg.Attr.u    = HMSVM_VMCB_2_CPU_SEG_ATTR((a_pVmcb)->guest.REG.u16Attr); \
+    } while (0)
+/** @} */
+
+
 /** @} */
 
Index: /trunk/include/VBox/vmm/iem.h
===================================================================
--- /trunk/include/VBox/vmm/iem.h	(revision 65932)
+++ /trunk/include/VBox/vmm/iem.h	(revision 65933)
@@ -135,4 +135,6 @@
 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr);
 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr);
+VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr);
+VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr);
 #endif
 /** @}  */
Index: /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp	(revision 65932)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAll.cpp	(revision 65933)
@@ -367,5 +367,32 @@
 
 #ifdef VBOX_WITH_NESTED_HWVIRT
-/** 
+/**
+ * Check the common SVM instruction preconditions.
+ */
+#define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
+    do { \
+        if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
+        { \
+            Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
+            return iemRaiseUndefinedOpcode(a_pVCpu); \
+        } \
+        if (IEM_IS_REAL_OR_V86_MODE(a_pVCpu)) \
+        { \
+            Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
+            return iemRaiseUndefinedOpcode(a_pVCpu); \
+        } \
+        if ((a_pVCpu)->iem.s.uCpl != 0) \
+        { \
+            Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
+            return iemRaiseGeneralProtectionFault0(a_pVCpu); \
+        } \
+    } while (0)
+
+/**
+ * Check if an SVM is enabled.
+ */
+#define IEM_IS_SVM_ENABLED(a_pVCpu)                         (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
+
+/**
  * Check if an SVM control/instruction intercept is set.
  */ 
@@ -11673,6 +11700,18 @@
     { \
         if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
-            return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
+            return IEMOP_RAISE_INVALID_OPCODE(); \
     } while (0)
+
+#if 0
+#ifdef VBOX_WITH_NESTED_HWVIRT
+/** The instruction raises an \#UD when SVM is not enabled. */
+#define IEMOP_HLP_NEEDS_SVM_ENABLED() \
+    do \
+    { \
+        if (!IEM_IS_SVM_ENABLED(pVCpu)) \
+            return IEMOP_RAISE_INVALID_OPCODE(); \
+    } while (0)
+#endif
+#endif
 
 /** The instruction is not available in 64-bit mode, throw \#UD if we're in
@@ -14948,4 +14987,40 @@
     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
 }
+
+
+/**
+ * Interface for HM and EM to emulate the VMLOAD instruction.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
+ * @param   cbInstr     The instruction length in bytes.
+ * @thread  EMT(pVCpu)
+ */
+VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
+{
+    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
+
+    iemInitExec(pVCpu, false /*fBypassHandlers*/);
+    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
+    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
+}
+
+
+/**
+ * Interface for HM and EM to emulate the VMSAVE instruction.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
+ * @param   cbInstr     The instruction length in bytes.
+ * @thread  EMT(pVCpu)
+ */
+VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
+{
+    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
+
+    iemInitExec(pVCpu, false /*fBypassHandlers*/);
+    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
+    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
+}
 #endif /* VBOX_WITH_NESTED_HWVIRT */
 
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h	(revision 65932)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h	(revision 65933)
@@ -5877,4 +5877,106 @@
 #ifdef VBOX_WITH_NESTED_HWVIRT
 /**
+ * Implements 'VMLOAD'.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_vmload)
+{
+    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);
+#ifndef IN_RC
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
+    {
+        Log(("vmload: Guest intercept -> VMexit\n"));
+        HMNstGstSvmVmExit(pVCpu, SVM_EXIT_VMLOAD);
+        return VINF_EM_RESCHEDULE;
+    }
+#endif
+
+    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
+    if (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
+    {
+        Log(("vmload: VMCB physaddr (%#RGp) not 4K aligned -> #GP(0)\n", GCPhysVmcb));
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+    }
+
+    void *pvVmcb;
+    PGMPAGEMAPLOCK PgLockVmcb;
+    VBOXSTRICTRC rcStrict = iemMemPageMap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_R, &pvVmcb, &PgLockVmcb);
+    if (rcStrict == VINF_SUCCESS)
+    {
+        PCSVMVMCB pVmcb = (PCSVMVMCB)pvVmcb;
+        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcb, FS, fs);
+        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcb, GS, gs);
+        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcb, TR, tr);
+        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcb, LDTR, ldtr);
+
+        pCtx->msrKERNELGSBASE = pVmcb->guest.u64KernelGSBase;
+        pCtx->msrSTAR         = pVmcb->guest.u64STAR;
+        pCtx->msrLSTAR        = pVmcb->guest.u64LSTAR;
+        pCtx->msrCSTAR        = pVmcb->guest.u64CSTAR;
+        pCtx->msrSFMASK       = pVmcb->guest.u64SFMASK;
+
+        pCtx->SysEnter.cs     = pVmcb->guest.u64SysEnterCS;
+        pCtx->SysEnter.esp    = pVmcb->guest.u64SysEnterESP;
+        pCtx->SysEnter.eip    = pVmcb->guest.u64SysEnterEIP;
+
+        iemMemPageUnmap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_R, pvVmcb, &PgLockVmcb);
+        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+    }
+    return rcStrict;
+}
+
+
+/**
+ * Implements 'VMSAVE'.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_vmsave)
+{
+    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmsave);
+#ifndef IN_RC
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))
+    {
+        Log(("vmsave: Guest intercept -> VMexit\n"));
+        HMNstGstSvmVmExit(pVCpu, SVM_EXIT_VMSAVE);
+        return VINF_EM_RESCHEDULE;
+    }
+#endif
+
+    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
+    if (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
+    {
+        Log(("vmsave: VMCB physaddr (%#RGp) not 4K aligned -> #GP(0)\n", GCPhysVmcb));
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+    }
+
+    void *pvVmcb;
+    PGMPAGEMAPLOCK PgLockVmcb;
+    VBOXSTRICTRC rcStrict = iemMemPageMap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_RW, &pvVmcb, &PgLockVmcb);
+    if (rcStrict == VINF_SUCCESS)
+    {
+        PSVMVMCB pVmcb = (PSVMVMCB)pvVmcb;
+        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, FS, fs);
+        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, GS, gs);
+        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, TR, tr);
+        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, LDTR, ldtr);
+
+        pVmcb->guest.u64KernelGSBase  = pCtx->msrKERNELGSBASE;
+        pVmcb->guest.u64STAR          = pCtx->msrSTAR;
+        pVmcb->guest.u64LSTAR         = pCtx->msrLSTAR;
+        pVmcb->guest.u64CSTAR         = pCtx->msrCSTAR;
+        pVmcb->guest.u64SFMASK        = pCtx->msrSFMASK;
+
+        pVmcb->guest.u64SysEnterCS    = pCtx->SysEnter.cs;
+        pVmcb->guest.u64SysEnterESP   = pCtx->SysEnter.esp;
+        pVmcb->guest.u64SysEnterEIP   = pCtx->SysEnter.eip;
+
+        iemMemPageUnmap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_RW, pvVmcb, &PgLockVmcb);
+        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+    }
+    return rcStrict;
+}
+
+
+/**
  * Implements 'CLGI'.
  */
@@ -5882,23 +5984,9 @@
 {
     PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
-    if (!(pCtx->msrEFER & MSR_K6_EFER_SVME))
-    {
-        Log2(("clgi: EFER.SVME not enabled -> #UD\n"));
-        return iemRaiseUndefinedOpcode(pVCpu);
-    }
-    if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
-    {
-        Log2(("clgi: Real or v8086 mode -> #UD\n"));
-        return iemRaiseUndefinedOpcode(pVCpu);
-    }
-    if (pVCpu->iem.s.uCpl != 0)
-    {
-        Log2(("clgi: CPL != 0 -> #GP(0)\n"));
-        return iemRaiseGeneralProtectionFault0(pVCpu);
-    }
+    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, clgi);
 #ifndef IN_RC
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
     {
-        Log2(("clgi: Guest intercept -> VMexit\n"));
+        Log(("clgi: Guest intercept -> VMexit\n"));
         HMNstGstSvmVmExit(pVCpu, SVM_EXIT_CLGI);
         return VINF_EM_RESCHEDULE;
@@ -5918,19 +6006,5 @@
 {
     PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
-    if (!(pCtx->msrEFER & MSR_K6_EFER_SVME))
-    {
-        Log2(("stgi: EFER.SVME not enabled -> #UD\n"));
-        return iemRaiseUndefinedOpcode(pVCpu);
-    }
-    if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
-    {
-        Log2(("stgi: Real or v8086 mode -> #UD\n"));
-        return iemRaiseUndefinedOpcode(pVCpu);
-    }
-    if (pVCpu->iem.s.uCpl != 0)
-    {
-        Log2(("stgi: CPL != 0 -> #GP(0)\n"));
-        return iemRaiseGeneralProtectionFault0(pVCpu);
-    }
+    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, stgi);
 #ifndef IN_RC
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_STGI))
@@ -5943,4 +6017,31 @@
 
     pCtx->hwvirt.svm.fGif = 1;
+    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Implements 'INVLPGA'.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_invlpga)
+{
+    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, invlpga);
+#ifndef IN_RC
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
+    {
+        Log2(("invlpga: Guest intercept -> VMexit\n"));
+        HMNstGstSvmVmExit(pVCpu, SVM_EXIT_INVLPGA);
+        return VINF_EM_RESCHEDULE;
+    }
+#endif
+
+    RTGCPTR  const GCPtrPage = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
+    /** @todo PGM needs virtual ASID support. */
+#if 0
+    uint32_t const uAsid     = pCtx->ecx;
+#endif
+    PGMInvalidatePage(pVCpu, GCPtrPage);
     iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     return VINF_SUCCESS;
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h	(revision 65932)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h	(revision 65933)
@@ -445,4 +445,44 @@
 FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmmcall);
 
+#ifdef VBOX_WITH_NESTED_HWVIRT
+/** Opcode 0x0f 0x01 0xda. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_vmload)
+{
+    IEMOP_MNEMONIC(vmload, "vmload");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmload);
+}
+
+
+/** Opcode 0x0f 0x01 0xdb. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_vmsave)
+{
+    IEMOP_MNEMONIC(vmsave, "vmsave");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmsave);
+}
+
+
+/** Opcode 0x0f 0x01 0xdc. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_stgi)
+{
+    IEMOP_MNEMONIC(stgi, "stgi");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stgi);
+}
+
+
+/** Opcode 0x0f 0x01 0xdd. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_clgi)
+{
+    IEMOP_MNEMONIC(clgi, "clgi");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clgi);
+}
+
+
+/** Opcode 0x0f 0x01 0xdf. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_invlpga)
+{
+    IEMOP_MNEMONIC(invlpga, "invlpga");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_invlpga);
+}
+#else
 /** Opcode 0x0f 0x01 0xda. */
 FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmload);
@@ -451,19 +491,4 @@
 FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmsave);
 
-#ifdef VBOX_WITH_NESTED_HWVIRT
-/** Opcode 0x0f 0x01 0xdc. */
-FNIEMOP_DEF(iemOp_Grp7_Amd_stgi)
-{
-    IEMOP_MNEMONIC(stgi, "stgi");
-    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stgi);
-}
-
-/** Opcode 0x0f 0x01 0xdd. */
-FNIEMOP_DEF(iemOp_Grp7_Amd_clgi)
-{
-    IEMOP_MNEMONIC(clgi, "clgi");
-    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clgi);
-}
-#else
 /** Opcode 0x0f 0x01 0xdc. */
 FNIEMOP_UD_STUB(iemOp_Grp7_Amd_stgi);
@@ -471,11 +496,11 @@
 /** Opcode 0x0f 0x01 0xdd. */
 FNIEMOP_UD_STUB(iemOp_Grp7_Amd_clgi);
+
+/** Opcode 0x0f 0x01 0xdf. */
+FNIEMOP_UD_STUB(iemOp_Grp7_Amd_invlpga);
 #endif /* VBOX_WITH_NESTED_HWVIRT */
 
 /** Opcode 0x0f 0x01 0xde. */
 FNIEMOP_UD_STUB(iemOp_Grp7_Amd_skinit);
-
-/** Opcode 0x0f 0x01 0xdf. */
-FNIEMOP_UD_STUB(iemOp_Grp7_Amd_invlpga);
 
 /** Opcode 0x0f 0x01 /4. */
Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 65932)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 65933)
@@ -62,42 +62,4 @@
  *  switch to a "static DECLCALLBACK(int)". */
 #define HMSVM_EXIT_DECL                 static int
-
-/** @name Segment attribute conversion between CPU and AMD-V VMCB format.
- *
- * The CPU format of the segment attribute is described in X86DESCATTRBITS
- * which is 16-bits (i.e. includes 4 bits of the segment limit).
- *
- * The AMD-V VMCB format the segment attribute is compact 12-bits (strictly
- * only the attribute bits and nothing else). Upper 4-bits are unused.
- *
- * @{ */
-#define HMSVM_CPU_2_VMCB_SEG_ATTR(a)       ( ((a) & 0xff) | (((a) & 0xf000) >> 4) )
-#define HMSVM_VMCB_2_CPU_SEG_ATTR(a)       ( ((a) & 0xff) | (((a) & 0x0f00) << 4) )
-/** @} */
-
-/** @name Macros for loading, storing segment registers to/from the VMCB.
- *  @{ */
-#define HMSVM_LOAD_SEG_REG(REG, reg) \
-    do \
-    { \
-        Assert(pCtx->reg.fFlags & CPUMSELREG_FLAGS_VALID); \
-        Assert(pCtx->reg.ValidSel == pCtx->reg.Sel); \
-        pVmcb->guest.REG.u16Sel     = pCtx->reg.Sel; \
-        pVmcb->guest.REG.u32Limit   = pCtx->reg.u32Limit; \
-        pVmcb->guest.REG.u64Base    = pCtx->reg.u64Base; \
-        pVmcb->guest.REG.u16Attr    = HMSVM_CPU_2_VMCB_SEG_ATTR(pCtx->reg.Attr.u); \
-    } while (0)
-
-#define HMSVM_SAVE_SEG_REG(REG, reg) \
-    do \
-    { \
-        pMixedCtx->reg.Sel       = pVmcb->guest.REG.u16Sel; \
-        pMixedCtx->reg.ValidSel  = pVmcb->guest.REG.u16Sel; \
-        pMixedCtx->reg.fFlags    = CPUMSELREG_FLAGS_VALID; \
-        pMixedCtx->reg.u32Limit  = pVmcb->guest.REG.u32Limit; \
-        pMixedCtx->reg.u64Base   = pVmcb->guest.REG.u64Base; \
-        pMixedCtx->reg.Attr.u    = HMSVM_VMCB_2_CPU_SEG_ATTR(pVmcb->guest.REG.u16Attr); \
-    } while (0)
-/** @} */
 
 /** Macro for checking and returning from the using function for
@@ -311,4 +273,6 @@
 static FNSVMEXITHANDLER hmR0SvmExitClgi;
 static FNSVMEXITHANDLER hmR0SvmExitStgi;
+static FNSVMEXITHANDLER hmR0SvmExitVmload;
+static FNSVMEXITHANDLER hmR0SvmExitVmsave;
 #endif
 /** @} */
@@ -1336,10 +1300,10 @@
     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS))
     {
-        HMSVM_LOAD_SEG_REG(CS, cs);
-        HMSVM_LOAD_SEG_REG(SS, ss);
-        HMSVM_LOAD_SEG_REG(DS, ds);
-        HMSVM_LOAD_SEG_REG(ES, es);
-        HMSVM_LOAD_SEG_REG(FS, fs);
-        HMSVM_LOAD_SEG_REG(GS, gs);
+        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, CS, cs);
+        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, SS, ss);
+        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, DS, ds);
+        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, ES, es);
+        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, FS, fs);
+        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, GS, gs); 
 
         pVmcb->guest.u8CPL = pCtx->ss.Attr.n.u2Dpl;
@@ -1351,5 +1315,5 @@
     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR))
     {
-        HMSVM_LOAD_SEG_REG(TR, tr);
+        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, TR, tr);
         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_TR);
     }
@@ -1358,5 +1322,5 @@
     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR))
     {
-        HMSVM_LOAD_SEG_REG(LDTR, ldtr);
+        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, LDTR, ldtr);
         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LDTR);
     }
@@ -1986,10 +1950,10 @@
      * Guest segment registers (includes FS, GS base MSRs for 64-bit guests).
      */
-    HMSVM_SAVE_SEG_REG(CS, cs);
-    HMSVM_SAVE_SEG_REG(SS, ss);
-    HMSVM_SAVE_SEG_REG(DS, ds);
-    HMSVM_SAVE_SEG_REG(ES, es);
-    HMSVM_SAVE_SEG_REG(FS, fs);
-    HMSVM_SAVE_SEG_REG(GS, gs);
+    HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, pVmcb, CS, cs);
+    HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, pVmcb, SS, ss);
+    HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, pVmcb, DS, ds);
+    HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, pVmcb, ES, es);
+    HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, pVmcb, FS, fs);
+    HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, pVmcb, GS, gs);
 
     /*
@@ -2041,5 +2005,5 @@
      * ASSUME that it's normally correct and that we're in 32-bit or 64-bit mode.
      */
-    HMSVM_SAVE_SEG_REG(TR, tr);
+    HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, pVmcb, TR, tr);
     if (pMixedCtx->tr.Attr.n.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
     {
@@ -2054,5 +2018,5 @@
      * Guest Descriptor-Table registers.
      */
-    HMSVM_SAVE_SEG_REG(LDTR, ldtr);
+    HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, pVmcb, LDTR, ldtr);
     pMixedCtx->gdtr.cbGdt = pVmcb->guest.GDTR.u32Limit;
     pMixedCtx->gdtr.pGdt  = pVmcb->guest.GDTR.u64Base;
@@ -3688,15 +3652,17 @@
 
 #ifdef VBOX_WITH_NESTED_HWVIRT
-                case SVM_EXIT_CLGI: return hmR0SvmExitClgi(pVCpu, pCtx, pSvmTransient);
-                case SVM_EXIT_STGI: return hmR0SvmExitStgi(pVCpu, pCtx, pSvmTransient);
+                case SVM_EXIT_CLGI:     return hmR0SvmExitClgi(pVCpu, pCtx, pSvmTransient);
+                case SVM_EXIT_STGI:     return hmR0SvmExitStgi(pVCpu, pCtx, pSvmTransient);
+                case SVM_EXIT_VMLOAD:   return hmR0SvmExitVmload(pVCpu, pCtx, pSvmTransient);
+                case SVM_EXIT_VMSAVE:   return hmR0SvmExitVmsave(pVCpu, pCtx, pSvmTransient);
 #else
                 case SVM_EXIT_CLGI:
                 case SVM_EXIT_STGI:
+                case SVM_EXIT_VMLOAD:
+                case SVM_EXIT_VMSAVE:
 #endif
                 case SVM_EXIT_INVLPGA:
                 case SVM_EXIT_RSM:
                 case SVM_EXIT_VMRUN:
-                case SVM_EXIT_VMLOAD:
-                case SVM_EXIT_VMSAVE:
                 case SVM_EXIT_SKINIT:
                     return hmR0SvmExitSetPendingXcptUD(pVCpu, pCtx, pSvmTransient);
@@ -5697,38 +5663,52 @@
 #ifdef VBOX_WITH_NESTED_HWVIRT
 /**
- * \#VMEXIT handler for RDPMC (SVM_EXIT_CLGI). Conditional 
- * \#VMEXIT. 
+ * \#VMEXIT handler for CLGI (SVM_EXIT_CLGI). Conditional \#VMEXIT.
  */
 HMSVM_EXIT_DECL hmR0SvmExitClgi(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    if (pVM->cpum.ro.GuestFeatures.fSvm)
-    {
-        /** @todo Stat. */
-        /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClgi); */
-        VBOXSTRICTRC rcStrict = IEMExecDecodedClgi(pVCpu, 3);
-        return VBOXSTRICTRC_VAL(rcStrict);
-    }
-    return hmR0SvmExitXcptUD(pVCpu, pCtx, pSvmTransient);
-}
-
-
-/**
- * \#VMEXIT handler for RDPMC (SVM_EXIT_STGI). Conditional
- * \#VMEXIT.
+    /** @todo Stat. */
+    /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClgi); */
+    VBOXSTRICTRC rcStrict = IEMExecDecodedClgi(pVCpu, 3);
+    return VBOXSTRICTRC_VAL(rcStrict);
+}
+
+
+/**
+ * \#VMEXIT handler for STGI (SVM_EXIT_STGI). Conditional \#VMEXIT.
  */
 HMSVM_EXIT_DECL hmR0SvmExitStgi(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    if (pVM->cpum.ro.GuestFeatures.fSvm)
-    {
-        /** @todo Stat. */
-        /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitStgi); */
-        VBOXSTRICTRC rcStrict = IEMExecDecodedStgi(pVCpu, 3);
-        return VBOXSTRICTRC_VAL(rcStrict);
-    }
-    return hmR0SvmExitXcptUD(pVCpu, pCtx, pSvmTransient);
+    /** @todo Stat. */
+    /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitStgi); */
+    VBOXSTRICTRC rcStrict = IEMExecDecodedStgi(pVCpu, 3);
+    return VBOXSTRICTRC_VAL(rcStrict);
+}
+
+
+/**
+ * \#VMEXIT handler for VMLOAD (SVM_EXIT_VMLOAD). Conditional \#VMEXIT.
+ */
+HMSVM_EXIT_DECL hmR0SvmExitVmload(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+{
+    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+    /** @todo Stat. */
+    /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmload); */
+    VBOXSTRICTRC rcStrict = IEMExecDecodedVmload(pVCpu, 3);
+    return VBOXSTRICTRC_VAL(rcStrict);
+}
+
+
+/**
+ * \#VMEXIT handler for VMSAVE (SVM_EXIT_VMSAVE). Conditional \#VMEXIT.
+ */
+HMSVM_EXIT_DECL hmR0SvmExitVmsave(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+{
+    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+    /** @todo Stat. */
+    /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmsave); */
+    VBOXSTRICTRC rcStrict = IEMExecDecodedVmsave(pVCpu, 3);
+    return VBOXSTRICTRC_VAL(rcStrict);
 }
 #endif /* VBOX_WITH_NESTED_HWVIRT */
