Index: /trunk/include/VBox/vmm/cpumctx.h
===================================================================
--- /trunk/include/VBox/vmm/cpumctx.h	(revision 66014)
+++ /trunk/include/VBox/vmm/cpumctx.h	(revision 66015)
@@ -719,4 +719,29 @@
 typedef const CPUMCPUID *PCCPUMCPUID;
 
+/**
+ * SVM Host-state area (Nested Hw.virt - VirtualBox's layout).
+ */
+typedef struct SVMHOSTSTATE
+{
+    uint64_t    uEferMsr;
+    uint64_t    uCr0;
+    uint64_t    uCr4;
+    uint64_t    uCr3;
+    uint64_t    uRip;
+    uint64_t    uRsp;
+    uint64_t    uRax;
+    X86RFLAGS   rflags;
+    CPUMSELREG  es;
+    CPUMSELREG  cs;
+    CPUMSELREG  ss;
+    CPUMSELREG  ds;
+    VBOXGDTR    gdtr;
+    VBOXIDTR    idtr;
+} SVMHOSTSTATE;
+/** Pointer to the SVMHOSTSTATE structure. */
+typedef SVMHOSTSTATE *PSVMHOSTSTATE;
+/** Pointer to a const SVMHOSTSTATE structure. */
+typedef const SVMHOSTSTATE *PCSVMHOSTSTATE;
+
 /** @}  */
 
Index: /trunk/include/VBox/vmm/hm.h
===================================================================
--- /trunk/include/VBox/vmm/hm.h	(revision 66014)
+++ /trunk/include/VBox/vmm/hm.h	(revision 66015)
@@ -148,7 +148,5 @@
 VMM_INT_DECL(void)              HMHypercallsDisable(PVMCPU pVCpu);
 
-VMM_INT_DECL(VBOXSTRICTRC)      HMSvmVmmcall(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfRipUpdated);
-VMM_INT_DECL(VBOXSTRICTRC)      HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx);
-VMM_INT_DECL(VBOXSTRICTRC)      HMSvmNstGstVmExit(PVMCPU pVCpu, PCPUMCTX pCtx, int64_t iExitCode, uint64_t uExitInfo1,
+VMM_INT_DECL(VBOXSTRICTRC)      HMSvmNstGstVmExit(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode, uint64_t uExitInfo1,
                                                   uint64_t uExitInfo2);
 VMM_INT_DECL(void)              HMVmxNstGstVmExit(PVMCPU pVCpu, uint16_t uBasicExitReason);
Index: /trunk/include/VBox/vmm/hm_svm.h
===================================================================
--- /trunk/include/VBox/vmm/hm_svm.h	(revision 66014)
+++ /trunk/include/VBox/vmm/hm_svm.h	(revision 66015)
@@ -29,4 +29,5 @@
 #include <VBox/types.h>
 #include <VBox/err.h>
+#include <VBox/vmm/cpumctx.h>
 #include <iprt/assert.h>
 #include <iprt/asm.h>
@@ -72,5 +73,5 @@
  */
 /** Invalid guest state in VMCB. */
-#define SVM_EXIT_INVALID                (-1)
+#define SVM_EXIT_INVALID                ((uint64_t)(-1))
 /** Read from CR0-CR15. */
 #define SVM_EXIT_READ_CR0               0x0
@@ -646,5 +647,5 @@
     uint16_t    u16InterceptWrDRx;
     /** Offset 0x08 - Intercept exception vectors 0-31. */
-    uint32_t    u32InterceptException;
+    uint32_t    u32InterceptXcpt;
     /** Offset 0x0c - Intercept control. */
     uint64_t    u64InterceptCtrl;
@@ -714,5 +715,5 @@
 AssertCompileMemberOffset(SVMVMCBCTRL, u16InterceptRdDRx,       0x04);
 AssertCompileMemberOffset(SVMVMCBCTRL, u16InterceptWrDRx,       0x06);
-AssertCompileMemberOffset(SVMVMCBCTRL, u32InterceptException,   0x08);
+AssertCompileMemberOffset(SVMVMCBCTRL, u32InterceptXcpt,        0x08);
 AssertCompileMemberOffset(SVMVMCBCTRL, u64InterceptCtrl,        0x0c);
 AssertCompileMemberOffset(SVMVMCBCTRL, u8Reserved,              0x14);
@@ -969,4 +970,7 @@
 
 
+VMM_INT_DECL(VBOXSTRICTRC)      HMSvmVmmcall(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfRipUpdated);
+VMM_INT_DECL(VBOXSTRICTRC)      HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcb, PSVMHOSTSTATE pHostState);
+
 /** @} */
 
Index: /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp	(revision 66014)
+++ /trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp	(revision 66015)
@@ -182,8 +182,52 @@
  * @param   pVCpu               The cross context virtual CPU structure.
  * @param   pCtx                Pointer to the guest-CPU context.
- */
-VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    RT_NOREF2(pVCpu, pCtx);
+ * @param   pVmcb               The VMCB of the nested-guest.
+ * @param   pHostState          The host-state save area in the guest.
+ */
+VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcb, PSVMHOSTSTATE pHostState)
+{
+    Assert(pHostState);
+    Assert(pVmcb);
+
+    /*
+     * Save host state.
+     */
+    pHostState->es       = pCtx->es;
+    pHostState->cs       = pCtx->cs;
+    pHostState->ss       = pCtx->ss;
+    pHostState->ds       = pCtx->ds;
+    pHostState->gdtr     = pCtx->gdtr;
+    pHostState->idtr     = pCtx->idtr;
+    pHostState->uEferMsr = pCtx->msrEFER;
+    pHostState->uCr0     = pCtx->cr0;
+    pHostState->uCr3     = pCtx->cr3;
+    pHostState->uCr4     = pCtx->cr4;
+    pHostState->rflags   = pCtx->rflags;
+    pHostState->uRip     = pCtx->rip;
+    pHostState->uRsp     = pCtx->rsp;
+    pHostState->uRax     = pCtx->rax;
+
+    /*
+     * Load controls from VMCB.
+     */
+    pCtx->hwvirt.svm.u16InterceptRdCRx = pVmcb->ctrl.u16InterceptRdCRx;
+    pCtx->hwvirt.svm.u16InterceptWrCRx = pVmcb->ctrl.u16InterceptWrCRx;
+    pCtx->hwvirt.svm.u16InterceptRdDRx = pVmcb->ctrl.u16InterceptRdDRx;
+    pCtx->hwvirt.svm.u16InterceptWrDRx = pVmcb->ctrl.u16InterceptWrDRx;
+    pCtx->hwvirt.svm.u64InterceptCtrl  = pVmcb->ctrl.u64InterceptCtrl;
+    pCtx->hwvirt.svm.u32InterceptXcpt  = pVmcb->ctrl.u32InterceptXcpt;
+    if (!(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN))
+    {
+        Log(("HMSvmVmrun: VMRUN instruction not intercepted -> #VMEXIT\n"));
+        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+    if (!pVmcb->ctrl.TLBCtrl.n.u32ASID)
+    {
+        Log(("HMSvmVmrun: Guest ASID is invalid -> #VMEXIT\n"));
+        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
+
+    /** @todo the rest. */
 
     return VERR_NOT_IMPLEMENTED;
@@ -197,13 +241,13 @@
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        The guest-CPU context.
- * @param   iExitCode   The exit reason.
+ * @param   uExitCode   The exit code.
  * @param   uExitInfo1  The exit info. 1 field.
  * @param   uExitInfo2  The exit info. 2 field.
  */
-VMM_INT_DECL(VBOXSTRICTRC) HMSvmNstGstVmExit(PVMCPU pVCpu, PCPUMCTX pCtx, int64_t iExitCode, uint64_t uExitInfo1,
+VMM_INT_DECL(VBOXSTRICTRC) HMSvmNstGstVmExit(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode, uint64_t uExitInfo1,
                                              uint64_t uExitInfo2)
 {
     if (   CPUMIsGuestInNestedHwVirtMode(pCtx)
-        || iExitCode == SVM_EXIT_INVALID)
+        || uExitCode == SVM_EXIT_INVALID)
     {
         RT_NOREF(pVCpu);
@@ -211,5 +255,5 @@
         pCtx->hwvirt.svm.fGif = 0;
 
-        /** @todo implement VMEXIT. */
+        /** @todo implement \#VMEXIT. */
 
         return VINF_SUCCESS;
@@ -217,5 +261,5 @@
     else
     {
-        Log(("HMNstGstSvmVmExit: Not in SVM guest mode! uExitCode=%RI64 uExitInfo1=%RU64 uExitInfo2=%RU64\n", iExitCode,
+        Log(("HMSvmNstGstVmExit: Not in SVM guest mode! uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitCode,
              uExitInfo1, uExitInfo2));
         RT_NOREF2(uExitInfo1, uExitInfo2);
@@ -225,16 +269,2 @@
 }
 
-
-/**
- * Peforms the functions of a VMRUN instruction.
- *
- * @returns Strict VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pCtx        The guest-CPU context.
- */
-VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmRun(PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    RT_NOREF2(pVCpu, pCtx);
-    return VERR_NOT_IMPLEMENTED;
-}
-
Index: /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
===================================================================
--- /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h	(revision 66014)
+++ /trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h	(revision 66015)
@@ -5895,17 +5895,34 @@
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMRUN))
     {
-        Log(("vmrun: Guest intercept -> VMexit\n"));
+        Log(("vmrun: Guest intercept -> #VMEXIT\n"));
         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 #endif
 
-    /** @todo think - I probably need to map both the HSAVE area page and the
-     *        guest VMCB via iemMemPageMap here and do the copying? */
-    pCtx->hwvirt.svm.GCPhysNstGstVmcb = GCPhysVmcb;
     void *pvVmcb;
     PGMPAGEMAPLOCK PgLockVmcb;
     VBOXSTRICTRC rcStrict = iemMemPageMap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_RW, &pvVmcb, &PgLockVmcb);
     if (rcStrict == VINF_SUCCESS)
-        return HMSvmVmrun(pVCpu, pCtx);
+    {
+        pCtx->hwvirt.svm.GCPhysNstGstVmcb = GCPhysVmcb;
+
+        RTGCPHYS GCPhysHostState = pCtx->hwvirt.svm.uMsrHSavePa;
+        /** @todo SVM does not validate the host-state area beyond checking the
+         *        alignment and range of the physical address. Nothing to prevent users
+         *        from using MMIO or other weird stuff in which case anything might
+         *        happen. */
+        void *pvHostState;
+        PGMPAGEMAPLOCK PgLockHostState;
+        rcStrict = iemMemPageMap(pVCpu, GCPhysHostState, IEM_ACCESS_DATA_RW, &pvHostState, &PgLockHostState);
+        if (rcStrict == VINF_SUCCESS)
+        {
+            PSVMHOSTSTATE pHostState = (PSVMHOSTSTATE)pvHostState;
+            PSVMVMCB      pVmcb      = (PSVMVMCB)pvVmcb;
+            rcStrict = HMSvmVmrun(pVCpu, pCtx, pVmcb, pHostState);
+
+            iemMemPageUnmap(pVCpu, GCPhysHostState, IEM_ACCESS_DATA_RW, pvHostState, &PgLockHostState);
+        }
+        iemMemPageUnmap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_RW, pvVmcb, &PgLockVmcb);
+    }
     RT_NOREF(cbInstr);
     return rcStrict;
@@ -5922,5 +5939,5 @@
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL))
     {
-        Log(("vmrun: Guest intercept -> VMexit\n"));
+        Log(("vmrun: Guest intercept -> #VMEXIT\n"));
         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
@@ -5950,5 +5967,5 @@
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
     {
-        Log(("vmload: Guest intercept -> VMexit\n"));
+        Log(("vmload: Guest intercept -> #VMEXIT\n"));
         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMLOAD, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
@@ -6001,5 +6018,5 @@
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))
     {
-        Log(("vmsave: Guest intercept -> VMexit\n"));
+        Log(("vmsave: Guest intercept -> #VMEXIT\n"));
         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMSAVE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
@@ -6052,5 +6069,5 @@
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
     {
-        Log(("clgi: Guest intercept -> VMexit\n"));
+        Log(("clgi: Guest intercept -> #VMEXIT\n"));
         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_CLGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
@@ -6073,5 +6090,5 @@
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_STGI))
     {
-        Log2(("stgi: Guest intercept -> VMexit\n"));
+        Log2(("stgi: Guest intercept -> #VMEXIT\n"));
         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_STGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
@@ -6094,5 +6111,5 @@
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
     {
-        Log2(("invlpga: Guest intercept -> VMexit\n"));
+        Log2(("invlpga: Guest intercept -> #VMEXIT\n"));
         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVLPGA, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
Index: /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
===================================================================
--- /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 66014)
+++ /trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(revision 66015)
@@ -666,16 +666,16 @@
 
         /* Always trap #AC for reasons of security. */
-        pVmcb->ctrl.u32InterceptException |= RT_BIT_32(X86_XCPT_AC);
+        pVmcb->ctrl.u32InterceptXcpt |= RT_BIT_32(X86_XCPT_AC);
 
         /* Always trap #DB for reasons of security. */
-        pVmcb->ctrl.u32InterceptException |= RT_BIT_32(X86_XCPT_DB);
+        pVmcb->ctrl.u32InterceptXcpt |= RT_BIT_32(X86_XCPT_DB);
 
         /* Trap exceptions unconditionally (debug purposes). */
 #ifdef HMSVM_ALWAYS_TRAP_PF
-        pVmcb->ctrl.u32InterceptException |=   RT_BIT(X86_XCPT_PF);
+        pVmcb->ctrl.u32InterceptXcpt |=   RT_BIT(X86_XCPT_PF);
 #endif
 #ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
         /* If you add any exceptions here, make sure to update hmR0SvmHandleExit(). */
-        pVmcb->ctrl.u32InterceptException |= 0
+        pVmcb->ctrl.u32InterceptXcpt |= 0
                                              | RT_BIT(X86_XCPT_BP)
                                              | RT_BIT(X86_XCPT_DE)
@@ -767,5 +767,5 @@
 
             /* Page faults must be intercepted to implement shadow paging. */
-            pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_PF);
+            pVmcb->ctrl.u32InterceptXcpt |= RT_BIT(X86_XCPT_PF);
         }
 
@@ -776,5 +776,5 @@
         /* Apply the exceptions intercepts needed by the GIM provider. */
         if (pVCpu->hm.s.fGIMTrapXcptUD)
-            pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_UD);
+            pVmcb->ctrl.u32InterceptXcpt |= RT_BIT(X86_XCPT_UD);
 
         /* Setup Pause Filter for guest pause-loop (spinlock) exiting. */
@@ -1076,7 +1076,7 @@
 DECLINLINE(void) hmR0SvmAddXcptIntercept(PSVMVMCB pVmcb, uint32_t u32Xcpt)
 {
-    if (!(pVmcb->ctrl.u32InterceptException & RT_BIT(u32Xcpt)))
-    {
-        pVmcb->ctrl.u32InterceptException |= RT_BIT(u32Xcpt);
+    if (!(pVmcb->ctrl.u32InterceptXcpt & RT_BIT(u32Xcpt)))
+    {
+        pVmcb->ctrl.u32InterceptXcpt |= RT_BIT(u32Xcpt);
         pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
     }
@@ -1096,7 +1096,7 @@
     Assert(u32Xcpt != X86_XCPT_AC);
 #ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
-    if (pVmcb->ctrl.u32InterceptException & RT_BIT(u32Xcpt))
-    {
-        pVmcb->ctrl.u32InterceptException &= ~RT_BIT(u32Xcpt);
+    if (pVmcb->ctrl.u32InterceptXcpt & RT_BIT(u32Xcpt))
+    {
+        pVmcb->ctrl.u32InterceptXcpt &= ~RT_BIT(u32Xcpt);
         pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
     }
@@ -1538,5 +1538,5 @@
     }
 
-    Assert(pVmcb->ctrl.u32InterceptException & RT_BIT_32(X86_XCPT_DB));
+    Assert(pVmcb->ctrl.u32InterceptXcpt & RT_BIT_32(X86_XCPT_DB));
     if (fInterceptMovDRx)
     {
@@ -2767,5 +2767,5 @@
         Log4(("ctrl.u16InterceptRdDRx            %#x\n",      pVmcb->ctrl.u16InterceptRdDRx));
         Log4(("ctrl.u16InterceptWrDRx            %#x\n",      pVmcb->ctrl.u16InterceptWrDRx));
-        Log4(("ctrl.u32InterceptException        %#x\n",      pVmcb->ctrl.u32InterceptException));
+        Log4(("ctrl.u32InterceptXcpt             %#x\n",      pVmcb->ctrl.u32InterceptXcpt));
         Log4(("ctrl.u64InterceptCtrl             %#RX64\n",   pVmcb->ctrl.u64InterceptCtrl)); 
         Log4(("ctrl.u64IOPMPhysAddr              %#RX64\n",   pVmcb->ctrl.u64IOPMPhysAddr));
@@ -3283,5 +3283,5 @@
     hmR0SvmSaveGuestState(pVCpu, pMixedCtx);                    /* Save the guest state from the VMCB to the guest-CPU context. */
 
-    if (RT_LIKELY(pSvmTransient->u64ExitCode != (uint64_t)SVM_EXIT_INVALID))
+    if (RT_LIKELY(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID))
     {
         if (pVCpu->hm.s.svm.fSyncVTpr)
@@ -3345,6 +3345,6 @@
         hmR0SvmPostRunGuest(pVM, pVCpu, pCtx, &SvmTransient, rc);
 
-        if (RT_UNLIKELY(   rc != VINF_SUCCESS                                         /* Check for VMRUN errors. */
-                        || SvmTransient.u64ExitCode == (uint64_t)SVM_EXIT_INVALID))   /* Check for invalid guest-state errors. */
+        if (RT_UNLIKELY(   rc != VINF_SUCCESS                               /* Check for VMRUN errors. */
+                        || SvmTransient.u64ExitCode == SVM_EXIT_INVALID))   /* Check for invalid guest-state errors. */
         {
             if (rc == VINF_SUCCESS)
@@ -3423,6 +3423,6 @@
          */
         hmR0SvmPostRunGuest(pVM, pVCpu, pCtx, &SvmTransient, rc);
-        if (RT_UNLIKELY(   rc != VINF_SUCCESS                                         /* Check for VMRUN errors. */
-                        || SvmTransient.u64ExitCode == (uint64_t)SVM_EXIT_INVALID))   /* Check for invalid guest-state errors. */
+        if (RT_UNLIKELY(   rc != VINF_SUCCESS                               /* Check for VMRUN errors. */
+                        || SvmTransient.u64ExitCode == SVM_EXIT_INVALID))   /* Check for invalid guest-state errors. */
         {
             if (rc == VINF_SUCCESS)
@@ -3517,5 +3517,5 @@
 DECLINLINE(int) hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
 {
-    Assert(pSvmTransient->u64ExitCode != (uint64_t)SVM_EXIT_INVALID);
+    Assert(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID);
     Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);
 
@@ -4070,5 +4070,5 @@
                     Log4(("IDT: Nested #AC - Bad guest\n"));
                 }
-                else if (   (pVmcb->ctrl.u32InterceptException & HMSVM_CONTRIBUTORY_XCPT_MASK)
+                else if (   (pVmcb->ctrl.u32InterceptXcpt & HMSVM_CONTRIBUTORY_XCPT_MASK)
                          && hmR0SvmIsContributoryXcpt(uExitVector)
                          && (   hmR0SvmIsContributoryXcpt(uIdtVector)
