Index: /trunk/include/iprt/mangling.h
===================================================================
--- /trunk/include/iprt/mangling.h	(revision 40965)
+++ /trunk/include/iprt/mangling.h	(revision 40966)
@@ -1220,4 +1220,5 @@
 # define RTSortIsSorted                                 RT_MANGLER(RTSortIsSorted)
 # define RTSpinlockAcquire                              RT_MANGLER(RTSpinlockAcquire)
+# define RTSpinlockAcquireNoInts                        RT_MANGLER(RTSpinlockAcquireNoInts)
 # define RTSpinlockCreate                               RT_MANGLER(RTSpinlockCreate)
 # define RTSpinlockDestroy                              RT_MANGLER(RTSpinlockDestroy)
@@ -1751,4 +1752,5 @@
 # define g_aRTUniUpperRanges                            RT_MANGLER(g_aRTUniUpperRanges)
 # define g_fRTAlignmentChecks                           RT_MANGLER(g_fRTAlignmentChecks)
+# define g_hKrnlDbgInfo                                 RT_MANGLER(g_hKrnlDbgInfo) /* solaris */
 # define g_pStdErr                                      RT_MANGLER(g_pStdErr)
 # define g_pStdIn                                       RT_MANGLER(g_pStdIn)
Index: /trunk/src/VBox/Runtime/Makefile.kmk
===================================================================
--- /trunk/src/VBox/Runtime/Makefile.kmk	(revision 40965)
+++ /trunk/src/VBox/Runtime/Makefile.kmk	(revision 40966)
@@ -1915,6 +1915,5 @@
  	r0drv/solaris/vbi/thread2-r0drv-solaris.c \
  	r0drv/solaris/vbi/time-r0drv-solaris.c \
- 	r0drv/solaris/vbi/timer-r0drv-solaris.c \
- 	r0drv/solaris/vbi/i86pc/os/vbi.c
+ 	r0drv/solaris/vbi/timer-r0drv-solaris.c
 
 
Index: /trunk/src/VBox/Runtime/r0drv/solaris/RTLogWriteDebugger-r0drv-solaris.c
===================================================================
--- /trunk/src/VBox/Runtime/r0drv/solaris/RTLogWriteDebugger-r0drv-solaris.c	(revision 40965)
+++ /trunk/src/VBox/Runtime/r0drv/solaris/RTLogWriteDebugger-r0drv-solaris.c	(revision 40966)
@@ -45,5 +45,5 @@
     if (pch[cb] != '\0')
         AssertBreakpoint();
-    if (    !g_frtSolarisSplSetsEIF
+    if (    !g_frtSolSplSetsEIF
 #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
         ||  ASMIntAreEnabled()
Index: /trunk/src/VBox/Runtime/r0drv/solaris/dbg-r0drv-solaris.c
===================================================================
--- /trunk/src/VBox/Runtime/r0drv/solaris/dbg-r0drv-solaris.c	(revision 40965)
+++ /trunk/src/VBox/Runtime/r0drv/solaris/dbg-r0drv-solaris.c	(revision 40966)
@@ -94,4 +94,5 @@
             *ppCTF = ctf_modopen(((modctl_t *)*ppMod)->mod_mp, &err);
             mutex_exit(&mod_lock);
+            mod_release_mod(*ppMod);
 
             if (*ppCTF)
@@ -102,6 +103,4 @@
                 rc = VERR_INTERNAL_ERROR_3;
             }
-
-            mod_release_mod(*ppMod);
         }
         else
@@ -133,5 +132,4 @@
 
     ctf_close(pCTF);
-    mod_release_mod(pMod);
 }
 
Index: /trunk/src/VBox/Runtime/r0drv/solaris/initterm-r0drv-solaris.c
===================================================================
--- /trunk/src/VBox/Runtime/r0drv/solaris/initterm-r0drv-solaris.c	(revision 40965)
+++ /trunk/src/VBox/Runtime/r0drv/solaris/initterm-r0drv-solaris.c	(revision 40966)
@@ -32,4 +32,5 @@
 #include "internal/iprt.h"
 
+#include <iprt/assert.h>
 #include <iprt/err.h>
 #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
@@ -42,8 +43,9 @@
 *   Global Variables                                                           *
 *******************************************************************************/
+/** Kernel debug info handle. */
+RTDBGKRNLINFO               g_hKrnlDbgInfo;
 /** Indicates that the spl routines (and therefore a bunch of other ones too)
  * will set EFLAGS::IF and break code that disables interrupts.  */
-bool g_frtSolarisSplSetsEIF = false;
-
+bool g_frtSolSplSetsEIF                                    = false;
 /** timeout_generic address. */
 PFNSOL_timeout_generic      g_pfnrtR0Sol_timeout_generic   = NULL;
@@ -52,13 +54,34 @@
 /** cyclic_reprogram address. */
 PFNSOL_cyclic_reprogram     g_pfnrtR0Sol_cyclic_reprogram  = NULL;
-
+/** Whether to use the kernel page freelist. */
+bool                        g_frtSolUseKflt                = false;
+/** Whether we've completed R0 initialization. */
+bool                        g_frtSolInitDone               = false;
+/** Whether to use old-style xc_call interface. */
+bool                        g_frtSolOldIPI                 = false;
+/** Whether to use old-style xc_call interface using one ulong_t as the CPU set
+ *  representation. */
+bool                        g_frtSolOldIPIUlong            = false;
+/** The xc_call callout table structure. */
+RTR0FNSOLXCCALL             g_rtSolXcCall;
+/** Thread preemption offset. */
+size_t                      g_offrtSolThreadPreempt;
+/** Host scheduler preemption offset. */
+size_t                      g_offrtSolCpuPreempt;
+/** Host scheduler force preemption offset. */
+size_t                      g_offrtSolCpuForceKernelPreempt;
+/* Resolve using dl_lookup (remove if no longer relevant for supported S10 versions) */
+extern void contig_free(void *addr, size_t size);
+#pragma weak contig_free
+/** contig_free address. */
+PFNSOL_contig_free          g_pfnrtR0Sol_contig_free       = contig_free;
 
 DECLHIDDEN(int) rtR0InitNative(void)
 {
     /*
-     * Initialize vbi (keeping it separate for now)
+     * IPRT has not yet been initialized at this point, so use Solaris' native cmn_err() for logging.
      */
-    int rc = vbi_init();
-    if (!rc)
+    int rc = RTR0DbgKrnlInfoOpen(&g_hKrnlDbgInfo, 0 /* fFlags */);
+    if (RT_SUCCESS(rc))
     {
 #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
@@ -70,8 +93,8 @@
         int iOld = splr(DISP_LEVEL);
         if (ASMIntAreEnabled())
-            g_frtSolarisSplSetsEIF = true;
+            g_frtSolSplSetsEIF = true;
         splx(iOld);
         if (ASMIntAreEnabled())
-            g_frtSolarisSplSetsEIF = true;
+            g_frtSolSplSetsEIF = true;
         ASMSetFlags(uOldFlags);
 #else
@@ -80,8 +103,66 @@
 
         /*
-         * Dynamically resolve new symbols we want to use.
-         */
-        g_pfnrtR0Sol_timeout_generic    = (PFNSOL_timeout_generic  )kobj_getsymvalue("timeout_generic",   1);
-        g_pfnrtR0Sol_untimeout_generic  = (PFNSOL_untimeout_generic)kobj_getsymvalue("untimeout_generic", 1);
+         * Mandatory: Preemption offsets.
+         */
+        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, "cpu_t", "cpu_runrun", &g_offrtSolCpuPreempt);
+        if (RT_FAILURE(rc))
+        {
+            cmn_err(CE_NOTE, "Failed to find cpu_t::cpu_runrun!\n");
+            goto errorbail;
+        }
+
+        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, "cpu_t", "cpu_kprunrun", &g_offrtSolCpuForceKernelPreempt);
+        if (RT_FAILURE(rc))
+        {
+            cmn_err(CE_NOTE, "Failed to find cpu_t::cpu_kprunrun!\n");
+            goto errorbail;
+        }
+
+        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, "kthread_t", "t_preempt", &g_offrtSolThreadPreempt);
+        if (RT_FAILURE(rc))
+        {
+            cmn_err(CE_NOTE, "Failed to find kthread_t::t_preempt!\n");
+            goto errorbail;
+        }
+        cmn_err(CE_CONT, "!cpu_t::cpu_runrun @ 0x%lx\n",    g_offrtSolCpuPreempt);
+        cmn_err(CE_CONT, "!cpu_t::cpu_kprunrun @ 0x%lx\n",  g_offrtSolCpuForceKernelPreempt);
+        cmn_err(CE_CONT, "!kthread_t::t_preempt @ 0x%lx\n", g_offrtSolThreadPreempt);
+
+        /*
+         * Mandatory: CPU cross call infrastructure. Refer the-solaris-kernel.h for details.
+         */
+        rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "xc_init_cpu", NULL /* ppvSymbol */);
+        if (RT_SUCCESS(rc))
+        {
+            if (ncpus > IPRT_SOL_NCPUS)
+            {
+                cmn_err(CE_NOTE, "rtR0InitNative: CPU count mismatch! ncpus=%d IPRT_SOL_NCPUS=%d\n", ncpus, IPRT_SOL_NCPUS);
+                rc = VERR_NOT_SUPPORTED;
+                goto errorbail;
+            }
+            g_rtSolXcCall.u.pfnSol_xc_call = (void *)xc_call;
+        }
+        else
+        {
+            g_frtSolOldIPI = true;
+            g_rtSolXcCall.u.pfnSol_xc_call_old = (void *)xc_call;
+            if (max_cpuid + 1 == sizeof(ulong_t) * 8)
+            {
+                g_frtSolOldIPIUlong = true;
+                g_rtSolXcCall.u.pfnSol_xc_call_old_ulong = (void *)xc_call;
+            }
+            else if (max_cpuid + 1 != IPRT_SOL_NCPUS)
+            {
+                cmn_err(CE_NOTE, "rtR0InitNative: cpuset_t size mismatch! max_cpuid=%d IPRT_SOL_NCPUS=%d\n", max_cpuid, IPRT_SOL_NCPUS);
+                rc = VERR_NOT_SUPPORTED;
+                goto errorbail;
+            }
+        }
+
+        /*
+         * Optional: Timeout hooks.
+         */
+        RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "timeout_generic", (void **)&g_pfnrtR0Sol_timeout_generic);
+        RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "untimeout_generic", (void **)&g_pfnrtR0Sol_untimeout_generic);
         if ((g_pfnrtR0Sol_timeout_generic == NULL) != (g_pfnrtR0Sol_untimeout_generic == NULL))
         {
@@ -92,12 +173,46 @@
             g_pfnrtR0Sol_untimeout_generic = NULL;
         }
-
-        g_pfnrtR0Sol_cyclic_reprogram   = (PFNSOL_cyclic_reprogram )kobj_getsymvalue("cyclic_reprogram",  1);
-
-
+        RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "cyclic_reprogram", (void **)&g_pfnrtR0Sol_cyclic_reprogram);
+
+        /*
+         * Optional: Kernel page freelist (kflt)
+         *
+         * Only applicable to 64-bit Solaris kernels. Use kflt flags to get pages from kernel page freelists
+         * while allocating physical pages, once the userpages are exhausted. snv_161+, see @bugref{5632}.
+         */
+        rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "kflt_init", NULL /* ppvSymbol */);
+        if (RT_SUCCESS(rc))
+        {
+            int *pKfltDisable = NULL;
+            rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "kflt_disable", (void **)&pKfltDisable);
+            if (RT_SUCCESS(rc) && pKfltDisable && *pKfltDisable == 0)
+                g_frtSolUseKflt = true;
+        }
+
+        /*
+         * Weak binding failures: contig_free
+         */
+        if (g_pfnrtR0Sol_contig_free == NULL)
+        {
+            rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "contig_free", (void **)&g_pfnrtR0Sol_contig_free);
+            if (RT_FAILURE(rc))
+            {
+                cmn_err(CE_NOTE, "rtR0InitNative: failed to find contig_free!\n");
+                goto errorbail;
+            }
+        }
+
+        g_frtSolInitDone = true;
         return VINF_SUCCESS;
     }
-    cmn_err(CE_NOTE, "vbi_init failed. rc=%d\n", rc);
-    return VERR_GENERAL_FAILURE;
+    else
+    {
+        cmn_err(CE_NOTE, "RTR0DbgKrnlInfoOpen failed. rc=%d\n", rc);
+        return rc;
+    }
+
+errorbail:
+    RTR0DbgKrnlInfoRelease(g_hKrnlDbgInfo);
+    return rc;
 }
 
@@ -105,4 +220,6 @@
 DECLHIDDEN(void) rtR0TermNative(void)
 {
+    RTR0DbgKrnlInfoRelease(g_hKrnlDbgInfo);   /* Release the handle opened by rtR0InitNative(). */
+    g_frtSolInitDone = false;                 /* Native bits are torn down again. */
 }
 
Index: /trunk/src/VBox/Runtime/r0drv/solaris/semeventwait-r0drv-solaris.h
===================================================================
--- /trunk/src/VBox/Runtime/r0drv/solaris/semeventwait-r0drv-solaris.h	(revision 40965)
+++ /trunk/src/VBox/Runtime/r0drv/solaris/semeventwait-r0drv-solaris.h	(revision 40966)
@@ -229,5 +229,5 @@
     PRTR0SEMSOLWAIT pWait   = (PRTR0SEMSOLWAIT)pvUser;
     kthread_t      *pThread = pWait->pThread;
-    kmutex_t       *pMtx    = (kmutex_t *)ASMAtomicReadPtr(&pWait->pvMtx);
+    kmutex_t       *pMtx    = (kmutex_t *)ASMAtomicReadPtr((void * volatile *)&pWait->pvMtx);
     if (VALID_PTR(pMtx))
     {
@@ -487,3 +487,4 @@
 }
 
-#endif
+#endif /* ___r0drv_solaris_semeventwait_r0drv_solaris_h */
+
Index: /trunk/src/VBox/Runtime/r0drv/solaris/semmutex-r0drv-solaris.c
===================================================================
--- /trunk/src/VBox/Runtime/r0drv/solaris/semmutex-r0drv-solaris.c	(revision 40965)
+++ /trunk/src/VBox/Runtime/r0drv/solaris/semmutex-r0drv-solaris.c	(revision 40966)
@@ -147,5 +147,5 @@
 
 /**
- * Worker for rtSemMutexSolarisRequest that handles the case where we go to sleep.
+ * Worker for rtSemMutexSolRequest that handles the case where we go to sleep.
  *
  * @returns VINF_SUCCESS, VERR_INTERRUPTED, or VERR_SEM_DESTROYED.
@@ -157,5 +157,5 @@
  * @remarks This needs to be called with the mutex object held!
  */
-static int rtSemMutexSolarisRequestSleep(PRTSEMMUTEXINTERNAL pThis, RTMSINTERVAL cMillies,
+static int rtSemMutexSolRequestSleep(PRTSEMMUTEXINTERNAL pThis, RTMSINTERVAL cMillies,
                                        bool fInterruptible)
 {
@@ -254,5 +254,5 @@
  * Internal worker.
  */
-DECLINLINE(int) rtSemMutexSolarisRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, bool fInterruptible)
+DECLINLINE(int) rtSemMutexSolRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, bool fInterruptible)
 {
     PRTSEMMUTEXINTERNAL pThis = hMutexSem;
@@ -296,5 +296,5 @@
      */
     else
-        rc = rtSemMutexSolarisRequestSleep(pThis, cMillies, fInterruptible);
+        rc = rtSemMutexSolRequestSleep(pThis, cMillies, fInterruptible);
 
     mutex_exit(&pThis->Mtx);
@@ -305,5 +305,5 @@
 RTDECL(int) RTSemMutexRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies)
 {
-    return rtSemMutexSolarisRequest(hMutexSem, cMillies, false /*fInterruptible*/);
+    return rtSemMutexSolRequest(hMutexSem, cMillies, false /*fInterruptible*/);
 }
 
@@ -317,5 +317,5 @@
 RTDECL(int) RTSemMutexRequestNoResume(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies)
 {
-    return rtSemMutexSolarisRequest(hMutexSem, cMillies, true /*fInterruptible*/);
+    return rtSemMutexSolRequest(hMutexSem, cMillies, true /*fInterruptible*/);
 }
 
Index: /trunk/src/VBox/Runtime/r0drv/solaris/the-solaris-kernel.h
===================================================================
--- /trunk/src/VBox/Runtime/r0drv/solaris/the-solaris-kernel.h	(revision 40965)
+++ /trunk/src/VBox/Runtime/r0drv/solaris/the-solaris-kernel.h	(revision 40966)
@@ -57,5 +57,4 @@
 #include <sys/ctf_api.h>
 #include <sys/modctl.h>
-#include "vbi.h"
 
 #undef u /* /usr/include/sys/user.h:249:1 is where this is defined to (curproc->p_user). very cool. */
@@ -63,21 +62,88 @@
 #include <iprt/cdefs.h>
 #include <iprt/types.h>
+#include <iprt/dbg.h>
 
 RT_C_DECLS_BEGIN
 
+/* IPRT functions. */
+DECLHIDDEN(void *)   rtR0SolMemAlloc(uint64_t cbPhysHi, uint64_t *puPhys, size_t cb, uint64_t cbAlign, bool fContig);
+DECLHIDDEN(void)     rtR0SolMemFree(void *pv, size_t cb);
+
+
+/* Solaris functions. */
 typedef callout_id_t (*PFNSOL_timeout_generic)(int type, void (*func)(void *),
                                                void *arg, hrtime_t expiration,
                                                hrtime_t resultion, int flags);
-typedef hrtime_t    (*PFNSOL_untimeout_generic)(callout_id_t id, int nowait);
-typedef int         (*PFNSOL_cyclic_reprogram)(cyclic_id_t id, hrtime_t expiration);
-
+typedef hrtime_t     (*PFNSOL_untimeout_generic)(callout_id_t id, int nowait);
+typedef int          (*PFNSOL_cyclic_reprogram)(cyclic_id_t id, hrtime_t expiration);
+typedef void         (*PFNSOL_contig_free)(void *addr, size_t size);
 
 /* IPRT globals. */
-extern bool                     g_frtSolarisSplSetsEIF;
+extern bool                     g_frtSolSplSetsEIF;
 extern struct ddi_dma_attr      g_SolarisX86PhysMemLimits;
-extern RTCPUSET                 g_rtMpSolarisCpuSet;
+extern RTCPUSET                 g_rtMpSolCpuSet;
 extern PFNSOL_timeout_generic   g_pfnrtR0Sol_timeout_generic;
 extern PFNSOL_untimeout_generic g_pfnrtR0Sol_untimeout_generic;
 extern PFNSOL_cyclic_reprogram  g_pfnrtR0Sol_cyclic_reprogram;
+extern PFNSOL_contig_free       g_pfnrtR0Sol_contig_free;
+extern bool                     g_frtSolUseKflt;
+extern size_t                   g_offrtSolThreadPreempt;
+extern size_t                   g_offrtSolCpuPreempt;
+extern size_t                   g_offrtSolCpuForceKernelPreempt;
+extern bool                     g_frtSolInitDone;
+extern RTDBGKRNLINFO            g_hKrnlDbgInfo;
+
+/*
+ * Workarounds for running on old versions of Solaris with different cross call
+ * interfaces. If we find xc_init_cpu() in the kernel, then just use the
+ * defined interfaces for xc_call() from the include file where the xc_call()
+ * interface just takes a pointer to a ulong_t array. The array must be long
+ * enough to hold "ncpus" bits at runtime.
+ *
+ * The reason for the hacks is that using the type "cpuset_t" is pretty much
+ * impossible from code built outside the Solaris source repository that wants
+ * to run on multiple releases of Solaris.
+ *
+ * For old style xc_call()s, 32 bit Solaris and older 64 bit versions use
+ * "ulong_t" as cpuset_t.
+ *
+ * Later versions of 64 bit Solaris used: struct {ulong_t words[x];}
+ * where "x" depends on NCPU.
+ *
+ * We detect the difference in 64 bit support by checking the kernel value of
+ * max_cpuid, which always holds the compiled value of NCPU - 1.
+ *
+ * If Solaris increases NCPU to more than 256, VBox will continue to work on
+ * all versions of Solaris as long as the number of installed CPUs in the
+ * machine is <= IPRT_SOL_NCPUS. If IPRT_SOL_NCPUS is increased, this
+ * code has to be re-written somewhat to provide compatibility with older Solaris
+ * which expects cpuset_t to be based on NCPU==256 -- or we discontinue
+ * support of old Nevada/S10.
+ */
+#define IPRT_SOL_NCPUS          256 /* Max CPUs supported, see big comment above. */
+#define IPRT_SOL_SET_WORDS      (IPRT_SOL_NCPUS / (sizeof(ulong_t) * 8))
+#define IPRT_SOL_X_CALL_HIPRI   (2) /* for Old Solaris interface */
+typedef struct RTSOLCPUSET
+{
+    ulong_t                     auCpus[IPRT_SOL_SET_WORDS];
+} RTSOLCPUSET;
+typedef RTSOLCPUSET *PRTSOLCPUSET;
+
+/* Union of the known xc_call() variants; avoids casts and compiler warnings at the call sites. */
+typedef struct RTR0FNSOLXCCALL
+{
+    union
+    {
+        void *(*pfnSol_xc_call)          (xc_arg_t, xc_arg_t, xc_arg_t, ulong_t *, xc_func_t);
+        void *(*pfnSol_xc_call_old)      (xc_arg_t, xc_arg_t, xc_arg_t, int, RTSOLCPUSET, xc_func_t);
+        void *(*pfnSol_xc_call_old_ulong)(xc_arg_t, xc_arg_t, xc_arg_t, int, ulong_t, xc_func_t);
+    } u;
+} RTR0FNSOLXCCALL;
+typedef RTR0FNSOLXCCALL *PRTR0FNSOLXCCALL;
+
+extern RTR0FNSOLXCCALL          g_rtSolXcCall;
+extern bool                     g_frtSolOldIPI;
+extern bool                     g_frtSolOldIPIUlong;
+
 
 /* Solaris globals. */
Index: /trunk/src/VBox/Runtime/r0drv/solaris/vbi/RTMpPokeCpu-r0drv-solaris.c
===================================================================
--- /trunk/src/VBox/Runtime/r0drv/solaris/vbi/RTMpPokeCpu-r0drv-solaris.c	(revision 40965)
+++ /trunk/src/VBox/Runtime/r0drv/solaris/vbi/RTMpPokeCpu-r0drv-solaris.c	(revision 40966)
@@ -44,5 +44,6 @@
 {
     RT_ASSERT_INTS_ON();
-    vbi_poke_cpu(idCpu);
+    if (idCpu < ncpus)
+        poke_cpu(idCpu);
     return VINF_SUCCESS;
 }
Index: /trunk/src/VBox/Runtime/r0drv/solaris/vbi/alloc-r0drv-solaris.c
===================================================================
--- /trunk/src/VBox/Runtime/r0drv/solaris/vbi/alloc-r0drv-solaris.c	(revision 40965)
+++ /trunk/src/VBox/Runtime/r0drv/solaris/vbi/alloc-r0drv-solaris.c	(revision 40966)
@@ -41,4 +41,25 @@
 
 
+/*******************************************************************************
+*   Structures and Typedefs                                                    *
+*******************************************************************************/
+static ddi_dma_attr_t s_rtR0SolDmaAttr =
+{
+    DMA_ATTR_V0,                /* Version Number */
+    (uint64_t)0,                /* Lower limit */
+    (uint64_t)0,                /* High limit */
+    (uint64_t)0xffffffff,       /* Counter limit */
+    (uint64_t)PAGESIZE,         /* Alignment */
+    (uint64_t)PAGESIZE,         /* Burst size */
+    (uint64_t)PAGESIZE,         /* Effective DMA size */
+    (uint64_t)0xffffffff,       /* Max DMA xfer size */
+    (uint64_t)0xffffffff,       /* Segment boundary */
+    1,                          /* Scatter-gather list length (1 for contiguous) */
+    1,                          /* Device granularity */
+    0                           /* Bus-specific flags */
+};
+
+extern void *contig_alloc(size_t cb, ddi_dma_attr_t *pDmaAttr, size_t uAlign, int fCanSleep);
+
 
 /**
@@ -55,5 +76,5 @@
         AssertReturn(!(fFlags & RTMEMHDR_FLAG_ANY_CTX), NULL);
         cbAllocated = RT_ALIGN_Z(cb + sizeof(*pHdr), PAGE_SIZE) - sizeof(*pHdr);
-        pHdr = (PRTMEMHDR)vbi_text_alloc(cbAllocated + sizeof(*pHdr));
+        pHdr = (PRTMEMHDR)segkmem_alloc(heaptext_arena, cbAllocated + sizeof(*pHdr), KM_SLEEP);
     }
     else
@@ -90,5 +111,5 @@
 #ifdef RT_ARCH_AMD64
     if (pHdr->fFlags & RTMEMHDR_FLAG_EXEC)
-        vbi_text_free(pHdr, pHdr->cb + sizeof(*pHdr));
+        segkmem_free(heaptext_arena, pHdr, pHdr->cb + sizeof(*pHdr));
     else
 #endif
@@ -97,4 +118,64 @@
 
 
+/**
+ * Allocates physical memory which satisfies the given constraints.
+ *
+ * @param   uPhysHi        The upper physical address limit (inclusive).
+ * @param   puPhys         Where to store the physical address of the allocated
+ *                         memory. Optional, can be NULL.
+ * @param   cb             Size of allocation (must be page aligned).
+ * @param   uAlignment     Alignment.
+ * @param   fContig        Whether the memory must be physically contiguous or
+ *                         not.
+ *
+ * @returns Virtual address of allocated memory block or NULL if allocation
+ *          failed.
+ */
+DECLHIDDEN(void *) rtR0SolMemAlloc(uint64_t uPhysHi, uint64_t *puPhys, size_t cb, uint64_t uAlignment, bool fContig)
+{
+    if ((cb & PAGEOFFSET) != 0)   /* Only page-aligned sizes are supported. */
+        return NULL;
+
+    size_t cPages = (cb + PAGESIZE - 1) >> PAGESHIFT;
+    if (!cPages)
+        return NULL;
+
+    ddi_dma_attr_t DmaAttr = s_rtR0SolDmaAttr;
+    DmaAttr.dma_attr_addr_hi    = uPhysHi;
+    DmaAttr.dma_attr_align      = uAlignment;
+    if (!fContig)
+        DmaAttr.dma_attr_sgllen = cPages > INT_MAX ? INT_MAX - 1 : cPages;  /* dma_attr_sgllen is an int. */
+    else
+        AssertRelease(DmaAttr.dma_attr_sgllen == 1);
+
+    void *pvMem = contig_alloc(cb, &DmaAttr, PAGESIZE, 1 /* can sleep */);
+    if (!pvMem)
+    {
+        LogRel(("rtR0SolMemAlloc failed. cb=%u Align=%u fContig=%d\n", (unsigned)cb, (unsigned)uAlignment, fContig));
+        return NULL;
+    }
+
+    pfn_t PageFrameNum = hat_getpfnum(kas.a_hat, (caddr_t)pvMem);
+    AssertRelease(PageFrameNum != PFN_INVALID);
+    if (puPhys)
+        *puPhys = (uint64_t)PageFrameNum << PAGESHIFT;
+
+    return pvMem;
+}
+
+
+/**
+ * Frees memory allocated using rtR0SolMemAlloc().
+ *
+ * @param   pv         The memory to free. NULL is ignored.
+ * @param   cb         Size of the memory block being freed.
+ */
+DECLHIDDEN(void) rtR0SolMemFree(void *pv, size_t cb)
+{
+    if (RT_LIKELY(pv))
+        g_pfnrtR0Sol_contig_free(pv, cb);   /* Resolved during rtR0InitNative(). */
+}
+
+
 RTR0DECL(void *) RTMemContAlloc(PRTCCPHYS pPhys, size_t cb)
 {
@@ -104,15 +185,15 @@
 
     /* Allocate physically contiguous (< 4GB) page-aligned memory. */
-    uint64_t physAddr = _4G -1;
-    caddr_t virtAddr  = vbi_contig_alloc(&physAddr, cb);
-    if (virtAddr == NULL)
-    {
-        LogRel(("vbi_contig_alloc failed to allocate %u bytes\n", cb));
-        return NULL;
-    }
-
-    Assert(physAddr < _4G);
-    *pPhys = physAddr;
-    return virtAddr;
+    uint64_t uPhys;
+    void *pvMem = rtR0SolMemAlloc((uint64_t)_4G - 1, &uPhys, cb, PAGESIZE, true);
+    if (RT_UNLIKELY(!pvMem))
+    {
+        LogRel(("RTMemContAlloc failed to allocate %u bytes\n", cb));
+        return NULL;
+    }
+
+    Assert(uPhys < _4G);
+    *pPhys = uPhys;
+    return pvMem;
 }
 
@@ -121,6 +202,5 @@
 {
     RT_ASSERT_PREEMPTIBLE();
-    if (pv)
-        vbi_contig_free(pv, cb);
-}
-
+    rtR0SolMemFree(pv, cb);
+}
+
Index: /trunk/src/VBox/Runtime/r0drv/solaris/vbi/memobj-r0drv-solaris.c
===================================================================
--- /trunk/src/VBox/Runtime/r0drv/solaris/vbi/memobj-r0drv-solaris.c	(revision 40965)
+++ /trunk/src/VBox/Runtime/r0drv/solaris/vbi/memobj-r0drv-solaris.c	(revision 40966)
@@ -41,4 +41,8 @@
 #include <iprt/process.h>
 #include "internal/memobj.h"
+#include "memobj-r0drv-solaris.h"
+
+#define SOL_IS_KRNL_ADDR(vx)    ((uintptr_t)(vx) >= kernelbase)
+static vnode_t                  s_PageVnode;
 
 /*******************************************************************************
@@ -48,5 +52,5 @@
  * The Solaris version of the memory object structure.
  */
-typedef struct RTR0MEMOBJSOLARIS
+typedef struct RTR0MEMOBJSOL
 {
     /** The core structure. */
@@ -61,29 +65,530 @@
      *  allocation. */
     bool                fLargePage;
-} RTR0MEMOBJSOLARIS, *PRTR0MEMOBJSOLARIS;
-
+} RTR0MEMOBJSOL, *PRTR0MEMOBJSOL;
+
+
+/**
+ * Returns the physical address for a virtual address.
+ *
+ * @param pv        The virtual address (kernel or current-process user address).
+ *
+ * @returns The physical address corresponding to @a pv; asserts if unmapped.
+ */
+static uint64_t rtR0MemObjSolVirtToPhys(void *pv)
+{
+    struct hat *pHat         = NULL;
+    pfn_t       PageFrameNum = 0;
+    uintptr_t   uVirtAddr    = (uintptr_t)pv;
+
+    if (SOL_IS_KRNL_ADDR(pv))   /* Pick the HAT: kernel address space vs. current process. */
+        pHat = kas.a_hat;
+    else
+    {
+        proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
+        AssertRelease(pProcess);
+        pHat = pProcess->p_as->a_hat;
+    }
+
+    PageFrameNum = hat_getpfnum(pHat, (caddr_t)(uVirtAddr & PAGEMASK));   /* Look up the page base. */
+    AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolVirtToPhys failed. pv=%p\n", pv));
+    return (((uint64_t)PageFrameNum << PAGESHIFT) | (uVirtAddr & PAGEOFFSET));
+}
+
+
+/**
+ * Returns the physical address of a page from an array of pages.
+ *
+ * @param ppPages       The array of pages.
+ * @param iPage         Index of the page in the array to get the physical
+ *                      address.
+ *
+ * @returns Physical address of specific page within the list of pages specified
+ *          in @a ppPages. Asserts if the page frame number is invalid.
+ */
+static inline uint64_t rtR0MemObjSolPageToPhys(page_t **ppPages, size_t iPage)
+{
+    pfn_t PageFrameNum = page_pptonum(ppPages[iPage]);
+    AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolPageToPhys failed. ppPages=%p iPage=%u\n", ppPages, (unsigned)iPage));
+    return (uint64_t)PageFrameNum << PAGESHIFT;
+}
+
+
+/**
+ * Retrieves a free page from the kernel freelist.
+ *
+ * @param virtAddr       The virtual address to which this page may be mapped in
+ *                       the future (used only for page colouring).
+ * @param cbPage         The size of the page.
+ *
+ * @returns Pointer to the allocated page, NULL on failure.
+ */
+static page_t *rtR0MemObjSolPageFromFreelist(caddr_t virtAddr, size_t cbPage)
+{
+    seg_t KernelSeg;
+    KernelSeg.s_as = &kas;
+    page_t *pPage = page_get_freelist(&s_PageVnode, 0 /* offset */, &KernelSeg, virtAddr,
+                                      cbPage, 0 /* flags */, NULL /* NUMA group */);
+    if (   !pPage
+        && g_frtSolUseKflt)   /* Retry from the kernel page freelist, see rtR0InitNative(). */
+    {
+        pPage = page_get_freelist(&s_PageVnode, 0 /* offset */, &KernelSeg, virtAddr,
+                                  cbPage, 0x200 /* PG_KFLT */, NULL /* NUMA group */);
+    }
+    return pPage;
+}
+
+
+/**
+ * Retrieves a free page from the kernel cachelist.
+ *
+ * @param virtAddr      The virtual address to which this page may be mapped in
+ *                      the future.
+ * @param cbPage        The size of the page.
+ *
+ * @return Pointer to the allocated page, NULL on failure.
+ */
+static page_t *rtR0MemObjSolPageFromCachelist(caddr_t virtAddr, size_t cbPage)
+{
+    seg_t KernelSeg;
+    KernelSeg.s_as = &kas;
+    page_t *pPage = page_get_cachelist(&s_PageVnode, 0 /* offset */, &KernelSeg, virtAddr,
+                                       0 /* flags */, NULL /* NUMA group */);
+    if (   !pPage
+        && g_frtSolUseKflt)
+    {
+        pPage = page_get_cachelist(&s_PageVnode, 0 /* offset */, &KernelSeg, virtAddr,
+                                   0x200 /* PG_KFLT */, NULL /* NUMA group */);
+    }
+
+    /*
+     * Remove association with the vnode for pages from the cachelist.
+     * Beware: both lookups above may have failed, so check for NULL first.
+     */
+    if (pPage && !PP_ISAGED(pPage))
+        page_hashout(pPage, NULL /* mutex */);
+
+    return pPage;
+}
+
+
+/**
+ * Allocates physical non-contiguous memory.
+ *
+ * @param uPhysHi   The upper physical address limit (inclusive).
+ * @param puPhys    Where to store the physical address of first page. Optional,
+ *                  can be NULL.
+ * @param cb        The size of the allocation.
+ *
+ * @return Array of allocated pages, NULL on failure.
+ */
+static page_t **rtR0MemObjSolPagesAlloc(uint64_t uPhysHi, uint64_t *puPhys, size_t cb)
+{
+    /** @todo We need to satisfy the upper physical address constraint */
+
+    /*
+     * The page freelist and cachelist both hold pages that are not mapped into any address space.
+     * The cachelist is not really free pages but when memory is exhausted they'll be moved to the
+     * free lists, it's the total of the free+cache list that we see on the 'free' column in vmstat.
+     *
+     * Reserve available memory for pages and create the pages.
+     */
+    pgcnt_t cPages = (cb + PAGESIZE - 1) >> PAGESHIFT;
+    int rc = page_resv(cPages, KM_NOSLEEP);
+    if (rc)
+    {
+        rc = page_create_wait(cPages, 0 /* flags */);
+        if (rc)
+        {
+            size_t   cbPages = cPages * sizeof(page_t *);
+            page_t **ppPages = kmem_zalloc(cbPages, KM_SLEEP);
+            if (RT_LIKELY(ppPages))
+            {
+                /*
+                 * Get pages from kseg, the 'virtAddr' here is only for colouring but unfortunately
+                 * we don't yet have the 'virtAddr' to which this memory may be mapped.
+                 */
+                caddr_t virtAddr = NULL;
+                for (size_t i = 0; i < cPages; i++, virtAddr += PAGESIZE)
+                {
+                    /*
+                     * Get a page from the freelist or cachelist.
+                     */
+                    page_t *pPage = rtR0MemObjSolPageFromFreelist(virtAddr, PAGESIZE);
+                    if (!pPage)
+                        pPage = rtR0MemObjSolPageFromCachelist(virtAddr, PAGESIZE);
+                    if (RT_UNLIKELY(!pPage))
+                    {
+                        /*
+                         * No more pages found, release what was grabbed so far.
+                         */
+                        page_create_putback(cPages - i);
+                        while (i-- > 0)   /* i is size_t: '--i >= 0' would wrap and never terminate. */
+                            page_free(ppPages[i], 0 /* don't need page, move to tail of pagelist */);
+                        kmem_free(ppPages, cbPages);
+                        page_unresv(cPages);
+                        return NULL;
+                    }
+
+                    PP_CLRFREE(pPage);      /* Page is no longer free */
+                    PP_CLRAGED(pPage);      /* Page is not hashed in */
+                    ppPages[i] = pPage;
+                }
+
+                /*
+                 * We now have the pages locked exclusively, before they are mapped in
+                 * we must downgrade the lock.
+                 */
+                if (puPhys)
+                    *puPhys = (uint64_t)page_pptonum(ppPages[0]) << PAGESHIFT;
+                return ppPages;
+            }
+
+            page_create_putback(cPages);
+        }
+
+        page_unresv(cPages);
+    }
+
+    return NULL;
+}
+
+
+/**
+ * Prepares pages allocated by rtR0MemObjSolPagesAlloc for mapping.
+ *
+ * @param    ppPages    Pointer to the page list.
+ * @param    cb         Size of the allocation.
+ * @param    auPhys     Where to store the physical addresses of the premapped
+ *                      pages.
+ * @param    cPages     The number of pages (entries) in @a auPhys.
+ *
+ * @returns IPRT status code.
+ */
+static int rtR0MemObjSolPagesPreMap(page_t **ppPages, size_t cb, uint64_t auPhys[], size_t cPages)
+{
+    AssertPtrReturn(ppPages, VERR_INVALID_PARAMETER);
+    AssertPtrReturn(auPhys, VERR_INVALID_PARAMETER);
+
+    for (size_t iPage = 0; iPage < cPages; iPage++)
+    {
+        /*
+         * Prepare pages for mapping into kernel/user-space. Downgrade the
+         * exclusive page lock to a shared lock if necessary.
+         */
+        if (page_tryupgrade(ppPages[iPage]) == 1)
+            page_downgrade(ppPages[iPage]);
+
+        auPhys[iPage] = rtR0MemObjSolPageToPhys(ppPages, iPage);
+    }
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Frees pages allocated by rtR0MemObjSolPagesAlloc.
+ *
+ * @param ppPages       Pointer to the page list.
+ * @param cb            Size of the allocation.
+ */
+static void rtR0MemObjSolPagesFree(page_t **ppPages, size_t cb)
+{
+    size_t cPages  = (cb + PAGESIZE - 1) >> PAGESHIFT;
+    size_t cbPages = cPages * sizeof(page_t *);
+    for (size_t iPage = 0; iPage < cPages; iPage++)
+    {
+        /*
+         *  We need to exclusively lock the pages before freeing them.
+         */
+        int rc = page_tryupgrade(ppPages[iPage]);
+        if (!rc)
+        {
+            page_unlock(ppPages[iPage]);
+            while (!page_lock(ppPages[iPage], SE_EXCL, NULL /* mutex */, P_RECLAIM))
+            {
+                /* nothing */;
+            }
+        }
+        page_free(ppPages[iPage], 0 /* don't need page, move to tail of pagelist */);
+    }
+    kmem_free(ppPages, cbPages);
+    page_unresv(cPages);
+}
+
+
+/**
+ * Allocates a large page to cover the required allocation size.
+ *
+ * @param puPhys        Where to store the physical address of the allocated
+ *                      page. Optional, can be NULL.
+ * @param cb            Size of the allocation.
+ *
+ * @returns Pointer to the allocated large page, NULL on failure.
+ */
+static page_t *rtR0MemObjSolLargePageAlloc(uint64_t *puPhys, size_t cb)
+{
+    /*
+     * Reserve available memory and create the sub-pages.
+     */
+    const pgcnt_t cPages = cb >> PAGESHIFT;
+    int rc = page_resv(cPages, KM_NOSLEEP);
+    if (rc)
+    {
+        rc = page_create_wait(cPages, 0 /* flags */);
+        if (rc)
+        {
+            /*
+             * Get a page off the free list. We set virtAddr to 0 since we don't know where
+             * the memory is going to be mapped.
+             */
+            seg_t KernelSeg;
+            caddr_t virtAddr  = NULL;
+            KernelSeg.s_as    = &kas;
+            page_t *pRootPage = rtR0MemObjSolPageFromFreelist(virtAddr, cb);
+            if (pRootPage)
+            {
+                AssertMsg(!(page_pptonum(pRootPage) & (cPages - 1)), ("%p:%lx cPages=%lx\n", pRootPage, page_pptonum(pRootPage), cPages));
+
+                /*
+                 * Mark all the sub-pages as non-free and not-hashed-in.
+                 * It is paramount that we destroy the list (before freeing it).
+                 */
+                page_t *pPageList = pRootPage;
+                for (size_t iPage = 0; iPage < cPages; iPage++)
+                {
+                    page_t *pPage = pPageList;
+                    AssertPtr(pPage);
+                    AssertMsg(page_pptonum(pPage) == iPage + page_pptonum(pRootPage),
+                        ("%p:%lx %lx+%lx\n", pPage, page_pptonum(pPage), iPage, page_pptonum(pRootPage)));
+                    page_sub(&pPageList, pPage);
+
+                    /*
+                     * Ensure the page is free and that its size-code matches that of the root page.
+                     */
+                    AssertMsg(PP_ISFREE(pPage), ("%p\n", pPage));
+                    AssertMsg(pPage->p_szc == pRootPage->p_szc, ("%p - %d expected %d \n", pPage, pPage->p_szc, pRootPage->p_szc));
+
+                    PP_CLRFREE(pPage);      /* Page no longer free */
+                    PP_CLRAGED(pPage);      /* Page no longer hashed-in */
+                }
+
+                uint64_t uPhys = (uint64_t)page_pptonum(pRootPage) << PAGESHIFT;
+                AssertMsg(!(uPhys & (cb - 1)), ("%llx %zx\n", uPhys, cb));
+                if (puPhys)
+                    *puPhys = uPhys;
+
+                return pRootPage;
+            }
+
+            page_create_putback(cPages);
+        }
+
+        page_unresv(cPages);
+    }
+
+    return NULL;
+}
+
+/**
+ * Prepares the large page allocated by rtR0MemObjSolLargePageAlloc to be mapped.
+ *
+ * @param    pRootPage      Pointer to the root page.
+ * @param    cb             Size of the allocation.
+ *
+ * @returns IPRT status code.
+ */
+static int rtR0MemObjSolLargePagePreMap(page_t *pRootPage, size_t cb)
+{
+    const pgcnt_t cPages = cb >> PAGESHIFT;
+
+    Assert(page_get_pagecnt(pRootPage->p_szc) == cPages);
+    AssertMsg(!(page_pptonum(pRootPage) & (cPages - 1)), ("%p:%lx npages=%lx\n", pRootPage, page_pptonum(pRootPage), cPages));
+
+    /*
+     * We need to downgrade the sub-pages from exclusive to shared locking
+     * because otherwise we cannot <you go figure>.
+     */
+    for (pgcnt_t iPage = 0; iPage < cPages; iPage++)
+    {
+        page_t *pPage = page_nextn(pRootPage, iPage);
+        AssertMsg(page_pptonum(pPage) == iPage + page_pptonum(pRootPage),
+            ("%p:%lx %lx+%lx\n", pPage, page_pptonum(pPage), iPage, page_pptonum(pRootPage)));
+        AssertMsg(!PP_ISFREE(pPage), ("%p\n", pPage));
+
+        if (page_tryupgrade(pPage) == 1)
+            page_downgrade(pPage);
+        AssertMsg(!PP_ISFREE(pPage), ("%p\n", pPage));
+    }
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Frees the page allocated by rtR0MemObjSolLargePageAlloc.
+ *
+ * @param    pRootPage      Pointer to the root page.
+ * @param    cb             Allocated size.
+ */
+static void rtR0MemObjSolLargePageFree(page_t *pRootPage, size_t cb)
+{
+    pgcnt_t cPages = cb >> PAGESHIFT;
+
+    Assert(page_get_pagecnt(pRootPage->p_szc) == cPages);
+    AssertMsg(!(page_pptonum(pRootPage) & (cPages - 1)), ("%p:%lx cPages=%lx\n", pRootPage, page_pptonum(pRootPage), cPages));
+
+    /*
+     * We need to exclusively lock the sub-pages before freeing the large one.
+     */
+    for (pgcnt_t iPage = 0; iPage < cPages; iPage++)
+    {
+        page_t *pPage = page_nextn(pRootPage, iPage);
+        AssertMsg(page_pptonum(pPage) == iPage + page_pptonum(pRootPage),
+                  ("%p:%lx %lx+%lx\n", pPage, page_pptonum(pPage), iPage, page_pptonum(pRootPage)));
+        AssertMsg(!PP_ISFREE(pPage), ("%p\n", pPage));
+
+        int rc = page_tryupgrade(pPage);
+        if (!rc)
+        {
+            page_unlock(pPage);
+            while (!page_lock(pPage, SE_EXCL, NULL /* mutex */, P_RECLAIM))
+            {
+                /* nothing */;
+            }
+        }
+    }
+
+    /*
+     * Free the large page and unreserve the memory.
+     */
+    page_free_pages(pRootPage);
+    page_unresv(cPages);
+
+}
+
+
+/**
+ * Unmaps kernel/user-space mapped memory.
+ *
+ * @param    pv         Pointer to the mapped memory block.
+ * @param    cb         Size of the memory block.
+ */
+static void rtR0MemObjSolUnmap(void *pv, size_t cb)
+{
+    if (SOL_IS_KRNL_ADDR(pv))
+    {
+        hat_unload(kas.a_hat, pv, cb, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
+        vmem_free(heap_arena, pv, cb);
+    }
+    else
+    {
+        struct as *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
+        AssertPtr(pAddrSpace);
+        as_rangelock(pAddrSpace);
+        as_unmap(pAddrSpace, pv, cb);
+        as_rangeunlock(pAddrSpace);
+    }
+}
+
+/**
+ * Lock down memory mappings for a virtual address.
+ *
+ * @param    pv             Pointer to the memory to lock down.
+ * @param    cb             Size of the memory block.
+ * @param    fPageAccess    Page access rights (S_READ, S_WRITE, S_EXEC)
+ *
+ * @returns IPRT status code.
+ */
+static int rtR0MemObjSolLock(void *pv, size_t cb, int fPageAccess)
+{
+    /*
+     * Kernel memory mappings on x86/amd64 are always locked, only handle user-space memory.
+     */
+    if (!SOL_IS_KRNL_ADDR(pv))
+    {
+        proc_t *pProc = (proc_t *)RTR0ProcHandleSelf();
+        AssertPtr(pProc);
+        faultcode_t rc = as_fault(pProc->p_as->a_hat, pProc->p_as, (caddr_t)pv, cb, F_SOFTLOCK, fPageAccess);
+        if (rc)
+        {
+            LogRel(("rtR0MemObjSolLock failed for pv=%p cb=%lx fPageAccess=%d rc=%d\n", pv, cb, fPageAccess, rc));
+            return VERR_LOCK_FAILED;
+        }
+    }
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Unlock memory mappings for a virtual address.
+ *
+ * @param    pv             Pointer to the locked memory.
+ * @param    cb             Size of the memory block.
+ * @param    fPageAccess    Page access rights (S_READ, S_WRITE, S_EXEC).
+ */
+static void rtR0MemObjSolUnlock(void *pv, size_t cb, int fPageAccess)
+{
+    if (!SOL_IS_KRNL_ADDR(pv))
+    {
+        proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
+        AssertPtr(pProcess);
+        as_fault(pProcess->p_as->a_hat, pProcess->p_as, (caddr_t)pv, cb, F_SOFTUNLOCK, fPageAccess);
+    }
+}
+
+
+/**
+ * Maps a list of physical pages into user address space.
+ *
+ * @param    pVirtAddr      Where to store the virtual address of the mapping.
+ * @param    fPageAccess    Page access rights (PROT_READ, PROT_WRITE,
+ *                          PROT_EXEC)
+ * @param    paPhysAddrs    Array of physical addresses to pages.
+ * @param    cb             Size of memory being mapped.
+ *
+ * @returns IPRT status code.
+ */
+static int rtR0MemObjSolUserMap(caddr_t *pVirtAddr, unsigned fPageAccess, uint64_t *paPhysAddrs, size_t cb)
+{
+    struct as *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
+    int rc = VERR_INTERNAL_ERROR;
+    SEGVBOX_CRARGS Args;
+
+    Args.paPhysAddrs = paPhysAddrs;
+    Args.fPageAccess = fPageAccess;
+
+    as_rangelock(pAddrSpace);
+    map_addr(pVirtAddr, cb, 0 /* offset */, 0 /* vacalign */, MAP_SHARED);
+    if (*pVirtAddr != NULL)
+        rc = as_map(pAddrSpace, *pVirtAddr, cb, rtR0SegVBoxSolCreate, &Args);
+    else
+        rc = ENOMEM;
+    as_rangeunlock(pAddrSpace);
+
+    return RTErrConvertFromErrno(rc);
+}
 
 
 DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
 {
-    PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)pMem;
+    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;
 
     switch (pMemSolaris->Core.enmType)
     {
         case RTR0MEMOBJTYPE_LOW:
-            vbi_lowmem_free(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
+            rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
             break;
 
         case RTR0MEMOBJTYPE_PHYS:
-            if (!pMemSolaris->Core.u.Phys.fAllocated)
-            {   /* nothing to do here */;   }
-            else if (pMemSolaris->fLargePage)
-                vbi_large_page_free(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
-            else
-                vbi_phys_free(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
+            if (pMemSolaris->Core.u.Phys.fAllocated)
+            {
+                if (pMemSolaris->fLargePage)
+                    rtR0MemObjSolLargePageFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
+                else
+                    rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
+            }
             break;
 
         case RTR0MEMOBJTYPE_PHYS_NC:
-            vbi_pages_free(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
+            rtR0MemObjSolPagesFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
             break;
 
@@ -93,9 +598,9 @@
 
         case RTR0MEMOBJTYPE_LOCK:
-            vbi_unlock_va(pMemSolaris->Core.pv, pMemSolaris->Core.cb, pMemSolaris->fAccess, pMemSolaris->pvHandle);
+            rtR0MemObjSolUnlock(pMemSolaris->Core.pv, pMemSolaris->Core.cb, pMemSolaris->fAccess);
             break;
 
         case RTR0MEMOBJTYPE_MAPPING:
-            vbi_unmap(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
+            rtR0MemObjSolUnmap(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
             break;
 
@@ -122,10 +627,10 @@
 {
     /* Create the object. */
-    PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PAGE, NULL, cb);
-    if (!pMemSolaris)
+    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PAGE, NULL, cb);
+    if (RT_UNLIKELY(!pMemSolaris))
         return VERR_NO_MEMORY;
 
-    void *virtAddr = ddi_umem_alloc(cb, DDI_UMEM_SLEEP, &pMemSolaris->Cookie);
-    if (!virtAddr)
+    void *pvMem = ddi_umem_alloc(cb, DDI_UMEM_SLEEP, &pMemSolaris->Cookie);
+    if (RT_UNLIKELY(!pvMem))
     {
         rtR0MemObjDelete(&pMemSolaris->Core);
@@ -133,5 +638,5 @@
     }
 
-    pMemSolaris->Core.pv  = virtAddr;
+    pMemSolaris->Core.pv  = pvMem;
     pMemSolaris->pvHandle = NULL;
     *ppMem = &pMemSolaris->Core;
@@ -145,17 +650,17 @@
 
     /* Create the object */
-    PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOW, NULL, cb);
+    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOW, NULL, cb);
     if (!pMemSolaris)
         return VERR_NO_MEMORY;
 
     /* Allocate physically low page-aligned memory. */
-    uint64_t physAddr = _4G - 1;
-    caddr_t virtAddr  = vbi_lowmem_alloc(physAddr, cb);
-    if (virtAddr == NULL)
+    uint64_t uPhysHi = _4G - 1;
+    void *pvMem = rtR0SolMemAlloc(uPhysHi, NULL /* puPhys */, cb, PAGESIZE, false /* fContig */);
+    if (RT_UNLIKELY(!pvMem))
     {
         rtR0MemObjDelete(&pMemSolaris->Core);
         return VERR_NO_LOW_MEMORY;
     }
-    pMemSolaris->Core.pv = virtAddr;
+    pMemSolaris->Core.pv = pvMem;
     pMemSolaris->pvHandle = NULL;
     *ppMem = &pMemSolaris->Core;
@@ -174,13 +679,13 @@
 {
 #if HC_ARCH_BITS == 64
-    PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
-    if (!pMemSolaris)
+    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
+    if (RT_UNLIKELY(!pMemSolaris))
         return VERR_NO_MEMORY;
 
-    uint64_t PhysAddr = PhysHighest;
-    void *pvPages = vbi_pages_alloc(&PhysAddr, cb);
+    uint64_t PhysAddr = UINT64_MAX;
+    void *pvPages = rtR0MemObjSolPagesAlloc((uint64_t)PhysHighest, &PhysAddr, cb);
     if (!pvPages)
     {
-        LogRel(("rtR0MemObjNativeAllocPhysNC: vbi_pages_alloc failed.\n"));
+        LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0MemObjSolPagesAlloc failed for cb=%u.\n", cb));
         rtR0MemObjDelete(&pMemSolaris->Core);
         return VERR_NO_MEMORY;
@@ -189,4 +694,5 @@
     pMemSolaris->pvHandle  = pvPages;
 
+    Assert(PhysAddr != UINT64_MAX);
     Assert(!(PhysAddr & PAGE_OFFSET_MASK));
     *ppMem = &pMemSolaris->Core;
@@ -203,6 +709,6 @@
     AssertMsgReturn(PhysHighest >= 16 *_1M, ("PhysHigest=%RHp\n", PhysHighest), VERR_NOT_SUPPORTED);
 
-    PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb);
-    if (!pMemSolaris)
+    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb);
+    if (RT_UNLIKELY(!pMemSolaris))
         return VERR_NO_MEMORY;
 
@@ -228,6 +734,7 @@
          * Allocate one large page.
          */
-        void *pvPages = vbi_large_page_alloc(&PhysAddr, cb);
-        if (pvPages)
+        cmn_err(CE_NOTE,  "calling rtR0MemObjSolLargePageAlloc\n");
+        void *pvPages = rtR0MemObjSolLargePageAlloc(&PhysAddr, cb);
+        if (RT_LIKELY(pvPages))
         {
             AssertMsg(!(PhysAddr & (cb - 1)), ("%RHp\n", PhysAddr));
@@ -247,7 +754,8 @@
          * Allocate physically contiguous memory aligned as specified.
          */
+        cmn_err(CE_NOTE,  "rtR0MemObjNativeAllocPhys->rtR0SolMemAlloc\n");
         AssertCompile(NIL_RTHCPHYS == UINT64_MAX);
         PhysAddr = PhysHighest;
-        caddr_t pvMem = vbi_phys_alloc(&PhysAddr, cb, uAlignment, 1 /* contiguous */);
+        void *pvMem = rtR0SolMemAlloc(PhysHighest, &PhysAddr, cb, uAlignment, true /* fContig */);
         if (RT_LIKELY(pvMem))
         {
@@ -276,5 +784,5 @@
 
     /* Create the object. */
-    PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb);
+    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb);
     if (!pMemSolaris)
         return VERR_NO_MEMORY;
@@ -296,5 +804,5 @@
 
     /* Create the locking object */
-    PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
+    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
     if (!pMemSolaris)
         return VERR_NO_MEMORY;
@@ -306,16 +814,15 @@
     if (fAccess & RTMEM_PROT_EXEC)
         fPageAccess = S_EXEC;
-    void *pvPageList = NULL;
-    int rc = vbi_lock_va((caddr_t)R3Ptr, cb, fPageAccess, &pvPageList);
-    if (rc != 0)
-    {
-        LogRel(("rtR0MemObjNativeLockUser: vbi_lock_va failed rc=%d\n", rc));
+    int rc = rtR0MemObjSolLock((void *)R3Ptr, cb, fPageAccess);
+    if (RT_FAILURE(rc))
+    {
+        LogRel(("rtR0MemObjNativeLockUser: rtR0MemObjSolLock failed rc=%d\n", rc));
         rtR0MemObjDelete(&pMemSolaris->Core);
-        return VERR_LOCK_FAILED;
+        return rc;
     }
 
     /* Fill in the object attributes and return successfully. */
     pMemSolaris->Core.u.Lock.R0Process  = R0Process;
-    pMemSolaris->pvHandle               = pvPageList;
+    pMemSolaris->pvHandle               = NULL;
     pMemSolaris->fAccess                = fPageAccess;
     *ppMem = &pMemSolaris->Core;
@@ -328,5 +835,5 @@
     NOREF(fAccess);
 
-    PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, pv, cb);
+    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, pv, cb);
     if (!pMemSolaris)
         return VERR_NO_MEMORY;
@@ -338,17 +845,16 @@
     if (fAccess & RTMEM_PROT_EXEC)
         fPageAccess = S_EXEC;
-    void *pvPageList = NULL;
-    int rc = vbi_lock_va((caddr_t)pv, cb, fPageAccess, &pvPageList);
-    if (rc != 0)
-    {
-        LogRel(("rtR0MemObjNativeLockKernel: vbi_lock_va failed rc=%d\n", rc));
+    int rc = rtR0MemObjSolLock(pv, cb, fPageAccess);
+    if (RT_FAILURE(rc))
+    {
+        LogRel(("rtR0MemObjNativeLockKernel: rtR0MemObjSolLock failed rc=%d\n", rc));
         rtR0MemObjDelete(&pMemSolaris->Core);
-        return VERR_LOCK_FAILED;
+        return rc;
     }
 
     /* Fill in the object attributes and return successfully. */
     pMemSolaris->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
-    pMemSolaris->pvHandle = pvPageList;
-    pMemSolaris->fAccess = fPageAccess;
+    pMemSolaris->pvHandle              = NULL;
+    pMemSolaris->fAccess               = fPageAccess;
     *ppMem = &pMemSolaris->Core;
     return VINF_SUCCESS;
@@ -358,16 +864,16 @@
 DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
 {
-    PRTR0MEMOBJSOLARIS  pMemSolaris;
+    PRTR0MEMOBJSOL  pMemSolaris;
 
     /*
      * Use xalloc.
      */
-    void *pv = vmem_xalloc(heap_arena, cb, uAlignment, 0 /*phase*/, 0 /*nocross*/,
-                           NULL /*minaddr*/, NULL /*maxaddr*/, VM_SLEEP);
+    void *pv = vmem_xalloc(heap_arena, cb, uAlignment, 0 /* phase */, 0 /* nocross */,
+                           NULL /* minaddr */, NULL /* maxaddr */, VM_SLEEP);
     if (RT_UNLIKELY(!pv))
         return VERR_NO_MEMORY;
 
     /* Create the object. */
-    pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
+    pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
     if (!pMemSolaris)
     {
@@ -411,14 +917,14 @@
      * Get parameters from the source object.
      */
-    PRTR0MEMOBJSOLARIS  pMemToMapSolaris = (PRTR0MEMOBJSOLARIS)pMemToMap;
-    void               *pv               = pMemToMapSolaris->Core.pv;
-    size_t              cb               = pMemToMapSolaris->Core.cb;
-    pgcnt_t             cPages           = cb >> PAGE_SHIFT;
+    PRTR0MEMOBJSOL  pMemToMapSolaris     = (PRTR0MEMOBJSOL)pMemToMap;
+    void           *pv                   = pMemToMapSolaris->Core.pv;
+    size_t          cb                   = pMemToMapSolaris->Core.cb;
+    size_t          cPages               = cb >> PAGE_SHIFT;
 
     /*
      * Create the mapping object
      */
-    PRTR0MEMOBJSOLARIS pMemSolaris;
-    pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pv, cb);
+    PRTR0MEMOBJSOL pMemSolaris;
+    pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pv, cb);
     if (RT_UNLIKELY(!pMemSolaris))
         return VERR_NO_MEMORY;
@@ -432,5 +938,5 @@
          */
         if (pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS_NC)
-            rc = vbi_pages_premap(pMemToMapSolaris->pvHandle, cb, paPhysAddrs);
+            rc = rtR0MemObjSolPagesPreMap(pMemToMapSolaris->pvHandle, cb, paPhysAddrs, cPages);
         else if (   pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS
                  && pMemToMapSolaris->fLargePage)
@@ -439,18 +945,20 @@
             for (pgcnt_t iPage = 0; iPage < cPages; iPage++, Phys += PAGE_SIZE)
                 paPhysAddrs[iPage] = Phys;
-            rc = vbi_large_page_premap(pMemToMapSolaris->pvHandle, cb);
+            rc = rtR0MemObjSolLargePagePreMap(pMemToMapSolaris->pvHandle, cb);
         }
         else
         {
-            /* Have kernel mapping, just translate virtual to physical. */
+            /*
+             * Have kernel mapping, just translate virtual to physical.
+             */
             AssertPtr(pv);
-            rc = 0;
-            for (pgcnt_t iPage = 0; iPage < cPages; iPage++)
+            rc = VINF_SUCCESS;
+            for (size_t iPage = 0; iPage < cPages; iPage++)
             {
-                paPhysAddrs[iPage] = vbi_va_to_pa(pv);
+                paPhysAddrs[iPage] = rtR0MemObjSolVirtToPhys(pv);
                 if (RT_UNLIKELY(paPhysAddrs[iPage] == -(uint64_t)1))
                 {
                     LogRel(("rtR0MemObjNativeMapUser: no page to map.\n"));
-                    rc = -1;
+                    rc = VERR_MAP_FAILED;
                     break;
                 }
@@ -458,12 +966,18 @@
             }
         }
-        if (!rc)
-        {
+        if (RT_SUCCESS(rc))
+        {
+            unsigned fPageAccess = PROT_READ;
+            if (fProt & RTMEM_PROT_WRITE)
+                fPageAccess |= PROT_WRITE;
+            if (fProt & RTMEM_PROT_EXEC)
+                fPageAccess |= PROT_EXEC;
+
             /*
              * Perform the actual mapping.
              */
             caddr_t UserAddr = NULL;
-            rc = vbi_user_map(&UserAddr, fProt, paPhysAddrs, cb);
-            if (!rc)
+            rc = rtR0MemObjSolUserMap(&UserAddr, fPageAccess, paPhysAddrs, cb);
+            if (RT_SUCCESS(rc))
             {
                 pMemSolaris->Core.u.Mapping.R0Process = R0Process;
@@ -475,6 +989,7 @@
             }
 
-            LogRel(("rtR0MemObjNativeMapUser: vbi_user_map failed.\n"));
-        }
+            LogRel(("rtR0MemObjNativeMapUser: rtR0MemObjSolUserMap failed rc=%d.\n", rc));
+        }
+
         rc = VERR_MAP_FAILED;
         kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages);
@@ -499,5 +1014,5 @@
 DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
 {
-    PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)pMem;
+    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;
 
     switch (pMemSolaris->Core.enmType)
@@ -507,7 +1022,7 @@
             {
                 uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
-                return vbi_va_to_pa(pb);
+                return rtR0MemObjSolVirtToPhys(pb);
             }
-            return vbi_page_to_pa(pMemSolaris->pvHandle, iPage);
+            return rtR0MemObjSolPageToPhys(pMemSolaris->pvHandle, iPage);
 
         case RTR0MEMOBJTYPE_PAGE:
@@ -516,9 +1031,9 @@
         {
             uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
-            return vbi_va_to_pa(pb);
+            return rtR0MemObjSolVirtToPhys(pb);
         }
 
         /*
-         * Although mapping can be handled by vbi_va_to_pa(offset) like the above case,
+         * Although mapping can be handled by rtR0MemObjSolVirtToPhys(offset) like the above case,
          * request it from the parent so that we have a clear distinction between CONT/PHYS_NC.
          */
Index: /trunk/src/VBox/Runtime/r0drv/solaris/vbi/memobj-r0drv-solaris.h
===================================================================
--- /trunk/src/VBox/Runtime/r0drv/solaris/vbi/memobj-r0drv-solaris.h	(revision 40966)
+++ /trunk/src/VBox/Runtime/r0drv/solaris/vbi/memobj-r0drv-solaris.h	(revision 40966)
@@ -0,0 +1,296 @@
+/* $Id$ */
+/** @file
+ * IPRT - Ring-0 Memory Objects - Segment driver, Solaris.
+ */
+
+/*
+ * Copyright (C) 2012 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+
+#ifndef ___r0drv_solaris_memobj_r0drv_solaris_h
+#define ___r0drv_solaris_memobj_r0drv_solaris_h
+
+/*******************************************************************************
+*   Header Files                                                               *
+*******************************************************************************/
+#include "../the-solaris-kernel.h"
+
+
+/*******************************************************************************
+*   Structures and Typedefs                                                    *
+*******************************************************************************/
+typedef struct SEGVBOX_CRARGS
+{
+    uint64_t *paPhysAddrs;
+    uint_t    fPageAccess;
+} SEGVBOX_CRARGS;
+typedef SEGVBOX_CRARGS *PSEGVBOX_CRARGS;
+
+typedef struct SEGVBOX_DATA
+{
+    uint_t    fPageAccess;
+} SEGVBOX_DATA;
+typedef SEGVBOX_DATA *PSEGVBOX_DATA;
+
+static struct seg_ops s_SegVBoxOps;
+static vnode_t s_segVBoxVnode;
+
+
+DECLINLINE(int) rtR0SegVBoxSolCreate(seg_t *pSeg, void *pvArgs)
+{
+    struct as      *pAddrSpace = pSeg->s_as;
+    PSEGVBOX_CRARGS pArgs      = pvArgs;
+    PSEGVBOX_DATA   pData      = kmem_zalloc(sizeof(*pData), KM_SLEEP);
+
+    AssertPtr(pAddrSpace);
+    AssertPtr(pArgs);
+    AssertPtr(pData);
+
+    hat_map(pAddrSpace->a_hat, pSeg->s_base, pSeg->s_size, HAT_MAP);
+    pData->fPageAccess = pArgs->fPageAccess | PROT_USER;
+
+    pSeg->s_ops  = &s_SegVBoxOps;
+    pSeg->s_data = pData;
+
+    /*
+     * Now load the locked mappings to the pages.
+     */
+    caddr_t virtAddr = pSeg->s_base;
+    pgcnt_t cPages   = (pSeg->s_size + PAGESIZE - 1) >> PAGESHIFT;
+    for (pgcnt_t iPage = 0; iPage < cPages; ++iPage, virtAddr += PAGESIZE)
+    {
+        hat_devload(pAddrSpace->a_hat, virtAddr, PAGESIZE, pArgs->paPhysAddrs[iPage] >> PAGESHIFT,
+                    pData->fPageAccess | HAT_UNORDERED_OK, HAT_LOAD | HAT_LOAD_LOCK);
+    }
+
+    return 0;
+}
+
+
+static int rtR0SegVBoxSolDup(seg_t *pSrcSeg, seg_t *pDstSeg)
+{
+    /*
+     * Duplicate a segment and return the new segment in 'pDstSeg'.
+     */
+    PSEGVBOX_DATA pSrcData = pSrcSeg->s_data;
+    PSEGVBOX_DATA pDstData = kmem_zalloc(sizeof(*pDstData), KM_SLEEP);
+
+    AssertPtr(pDstData);
+    AssertPtr(pSrcData);
+
+    pDstData->fPageAccess  = pSrcData->fPageAccess;
+    pDstSeg->s_ops         = &s_SegVBoxOps;
+    pDstSeg->s_data        = pDstData;
+
+    return 0;
+}
+
+
+static int rtR0SegVBoxSolUnmap(seg_t *pSeg, caddr_t virtAddr, size_t cb)
+{
+    /** @todo make these into release assertions. */
+    if (   virtAddr < pSeg->s_base
+        || virtAddr + cb > pSeg->s_base + pSeg->s_size
+        || (cb & PAGEOFFSET) || ((uintptr_t)virtAddr & PAGEOFFSET))
+    {
+        panic("rtR0SegVBoxSolUnmap");
+    }
+
+    if (virtAddr != pSeg->s_base || cb != pSeg->s_size)
+        return ENOTSUP;
+
+    hat_unload(pSeg->s_as->a_hat, virtAddr, cb, HAT_UNLOAD_UNMAP | HAT_UNLOAD_UNLOCK);
+
+    seg_free(pSeg);
+    return 0;
+}
+
+
+static void rtR0SegVBoxSolFree(seg_t *pSeg)
+{
+    PSEGVBOX_DATA pData = pSeg->s_data;
+    kmem_free(pData, sizeof(*pData));
+}
+
+
+static int rtR0SegVBoxSolFault(struct hat *pHat, seg_t *pSeg, caddr_t virtAddr, size_t cb, enum fault_type FaultType,
+                               enum seg_rw ReadWrite)
+{
+    /*
+     * We would demand fault if the (u)read() path would SEGOP_FAULT() on buffers mapped in via our
+     * segment driver i.e. prefaults before DMA. Don't fail in such case where we're called directly,
+     * see #5047.
+     */
+    return 0;
+}
+
+
+static int rtR0SegVBoxSolFaultA(seg_t *pSeg, caddr_t virtAddr)
+{
+    return 0;
+}
+
+
+static int rtR0SegVBoxSolSetProt(seg_t *pSeg, caddr_t virtAddr, size_t cb, uint_t fPageAccess)
+{
+    return EACCES;
+}
+
+
+static int rtR0SegVBoxSolCheckProt(seg_t *pSeg, caddr_t virtAddr, size_t cb, uint_t fPageAccess)
+{
+    return EINVAL;
+}
+
+
+static int rtR0SegVBoxSolKluster(seg_t *pSeg, caddr_t virtAddr, ssize_t Delta)
+{
+    return -1;
+}
+
+
+static int rtR0SegVBoxSolSync(seg_t *pSeg, caddr_t virtAddr, size_t cb, int Attr, uint_t fFlags)
+{
+    return 0;
+}
+
+
+static size_t rtR0SegVBoxSolInCore(seg_t *pSeg, caddr_t virtAddr, size_t cb, char *pVec)
+{
+    size_t cbLen = (cb + PAGEOFFSET) & PAGEMASK;
+    for (size_t off = 0; off < cbLen; off += PAGESIZE)
+        *pVec++ = 1;
+    return cbLen; /* all pages are locked-in for this driver, report the full (rounded) length */
+}
+
+
+static int rtR0SegVBoxSolLockOp(seg_t *pSeg, caddr_t virtAddr, size_t cb, int Attr, int Op, ulong_t *pLockMap, size_t off)
+{
+    return 0;
+}
+
+
+static int rtR0SegVBoxSolGetProt(seg_t *pSeg, caddr_t virtAddr, size_t cb, uint_t *pafPageAccess)
+{
+    PSEGVBOX_DATA pData = pSeg->s_data;
+    size_t iPage = seg_page(pSeg, virtAddr + cb) - seg_page(pSeg, virtAddr) + 1;
+    if (iPage)
+    {
+        do
+        {
+            iPage--;
+            pafPageAccess[iPage] = pData->fPageAccess;
+        } while (iPage);
+    }
+    return 0;
+}
+
+
+static u_offset_t rtR0SegVBoxSolGetOffset(seg_t *pSeg, caddr_t virtAddr)
+{
+    return ((uintptr_t)virtAddr - (uintptr_t)pSeg->s_base);
+}
+
+
+static int rtR0SegVBoxSolGetType(seg_t *pSeg, caddr_t virtAddr)
+{
+    return MAP_SHARED;
+}
+
+
+static int rtR0SegVBoxSolGetVp(seg_t *pSeg, caddr_t virtAddr, vnode_t **ppVnode)
+{
+    *ppVnode = &s_segVBoxVnode;
+    return 0;
+}
+
+
+static int rtR0SegVBoxSolAdvise(seg_t *pSeg, caddr_t virtAddr, size_t cb, uint_t Behav /* wut? */)
+{
+    return 0;
+}
+
+
+static void rtR0SegVBoxSolDump(seg_t *pSeg)
+{
+    /* Nothing to do. */
+}
+
+
+static int rtR0SegVBoxSolPageLock(seg_t *pSeg, caddr_t virtAddr, size_t cb, page_t ***pppPage, enum lock_type LockType, enum seg_rw ReadWrite)
+{
+    return ENOTSUP;
+}
+
+
+static int rtR0SegVBoxSolSetPageSize(seg_t *pSeg, caddr_t virtAddr, size_t cb, uint_t SizeCode)
+{
+    return ENOTSUP;
+}
+
+
+static int rtR0SegVBoxSolGetMemId(seg_t *pSeg, caddr_t virtAddr, memid_t *pMemId)
+{
+    return ENODEV;
+}
+
+
+static lgrp_mem_policy_info_t *rtR0SegVBoxSolGetPolicy(seg_t *pSeg, caddr_t virtAddr)
+{
+    return NULL;
+}
+
+
+static int rtR0SegVBoxSolCapable(seg_t *pSeg, segcapability_t Capab)
+{
+    return 0;
+}
+
+
+static struct seg_ops s_SegVBoxOps =
+{
+    rtR0SegVBoxSolDup,
+    rtR0SegVBoxSolUnmap,
+    rtR0SegVBoxSolFree,
+    rtR0SegVBoxSolFault,
+    rtR0SegVBoxSolFaultA,
+    rtR0SegVBoxSolSetProt,
+    rtR0SegVBoxSolCheckProt,
+    rtR0SegVBoxSolKluster,
+    NULL,                       /* swapout */
+    rtR0SegVBoxSolSync,
+    rtR0SegVBoxSolInCore,
+    rtR0SegVBoxSolLockOp,
+    rtR0SegVBoxSolGetProt,
+    rtR0SegVBoxSolGetOffset,
+    rtR0SegVBoxSolGetType,
+    rtR0SegVBoxSolGetVp,
+    rtR0SegVBoxSolAdvise,
+    rtR0SegVBoxSolDump,
+    rtR0SegVBoxSolPageLock,
+    rtR0SegVBoxSolSetPageSize,
+    rtR0SegVBoxSolGetMemId,
+    rtR0SegVBoxSolGetPolicy,
+    rtR0SegVBoxSolCapable
+};
+
+#endif /* ___r0drv_solaris_memobj_r0drv_solaris_h */
+
Index: /trunk/src/VBox/Runtime/r0drv/solaris/vbi/mp-r0drv-solaris.c
===================================================================
--- /trunk/src/VBox/Runtime/r0drv/solaris/vbi/mp-r0drv-solaris.c	(revision 40965)
+++ /trunk/src/VBox/Runtime/r0drv/solaris/vbi/mp-r0drv-solaris.c	(revision 40966)
@@ -33,4 +33,5 @@
 #include <iprt/mp.h>
 #include <iprt/cpuset.h>
+#include <iprt/thread.h>
 
 #include <iprt/asm.h>
@@ -41,4 +42,6 @@
 #include "r0drv/mp-r0drv.h"
 
+typedef int FNRTMPSOLWORKER(void *pvUser1, void *pvUser2, void *pvUser3);
+typedef FNRTMPSOLWORKER *PFNRTMPSOLWORKER;
 
 
@@ -51,5 +54,5 @@
 RTDECL(RTCPUID) RTMpCpuId(void)
 {
-    return vbi_cpu_id();
+    return CPU->cpu_id;
 }
 
@@ -57,5 +60,5 @@
 RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu)
 {
-    return idCpu < RTCPUSET_MAX_CPUS && idCpu < vbi_cpu_maxcount() ? idCpu : -1;
+    return idCpu < RTCPUSET_MAX_CPUS && idCpu <= max_cpuid ? idCpu : -1;
 }
 
@@ -63,5 +66,5 @@
 RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu)
 {
-    return (unsigned)iCpu < vbi_cpu_maxcount() ? iCpu : NIL_RTCPUID;
+    return (unsigned)iCpu <= max_cpuid ? iCpu : NIL_RTCPUID;
 }
 
@@ -69,5 +72,5 @@
 RTDECL(RTCPUID) RTMpGetMaxCpuId(void)
 {
-    return vbi_max_cpu_id();
+    return max_cpuid;
 }
 
@@ -78,12 +81,8 @@
      * We cannot query CPU status recursively, check cpu member from cached set.
      */
-    if (idCpu >= vbi_cpu_count())
+    if (idCpu >= ncpus)
         return false;
 
-    return RTCpuSetIsMember(&g_rtMpSolarisCpuSet, idCpu);
-
-#if 0
-    return idCpu < vbi_cpu_count() && vbi_cpu_online(idCpu);
-#endif
+    return RTCpuSetIsMember(&g_rtMpSolCpuSet, idCpu);
 }
 
@@ -91,5 +90,5 @@
 RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
 {
-    return idCpu < vbi_cpu_count();
+    return idCpu < ncpus;
 }
 
@@ -113,5 +112,5 @@
 RTDECL(RTCPUID) RTMpGetCount(void)
 {
-    return vbi_cpu_count();
+    return ncpus;
 }
 
@@ -122,5 +121,5 @@
      * We cannot query CPU status recursively, return the cached set.
      */
-    *pSet = g_rtMpSolarisCpuSet;
+    *pSet = g_rtMpSolCpuSet;
     return pSet;
 }
@@ -135,4 +134,50 @@
 
 
+/**
+ * Wrapper to Solaris IPI infrastructure.
+ *
+ * @param    pCpuSet        Pointer to Solaris CPU set.
+ * @param    pfnSolWorker   Function to execute on target CPU(s).
+ * @param    pArgs          Pointer to RTMPARGS to pass to @a pfnSolWorker.
+ *
+ * @remarks  Returns nothing; invalid arguments are caught by assertions.
+ */
+static void rtMpSolCrossCall(PRTSOLCPUSET pCpuSet, PFNRTMPSOLWORKER pfnSolWorker, PRTMPARGS pArgs)
+{
+    AssertPtrReturnVoid(pCpuSet);
+    AssertPtrReturnVoid(pfnSolWorker);
+    AssertPtrReturnVoid(pArgs);
+
+    if (g_frtSolOldIPI)
+    {
+        if (g_frtSolOldIPIUlong)
+        {
+            g_rtSolXcCall.u.pfnSol_xc_call_old_ulong((xc_arg_t)pArgs,          /* Arg to IPI function */
+                                                     0,                        /* Arg2, ignored */
+                                                     0,                        /* Arg3, ignored */
+                                                     IPRT_SOL_X_CALL_HIPRI,    /* IPI priority */
+                                                     pCpuSet->auCpus[0],       /* Target CPU(s) */
+                                                     (xc_func_t)pfnSolWorker); /* Function to execute on target(s) */
+        }
+        else
+        {
+            g_rtSolXcCall.u.pfnSol_xc_call_old((xc_arg_t)pArgs,          /* Arg to IPI function */
+                                               0,                        /* Arg2, ignored */
+                                               0,                        /* Arg3, ignored */
+                                               IPRT_SOL_X_CALL_HIPRI,    /* IPI priority */
+                                               *pCpuSet,                 /* Target CPU set */
+                                               (xc_func_t)pfnSolWorker); /* Function to execute on target(s) */
+        }
+    }
+    else
+    {
+        g_rtSolXcCall.u.pfnSol_xc_call((xc_arg_t)pArgs,          /* Arg to IPI function */
+                                       0,                        /* Arg2 */
+                                       0,                        /* Arg3 */
+                                       &pCpuSet->auCpus[0],      /* Target CPU set */
+                                       (xc_func_t)pfnSolWorker); /* Function to execute on target(s) */
+    }
+}
+
 
 /**
@@ -144,5 +189,5 @@
  * @param   uIgnored2   Ignored.
  */
-static int rtmpOnAllSolarisWrapper(void *uArg, void *uIgnored1, void *uIgnored2)
+static int rtMpSolOnAllCpuWrapper(void *uArg, void *uIgnored1, void *uIgnored2)
 {
     PRTMPARGS pArgs = (PRTMPARGS)(uArg);
@@ -174,9 +219,14 @@
     Args.cHits = 0;
 
-    vbi_preempt_disable();
-
-    vbi_execute_on_all(rtmpOnAllSolarisWrapper, &Args);
-
-    vbi_preempt_enable();
+    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+    RTThreadPreemptDisable(&PreemptState);
+
+    RTSOLCPUSET CpuSet;
+    for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
+        CpuSet.auCpus[i] = (ulong_t)-1L;
+
+    rtMpSolCrossCall(&CpuSet, rtMpSolOnAllCpuWrapper, &Args);
+
+    RTThreadPreemptRestore(&PreemptState);
 
     return VINF_SUCCESS;
@@ -192,5 +242,5 @@
  * @param   uIgnored2   Ignored.
  */
-static int rtmpOnOthersSolarisWrapper(void *uArg, void *uIgnored1, void *uIgnored2)
+static int rtMpSolOnOtherCpusWrapper(void *uArg, void *uIgnored1, void *uIgnored2)
 {
     PRTMPARGS pArgs = (PRTMPARGS)(uArg);
@@ -210,7 +260,4 @@
     RTMPARGS Args;
     RT_ASSERT_INTS_ON();
-
-    /* The caller is supposed to have disabled preemption, but take no chances. */
-    vbi_preempt_disable();
 
     Args.pfnWorker = pfnWorker;
@@ -220,7 +267,16 @@
     Args.cHits = 0;
 
-    vbi_execute_on_others(rtmpOnOthersSolarisWrapper, &Args);
-
-    vbi_preempt_enable();
+    /* The caller is supposed to have disabled preemption, but take no chances. */
+    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+    RTThreadPreemptDisable(&PreemptState);
+
+    RTSOLCPUSET CpuSet;
+    for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
+        CpuSet.auCpus[i] = (ulong_t)-1L;
+    BT_CLEAR(CpuSet.auCpus, RTMpCpuId());
+
+    rtMpSolCrossCall(&CpuSet, rtMpSolOnOtherCpusWrapper, &Args);
+
+    RTThreadPreemptRestore(&PreemptState);
 
     return VINF_SUCCESS;
@@ -232,10 +288,11 @@
  * for the RTMpOnSpecific API.
  *
- *
  * @param   uArgs       Pointer to the RTMPARGS package.
  * @param   uIgnored1   Ignored.
  * @param   uIgnored2   Ignored.
- */
-static int rtmpOnSpecificSolarisWrapper(void *uArg, void *uIgnored1, void *uIgnored2)
+ *
+ * @returns Solaris error code.
+ */
+static int rtMpSolOnSpecificCpuWrapper(void *uArg, void *uIgnored1, void *uIgnored2)
 {
     PRTMPARGS pArgs = (PRTMPARGS)(uArg);
@@ -257,5 +314,5 @@
     RT_ASSERT_INTS_ON();
 
-    if (idCpu >= vbi_cpu_count())
+    if (idCpu >= ncpus)
         return VERR_CPU_NOT_FOUND;
 
@@ -269,9 +326,15 @@
     Args.cHits = 0;
 
-    vbi_preempt_disable();
-
-    vbi_execute_on_one(rtmpOnSpecificSolarisWrapper, &Args, idCpu);
-
-    vbi_preempt_enable();
+    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+    RTThreadPreemptDisable(&PreemptState);
+
+    RTSOLCPUSET CpuSet;
+    for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
+        CpuSet.auCpus[i] = 0;
+    BT_SET(CpuSet.auCpus, idCpu);
+
+    rtMpSolCrossCall(&CpuSet, rtMpSolOnSpecificCpuWrapper, &Args);
+
+    RTThreadPreemptRestore(&PreemptState);
 
     Assert(ASMAtomicUoReadU32(&Args.cHits) <= 1);
Index: /trunk/src/VBox/Runtime/r0drv/solaris/vbi/mpnotification-r0drv-solaris.c
===================================================================
--- /trunk/src/VBox/Runtime/r0drv/solaris/vbi/mpnotification-r0drv-solaris.c	(revision 40965)
+++ /trunk/src/VBox/Runtime/r0drv/solaris/vbi/mpnotification-r0drv-solaris.c	(revision 40966)
@@ -42,71 +42,110 @@
 *   Global Variables                                                           *
 *******************************************************************************/
-/** CPU watch callback handle. */
-static vbi_cpu_watch_t *g_hVbiCpuWatch = NULL;
+/** Whether CPUs are being watched or not. */
+static volatile bool g_fSolCpuWatch = false;
 /** Set of online cpus that is maintained by the MP callback.
  * This avoids locking issues querying the set from the kernel as well as
  * eliminating any uncertainty regarding the online status during the
  * callback. */
-RTCPUSET g_rtMpSolarisCpuSet;
+RTCPUSET g_rtMpSolCpuSet;
+
+/**
+ * Internal solaris representation for watching CPUs.
+ */
+typedef struct RTMPSOLWATCHCPUS
+{
+    /** Function pointer to Mp worker. */
+    PFNRTMPWORKER   pfnWorker;
+    /** Argument to pass to the Mp worker. */
+    void           *pvArg;
+} RTMPSOLWATCHCPUS;
+typedef RTMPSOLWATCHCPUS *PRTMPSOLWATCHCPUS;
 
 
-static void rtMpNotificationSolarisOnCurrentCpu(void *pvArgs, void *uIgnored1, void *uIgnored2)
+/**
+ * PFNRTMPWORKER worker for executing Mp events on the target CPU.
+ *
+ * @param    idCpu          The current CPU Id.
+ * @param    pvArg          Opaque pointer to event type (online/offline).
+ * @param    pvIgnored1     Ignored.
+ */
+static void rtMpNotificationSolOnCurrentCpu(RTCPUID idCpu, void *pvArg, void *pvIgnored1)
 {
-    NOREF(uIgnored1);
-    NOREF(uIgnored2);
+    NOREF(pvIgnored1);
+    NOREF(idCpu);
 
-    PRTMPARGS pArgs = (PRTMPARGS)(pvArgs);
+    PRTMPARGS pArgs = (PRTMPARGS)pvArg;
     AssertRelease(pArgs && pArgs->idCpu == RTMpCpuId());
-    Assert(pArgs->pvUser2);
+    Assert(pArgs->pvUser1);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
 
-    int online = *(int *)pArgs->pvUser2;
-    if (online)
+    RTMPEVENT enmMpEvent = *(RTMPEVENT *)pArgs->pvUser1;
+    rtMpNotificationDoCallbacks(enmMpEvent, pArgs->idCpu);
+}
+
+
+/**
+ * Solaris callback function for Mp event notification.
+ *
+ * @param    CpuState   The current event/state of the CPU.
+ * @param    iCpu       Which CPU is this event for.
+ * @param    pvArg      Ignored.
+ *
+ * @remarks This function assumes index == RTCPUID.
+ * @returns Solaris error code.
+ */
+static int rtMpNotificationCpuEvent(cpu_setup_t CpuState, int iCpu, void *pvArg)
+{
+    RTMPEVENT enmMpEvent;
+
+    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+    RTThreadPreemptDisable(&PreemptState);
+
+    /*
+     * Update our CPU set structures first regardless of whether we've been
+     * scheduled on the right CPU or not, this is just atomic accounting.
+     */
+    if (CpuState == CPU_ON)
     {
-        RTCpuSetAdd(&g_rtMpSolarisCpuSet, pArgs->idCpu);
-        rtMpNotificationDoCallbacks(RTMPEVENT_ONLINE, pArgs->idCpu);
+        enmMpEvent = RTMPEVENT_ONLINE;
+        RTCpuSetAdd(&g_rtMpSolCpuSet, iCpu);
+    }
+    else if (CpuState == CPU_OFF)
+    {
+        enmMpEvent = RTMPEVENT_OFFLINE;
+        RTCpuSetDel(&g_rtMpSolCpuSet, iCpu);
+    }
+    else
+    {   RTThreadPreemptRestore(&PreemptState);   return 0;   /* don't leak the disabled preemption state */ }
+
+    /*
+     * Since we don't absolutely need to do CPU bound code in any of the CPU offline
+     * notification hooks, run it on the current CPU. Scheduling a callback to execute
+     * on the CPU going offline at this point is too late and will not work reliably.
+     */
+    bool fRunningOnTargetCpu = iCpu == RTMpCpuId();
+    if (   fRunningOnTargetCpu == true
+        || enmMpEvent == RTMPEVENT_OFFLINE)
+    {
+        rtMpNotificationDoCallbacks(enmMpEvent, iCpu);
     }
     else
     {
-        RTCpuSetDel(&g_rtMpSolarisCpuSet, pArgs->idCpu);
-        rtMpNotificationDoCallbacks(RTMPEVENT_OFFLINE, pArgs->idCpu);
-    }
-}
-
-
-static void rtMpNotificationSolarisCallback(void *pvUser, int iCpu, int online)
-{
-    vbi_preempt_disable();
-
-    RTMPARGS Args;
-    RT_ZERO(Args);
-    Args.pvUser1 = pvUser;
-    Args.pvUser2 = &online;
-    Args.idCpu   = iCpu;
-
-    /*
-     * If we're not on the target CPU, schedule (synchronous) the event notification callback
-     * to run on the target CPU i.e. the one pertaining to the MP event.
-     */
-    bool fRunningOnTargetCpu = iCpu == RTMpCpuId();      /* ASSUMES iCpu == RTCPUID */
-    if (fRunningOnTargetCpu)
-        rtMpNotificationSolarisOnCurrentCpu(&Args, NULL /* pvIgnored1 */, NULL /* pvIgnored2 */);
-    else
-    {
-        if (online)
-            vbi_execute_on_one(rtMpNotificationSolarisOnCurrentCpu, &Args, iCpu);
-        else
-        {
-            /*
-             * Since we don't absolutely need to do CPU bound code in any of the CPU offline
-             * notification hooks, run it on the current CPU. Scheduling a callback to execute
-             * on the CPU going offline at this point is too late and will not work reliably.
-             */
-            RTCpuSetDel(&g_rtMpSolarisCpuSet, iCpu);
-            rtMpNotificationDoCallbacks(RTMPEVENT_OFFLINE, iCpu);
-        }
+        /*
+         * We're not on the target CPU, schedule (synchronous) the event notification callback
+         * to run on the target CPU i.e. the CPU that was online'd.
+         */
+        RTMPARGS Args;
+        RT_ZERO(Args);
+        Args.pvUser1 = &enmMpEvent;
+        Args.pvUser2 = NULL;
+        Args.idCpu   = iCpu;
+        RTMpOnSpecific(iCpu, rtMpNotificationSolOnCurrentCpu, &Args, NULL /* pvIgnored1 */);
     }
 
-    vbi_preempt_enable();
+    RTThreadPreemptRestore(&PreemptState);
+
+    NOREF(pvArg);
+    return 0;
 }
 
@@ -114,13 +153,21 @@
 DECLHIDDEN(int) rtR0MpNotificationNativeInit(void)
 {
-    if (g_hVbiCpuWatch != NULL)
+    if (ASMAtomicReadBool(&g_fSolCpuWatch) == true)
         return VERR_WRONG_ORDER;
 
     /*
-     * Register the callback building the online cpu set as we
-     * do so (current_too = 1).
+     * Register the callback building the online cpu set as we do so.
      */
-    RTCpuSetEmpty(&g_rtMpSolarisCpuSet);
-    g_hVbiCpuWatch = vbi_watch_cpus(rtMpNotificationSolarisCallback, NULL, 1 /*current_too*/);
+    RTCpuSetEmpty(&g_rtMpSolCpuSet);
+
+    mutex_enter(&cpu_lock);
+    register_cpu_setup_func(rtMpNotificationCpuEvent, NULL /* pvArg */);
+
+    for (int i = 0; i < (int)RTMpGetCount(); ++i)
+        if (cpu_is_online(cpu[i]))
+            rtMpNotificationCpuEvent(CPU_ON, i, NULL /* pvArg */);
+
+    ASMAtomicWriteBool(&g_fSolCpuWatch, true);
+    mutex_exit(&cpu_lock);
 
     return VINF_SUCCESS;
@@ -130,7 +177,11 @@
 DECLHIDDEN(void) rtR0MpNotificationNativeTerm(void)
 {
-    if (g_hVbiCpuWatch != NULL)
-        vbi_ignore_cpus(g_hVbiCpuWatch);
-    g_hVbiCpuWatch = NULL;
+    if (ASMAtomicReadBool(&g_fSolCpuWatch) == true)
+    {
+        mutex_enter(&cpu_lock);
+        unregister_cpu_setup_func(rtMpNotificationCpuEvent, NULL /* pvArg */);
+        ASMAtomicWriteBool(&g_fSolCpuWatch, false);
+        mutex_exit(&cpu_lock);
+    }
 }
 
Index: /trunk/src/VBox/Runtime/r0drv/solaris/vbi/process-r0drv-solaris.c
===================================================================
--- /trunk/src/VBox/Runtime/r0drv/solaris/vbi/process-r0drv-solaris.c	(revision 40965)
+++ /trunk/src/VBox/Runtime/r0drv/solaris/vbi/process-r0drv-solaris.c	(revision 40966)
@@ -43,5 +43,7 @@
 RTR0DECL(RTR0PROCESS) RTR0ProcHandleSelf(void)
 {
-    return (RTR0PROCESS)vbi_proc();
+    proc_t *pProcess = NULL;
+    if (drv_getparm(UPROCP, &pProcess) != 0) pProcess = NULL;  /* be safe if drv_getparm fails */
+    return (RTR0PROCESS)pProcess;
 }
 
Index: /trunk/src/VBox/Runtime/r0drv/solaris/vbi/thread-r0drv-solaris.c
===================================================================
--- /trunk/src/VBox/Runtime/r0drv/solaris/vbi/thread-r0drv-solaris.c	(revision 40965)
+++ /trunk/src/VBox/Runtime/r0drv/solaris/vbi/thread-r0drv-solaris.c	(revision 40966)
@@ -40,9 +40,11 @@
 #include <iprt/mp.h>
 
-
+#define SOL_THREAD_PREEMPT       (*((char *)curthread + g_offrtSolThreadPreempt))
+#define SOL_CPU_RUNRUN           (*((char *)CPU + g_offrtSolCpuPreempt))
+#define SOL_CPU_KPRUNRUN         (*((char *)CPU + g_offrtSolCpuForceKernelPreempt))
 
 RTDECL(RTNATIVETHREAD) RTThreadNativeSelf(void)
 {
-    return (RTNATIVETHREAD)vbi_curthread();
+    return (RTNATIVETHREAD)curthread;
 }
 
@@ -55,5 +57,5 @@
     if (!cMillies)
     {
-        vbi_yield();
+        RTThreadYield();
         return VINF_SUCCESS;
     }
@@ -84,5 +86,22 @@
 {
     RT_ASSERT_PREEMPTIBLE();
-    return vbi_yield();
+
+    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+    RTThreadPreemptDisable(&PreemptState);
+
+    char cThreadPreempt = SOL_THREAD_PREEMPT;
+    char cForcePreempt  = SOL_CPU_KPRUNRUN;
+    bool fWillYield = false;
+    Assert(cThreadPreempt >= 1);
+
+    /*
+     * If we are the last preemption enabler for this thread and if force
+     * preemption is set on the CPU, only then we are guaranteed to be preempted.
+     */
+    if (cThreadPreempt == 1 && cForcePreempt != 0)
+        fWillYield = true;
+
+    RTThreadPreemptRestore(&PreemptState);
+    return fWillYield;
 }
 
@@ -91,5 +110,15 @@
 {
     Assert(hThread == NIL_RTTHREAD);
-    if (!vbi_is_preempt_enabled())
+    if (RT_UNLIKELY(g_frtSolInitDone == false))
+    {
+        cmn_err(CE_CONT, "!RTThreadPreemptIsEnabled called before RTR0Init!\n");
+        return true;
+    }
+
+    bool fThreadPreempt = false;
+    if (SOL_THREAD_PREEMPT == 0)
+        fThreadPreempt = true;
+
+    if (!fThreadPreempt)
         return false;
 #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
@@ -106,5 +135,8 @@
 {
     Assert(hThread == NIL_RTTHREAD);
-    return !!vbi_is_preempt_pending();
+
+    char cPreempt      = SOL_CPU_RUNRUN;
+    char cForcePreempt = SOL_CPU_KPRUNRUN;
+    return (cPreempt != 0 || cForcePreempt != 0);
 }
 
@@ -128,5 +160,6 @@
     AssertPtr(pState);
 
-    vbi_preempt_disable();
+    SOL_THREAD_PREEMPT++;
+    Assert(SOL_THREAD_PREEMPT >= 1);
 
     RT_ASSERT_PREEMPT_CPUID_DISABLE(pState);
@@ -139,5 +172,7 @@
     RT_ASSERT_PREEMPT_CPUID_RESTORE(pState);
 
-    vbi_preempt_enable();
+    Assert(SOL_THREAD_PREEMPT >= 1);
+    if (--SOL_THREAD_PREEMPT == 0 && SOL_CPU_RUNRUN != 0)
+        kpreempt(KPREEMPT_SYNC);
 }
 
Index: /trunk/src/VBox/Runtime/r0drv/solaris/vbi/thread2-r0drv-solaris.c
===================================================================
--- /trunk/src/VBox/Runtime/r0drv/solaris/vbi/thread2-r0drv-solaris.c	(revision 40965)
+++ /trunk/src/VBox/Runtime/r0drv/solaris/vbi/thread2-r0drv-solaris.c	(revision 40966)
@@ -32,4 +32,5 @@
 #include "internal/iprt.h"
 #include <iprt/thread.h>
+#include <iprt/process.h>
 
 #include <iprt/assert.h>
@@ -67,5 +68,9 @@
     }
 
-    vbi_set_priority(vbi_curthread(), iPriority);
+    kthread_t *pCurThread = curthread;
+    Assert(pCurThread);
+    thread_lock(pCurThread);
+    thread_change_pri(pCurThread, iPriority, 0);
+    thread_unlock(pCurThread);
     return VINF_SUCCESS;
 }
@@ -96,6 +101,6 @@
     PRTTHREADINT pThreadInt = (PRTTHREADINT)pvThreadInt;
 
-    rtThreadMain(pThreadInt, (RTNATIVETHREAD)vbi_curthread(), &pThreadInt->szName[0]);
-    vbi_thread_exit();
+    rtThreadMain(pThreadInt, RTThreadNativeSelf(), &pThreadInt->szName[0]);
+    thread_exit();
 }
 
@@ -103,11 +108,17 @@
 DECLHIDDEN(int) rtThreadNativeCreate(PRTTHREADINT pThreadInt, PRTNATIVETHREAD pNativeThread)
 {
-    void   *pvKernThread;
     RT_ASSERT_PREEMPTIBLE();
-
-    pvKernThread = vbi_thread_create(rtThreadNativeMain, pThreadInt, sizeof(pThreadInt), minclsyspri);
-    if (pvKernThread)
+    kthread_t *pThread = thread_create(NULL,                            /* Stack, use base */
+                                       0,                               /* Stack size */
+                                       rtThreadNativeMain,              /* Thread function */
+                                       pThreadInt,                      /* Function data */
+                                       sizeof(pThreadInt),              /* Data size*/
+                                       (proc_t *)RTR0ProcHandleSelf(),  /* Process handle */
+                                       TS_RUN,                          /* Ready to run */
+                                       minclsyspri                      /* Priority */
+                                       );
+    if (RT_LIKELY(pThread))
     {
-        *pNativeThread = (RTNATIVETHREAD)pvKernThread;
+        *pNativeThread = (RTNATIVETHREAD)pThread;
         return VINF_SUCCESS;
     }
Index: /trunk/src/VBox/Runtime/r0drv/solaris/vbi/time-r0drv-solaris.c
===================================================================
--- /trunk/src/VBox/Runtime/r0drv/solaris/vbi/time-r0drv-solaris.c	(revision 40965)
+++ /trunk/src/VBox/Runtime/r0drv/solaris/vbi/time-r0drv-solaris.c	(revision 40966)
@@ -61,5 +61,10 @@
 RTDECL(PRTTIMESPEC) RTTimeNow(PRTTIMESPEC pTime)
 {
-    return RTTimeSpecSetNano(pTime, vbi_tod());
+    timestruc_t TimeSpec;
+
+    mutex_enter(&tod_lock);
+    TimeSpec = tod_get();
+    mutex_exit(&tod_lock);
+    return RTTimeSpecSetNano(pTime, (uint64_t)TimeSpec.tv_sec * 1000000000 + TimeSpec.tv_nsec);
 }
 
Index: /trunk/src/VBox/Runtime/r0drv/solaris/vbi/timer-r0drv-solaris.c
===================================================================
--- /trunk/src/VBox/Runtime/r0drv/solaris/vbi/timer-r0drv-solaris.c	(revision 40965)
+++ /trunk/src/VBox/Runtime/r0drv/solaris/vbi/timer-r0drv-solaris.c	(revision 40966)
@@ -46,8 +46,35 @@
 #include "internal/magics.h"
 
+#define SOL_TIMER_ANY_CPU       (-1)
 
 /*******************************************************************************
 *   Structures and Typedefs                                                    *
 *******************************************************************************/
+/**
+ * Single-CPU timer handle.
+ */
+typedef struct RTR0SINGLETIMERSOL
+{
+    /** Cyclic handler. */
+    cyc_handler_t           hHandler;
+    /** Cyclic time and interval representation. */
+    cyc_time_t              hFireTime;
+    /** Timer ticks. */
+    uint64_t                u64Tick;
+} RTR0SINGLETIMERSOL;
+typedef RTR0SINGLETIMERSOL *PRTR0SINGLETIMERSOL;
+
+/**
+ * Omni-CPU timer handle.
+ */
+typedef struct RTR0OMNITIMERSOL
+{
+    /** Absolute timestamp of when the timer should fire next. */
+    uint64_t                u64When;
+    /** Array of timer ticks per CPU. Reinitialized when a CPU is online'd. */
+    uint64_t               *au64Ticks;
+} RTR0OMNITIMERSOL;
+typedef RTR0OMNITIMERSOL *PRTR0OMNITIMERSOL;
+
 /**
  * The internal representation of a Solaris timer handle.
@@ -61,5 +88,5 @@
     /** Flag indicating that the timer is suspended. */
     uint8_t volatile        fSuspended;
-    /** Run on all CPUs if set */
+    /** Whether the timer must run on all CPUs or not. */
     uint8_t                 fAllCpu;
     /** Whether the timer must run on a specific CPU or not. */
@@ -67,10 +94,14 @@
     /** The CPU it must run on if fSpecificCpu is set. */
     uint8_t                 iCpu;
-    /** The nano second interval for repeating timers */
+    /** The nano second interval for repeating timers. */
     uint64_t                interval;
-    /** simple Solaris timer handle. */
-    vbi_stimer_t           *stimer;
-    /** global Solaris timer handle. */
-    vbi_gtimer_t           *gtimer;
+    /** Cyclic timer Id. */
+    cyclic_id_t             hCyclicId;
+    /** @todo Make this a union unless we intend to support omni<=>single timers
+     *        conversions. */
+    /** Single-CPU timer handle. */
+    PRTR0SINGLETIMERSOL     pSingleTimer;
+    /** Omni-CPU timer handle. */
+    PRTR0OMNITIMERSOL       pOmniTimer;
     /** The user callback. */
     PFNRTTIMER              pfnTimer;
@@ -88,17 +119,70 @@
     { \
         AssertPtrReturn(pTimer, VERR_INVALID_HANDLE); \
-        AssertReturn((pTimer)->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE); \
+        AssertMsgReturn((pTimer)->u32Magic == RTTIMER_MAGIC, ("pTimer=%p u32Magic=%x expected %x\n", (pTimer), (pTimer)->u32Magic, RTTIMER_MAGIC), \
+            VERR_INVALID_HANDLE); \
     } while (0)
 
 
-/*
- * Need a wrapper to get the PRTTIMER passed through
- */
-static void rtTimerSolarisCallbackWrapper(PRTTIMER pTimer, uint64_t tick)
-{
-    pTimer->pfnTimer(pTimer, pTimer->pvUser, tick);
-}
-
-
+/**
+ * Callback wrapper for Omni-CPU and single-CPU timers.
+ *
+ * @param    pvArg              Opaque pointer to the timer.
+ *
+ * @remarks This will be executed in interrupt context but only at the specified
+ *          level i.e. CY_LOCK_LEVEL in our case. We -CANNOT- call into the
+ *          cyclic subsystem here, neither should pfnTimer().
+ */
+static void rtTimerSolCallbackWrapper(void *pvArg)
+{
+    PRTTIMER pTimer = (PRTTIMER)pvArg;
+    AssertPtrReturnVoid(pTimer);
+
+    if (pTimer->pSingleTimer)
+    {
+        uint64_t u64Tick = ++pTimer->pSingleTimer->u64Tick;
+        pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick);
+    }
+    else if (pTimer->pOmniTimer)
+    {
+        uint64_t u64Tick = ++pTimer->pOmniTimer->au64Ticks[CPU->cpu_id];
+        pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick);
+    }
+}
+
+
+/**
+ * Omni-CPU cyclic online event. This is called before the omni cycle begins to
+ * fire on the specified CPU.
+ *
+ * @param    pvArg              Opaque pointer to the timer.
+ * @param    pCpu               Pointer to the CPU on which it will fire.
+ * @param    pCyclicHandler     Pointer to a cyclic handler to add to the CPU
+ *                              specified in @a pCpu.
+ * @param    pCyclicTime        Pointer to the cyclic time and interval object.
+ *
+ * @remarks We -CANNOT- call back into the cyclic subsystem here, we can however
+ *          block (sleep).
+ */
+static void rtTimerSolOmniCpuOnline(void *pvArg, cpu_t *pCpu, cyc_handler_t *pCyclicHandler, cyc_time_t *pCyclicTime)
+{
+    PRTTIMER pTimer = (PRTTIMER)pvArg;
+    AssertPtrReturnVoid(pTimer);
+    AssertPtrReturnVoid(pCpu);
+    AssertPtrReturnVoid(pCyclicHandler);
+    AssertPtrReturnVoid(pCyclicTime);
+
+    pTimer->pOmniTimer->au64Ticks[pCpu->cpu_id] = 0;
+    pCyclicHandler->cyh_func  = rtTimerSolCallbackWrapper;
+    pCyclicHandler->cyh_arg   = pTimer;
+    pCyclicHandler->cyh_level = CY_LOCK_LEVEL;
+
+    uint64_t u64Now = RTTimeNanoTS();
+    if (pTimer->pOmniTimer->u64When < u64Now)
+        pCyclicTime->cyt_when = u64Now + pTimer->interval / 2;
+    else
+        pCyclicTime->cyt_when = pTimer->pOmniTimer->u64When;
+
+    pCyclicTime->cyt_interval = pTimer->interval;
+}
 
 
@@ -152,7 +236,9 @@
     pTimer->pfnTimer = pfnTimer;
     pTimer->pvUser = pvUser;
-    pTimer->stimer = NULL;
-    pTimer->gtimer = NULL;
-
+    pTimer->pSingleTimer = NULL;
+    pTimer->pOmniTimer = NULL;
+    pTimer->hCyclicId = CYCLIC_NONE;
+
+
     *ppTimer = pTimer;
     return VINF_SUCCESS;
@@ -179,4 +265,5 @@
 RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
 {
+
     RTTIMER_ASSERT_VALID_RET(pTimer);
     RT_ASSERT_INTS_ON();
@@ -185,23 +272,81 @@
         return VERR_TIMER_ACTIVE;
 
+    /* One-shot timers are not supported by the cyclic system; NOTE(review): this makes the interval==0 emulation further below dead code. */
+    if (pTimer->interval == 0)
+        return VERR_NOT_SUPPORTED;
+
     pTimer->fSuspended = false;
     if (pTimer->fAllCpu)
     {
-        pTimer->gtimer = vbi_gtimer_begin(rtTimerSolarisCallbackWrapper, pTimer, u64First, pTimer->interval);
-        if (pTimer->gtimer == NULL)
-            return VERR_INVALID_PARAMETER;
+        PRTR0OMNITIMERSOL pOmniTimer = RTMemAllocZ(sizeof(RTR0OMNITIMERSOL));
+        if (RT_UNLIKELY(!pOmniTimer))
+            return VERR_NO_MEMORY;
+
+        pOmniTimer->au64Ticks = RTMemAllocZ(RTMpGetCount() * sizeof(uint64_t));
+        if (RT_UNLIKELY(!pOmniTimer->au64Ticks))
+        {
+            RTMemFree(pOmniTimer);
+            return VERR_NO_MEMORY;
+        }
+
+        /*
+         * Setup omni (all CPU) timer. The Omni-CPU online event will fire
+         * and from there we setup periodic timers per CPU.
+         */
+        pTimer->pOmniTimer = pOmniTimer;
+        pOmniTimer->u64When     = pTimer->interval + RTTimeNanoTS();
+
+        cyc_omni_handler_t hOmni;
+        hOmni.cyo_online        = rtTimerSolOmniCpuOnline;
+        hOmni.cyo_offline       = NULL;
+        hOmni.cyo_arg           = pTimer;
+
+        mutex_enter(&cpu_lock);
+        pTimer->hCyclicId = cyclic_add_omni(&hOmni);
+        mutex_exit(&cpu_lock);
     }
     else
     {
-        int iCpu = VBI_ANY_CPU;
+        int iCpu = SOL_TIMER_ANY_CPU;
         if (pTimer->fSpecificCpu)
+        {
             iCpu = pTimer->iCpu;
-        pTimer->stimer = vbi_stimer_begin(rtTimerSolarisCallbackWrapper, pTimer, u64First, pTimer->interval, iCpu);
-        if (pTimer->stimer == NULL)
+            if (!RTMpIsCpuOnline(iCpu))    /* ASSUMES: index == cpuid */
+                return VERR_CPU_OFFLINE;
+        }
+
+        PRTR0SINGLETIMERSOL pSingleTimer = RTMemAllocZ(sizeof(RTR0SINGLETIMERSOL));
+        if (RT_UNLIKELY(!pSingleTimer))
+            return VERR_NO_MEMORY;
+
+        pTimer->pSingleTimer = pSingleTimer;
+        pSingleTimer->hHandler.cyh_func  = rtTimerSolCallbackWrapper;
+        pSingleTimer->hHandler.cyh_arg   = pTimer;
+        pSingleTimer->hHandler.cyh_level = CY_LOCK_LEVEL;
+
+        mutex_enter(&cpu_lock);
+        if (iCpu != SOL_TIMER_ANY_CPU && !cpu_is_online(cpu[iCpu]))
         {
-            if (iCpu != VBI_ANY_CPU)
-                return VERR_CPU_OFFLINE;
-            return VERR_INVALID_PARAMETER;
+            mutex_exit(&cpu_lock);
+            RTMemFree(pSingleTimer);
+            pTimer->pSingleTimer = NULL;
+            return VERR_CPU_OFFLINE;
         }
+
+        pSingleTimer->hFireTime.cyt_when = u64First + RTTimeNanoTS();
+        if (pTimer->interval == 0)
+        {
+            /* @todo use gethrtime_max instead of LLONG_MAX? */
+            AssertCompileSize(pSingleTimer->hFireTime.cyt_interval, sizeof(long long));
+            pSingleTimer->hFireTime.cyt_interval = LLONG_MAX - pSingleTimer->hFireTime.cyt_when;
+        }
+        else
+            pSingleTimer->hFireTime.cyt_interval = pTimer->interval;
+
+        pTimer->hCyclicId = cyclic_add(&pSingleTimer->hHandler, &pSingleTimer->hFireTime);
+        if (iCpu != SOL_TIMER_ANY_CPU)
+            cyclic_bind(pTimer->hCyclicId, cpu[iCpu], NULL /* cpupart */);
+
+        mutex_exit(&cpu_lock);
     }
 
@@ -219,13 +364,18 @@
 
     pTimer->fSuspended = true;
-    if (pTimer->stimer)
-    {
-        vbi_stimer_end(pTimer->stimer);
-        pTimer->stimer = NULL;
-    }
-    else if (pTimer->gtimer)
-    {
-        vbi_gtimer_end(pTimer->gtimer);
-        pTimer->gtimer = NULL;
+    if (pTimer->pSingleTimer)
+    {
+        mutex_enter(&cpu_lock);
+        cyclic_remove(pTimer->hCyclicId);
+        mutex_exit(&cpu_lock);
+        RTMemFree(pTimer->pSingleTimer);
+    }
+    else if (pTimer->pOmniTimer)
+    {
+        mutex_enter(&cpu_lock);
+        cyclic_remove(pTimer->hCyclicId);
+        mutex_exit(&cpu_lock);
+        RTMemFree(pTimer->pOmniTimer->au64Ticks);
+        RTMemFree(pTimer->pOmniTimer);
     }
 
@@ -234,5 +384,4 @@
 
 
-
 RTDECL(int) RTTimerChangeInterval(PRTTIMER pTimer, uint64_t u64NanoInterval)
 {
@@ -247,5 +396,5 @@
 RTDECL(uint32_t) RTTimerGetSystemGranularity(void)
 {
-    return vbi_timer_granularity();
+    return nsec_per_tick;
 }
 
